prompter package
Submodules
prompter.schemas module
- class prompter.schemas.Text(content: str)[source]
Bases: object
- content: str
- __init__(content)
- Parameters:
content (str)
- Return type:
None
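A minimal usage sketch, based only on the constructor and field documented above:

    from prompter.schemas import Text

    # Wrap a plain string as an explicit text block.
    block = Text(content="Summarize the attached report.")
    print(block.content)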
- class prompter.schemas.Image(source: str, media_type: str = 'image/jpeg', detail: Literal['low', 'high', 'auto'] = 'auto')[source]
Bases: object
- source: str
- media_type: str = 'image/jpeg'
- detail: Literal['low', 'high', 'auto'] = 'auto'
- classmethod file(path, media_type='image/jpeg', detail='auto')[source]
- Parameters:
path (str | Path)
media_type (str)
detail (Literal['low', 'high', 'auto'])
- Return type:
Image
- classmethod url(url, detail='auto')[source]
- Parameters:
url (str)
detail (Literal['low', 'high', 'auto'])
- Return type:
Image
- classmethod base64(data, media_type='image/jpeg')[source]
- Parameters:
data (str)
media_type (str)
- Return type:
Image
- __init__(source, media_type='image/jpeg', detail='auto')
- Parameters:
source (str)
media_type (str)
detail (Literal['low', 'high', 'auto'])
- Return type:
None
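A sketch of the three alternative constructors; the file path, URL, and base64 payload are illustrative placeholders, and the assumption is that each classmethod returns an Image as suggested above:

    from prompter.schemas import Image

    # From a local file; media_type defaults to 'image/jpeg'.
    diagram = Image.file("diagram.png", media_type="image/png")

    # From a URL, requesting high-detail processing.
    remote = Image.url("https://example.com/chart.jpg", detail="high")

    # From an already base64-encoded string (placeholder data shown).
    inline = Image.base64("iVBORw0KGgoAAAANSUhEUg...", media_type="image/png")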
- class prompter.schemas.Document(source: str, doc_type: str, extract_text: bool = True, cache: bool = False)[source]
Bases: object
- source: str
- doc_type: str
- extract_text: bool = True
- cache: bool = False
- classmethod from_pdf(path, extract_text=True, cache=False)[source]
- Parameters:
path (str | Path)
extract_text (bool)
cache (bool)
- Return type:
Document
- __init__(source, doc_type, extract_text=True, cache=False)
- Parameters:
source (str)
doc_type (str)
extract_text (bool)
cache (bool)
- Return type:
None
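A sketch of loading a PDF, assuming from_pdf returns a Document as the entry above suggests; the file name is illustrative:

    from prompter.schemas import Document

    # Keep the defaults: extract text eagerly, no caching.
    report = Document.from_pdf("quarterly_report.pdf", extract_text=True, cache=False)
    print(report.doc_type, report.source)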
- class prompter.schemas.User(*content: str | prompter.schemas.Text | prompter.schemas.Image | prompter.schemas.Document)[source]
Bases: object
- class prompter.schemas.Assistant(*content: str | prompter.schemas.Text | prompter.schemas.Image | prompter.schemas.Document)[source]
Bases: object
- class prompter.schemas.System(content: str)[source]
Bases: object
- content: str
- __init__(content)
- Parameters:
content (str)
- Return type:
None
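A conversation sketch, assuming the *content varargs accept the str | Text | Image | Document union shown in the signatures; the URL and wording are illustrative:

    from prompter.schemas import Assistant, Image, System, Text, User

    system = System("You are a concise assistant.")
    turns = [
        User("What is shown in this picture?", Image.url("https://example.com/cat.jpg")),
        Assistant("A cat sleeping on a windowsill."),
        User(Text("Describe it in one word.")),
    ]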
- class prompter.schemas.ToolCall(name: str, arguments: dict[str, typing.Any], id: str | None = None)[source]
Bases: object
- name: str
- arguments: dict[str, Any]
- id: str | None = None
- __init__(name, arguments, id=None)
- Parameters:
name (str)
arguments (dict[str, Any])
id (str | None)
- Return type:
None
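A ToolCall is a plain record of a model-issued call; a sketch with illustrative values:

    from prompter.schemas import ToolCall

    call = ToolCall(name="get_weather", arguments={"city": "Oslo"}, id="call_1")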
- class prompter.schemas.ToolUse(name: str, arguments: Any, result: typing.Any | None = None, error: str | None = None, id: str | None = None)[source]
Bases: object
- name: str
- arguments: Any
- result: Any | None = None
- error: str | None = None
- id: str | None = None
- __init__(name, arguments, result=None, error=None, id=None)
- Parameters:
name (str)
arguments (Any)
result (Any | None)
error (str | None)
id (str | None)
- Return type:
None
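ToolUse pairs a call with its outcome; a sketch with illustrative values, using the optional result and error fields documented above:

    from prompter.schemas import ToolUse

    # A successful invocation records a result.
    ok = ToolUse(name="get_weather", arguments={"city": "Oslo"}, result={"temp_c": 4})

    # A failed one records an error string instead.
    failed = ToolUse(name="get_weather", arguments={"city": ""}, error="city must not be empty")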
- class prompter.schemas.Tool(name: str, description: str, params=None)[source]
Bases: object
- name: str
- description: str
- params: Union[Type[BaseModel], dict[str, Any], None] = None
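A sketch of declaring a tool; per the params annotation, either a pydantic model class or a plain dict schema can describe the arguments. The WeatherArgs model and the tool's name and description are illustrative:

    from pydantic import BaseModel
    from prompter.schemas import Tool

    class WeatherArgs(BaseModel):
        city: str
        unit: str = "celsius"

    weather_tool = Tool(
        name="get_weather",
        description="Look up the current weather for a city.",
        params=WeatherArgs,
    )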
- class prompter.schemas.Prompt(conversation: Optional[list[prompter.schemas.Text | prompter.schemas.Image | prompter.schemas.Document | prompter.schemas.User | prompter.schemas.Assistant | prompter.schemas.System | prompter.schemas.ToolCall | prompter.schemas.ToolUse]] = None, system: str | None = None, tools: list[prompter.schemas.Tool] | None = None, response_format: Optional[Type[pydantic.main.BaseModel]] = None, tool_choice: Union[Literal['auto', 'none', 'required'], str] = 'auto', **kwargs)[source]
Bases: object
- __init__(conversation=None, system=None, tools=None, response_format=None, tool_choice='auto', **kwargs)[source]
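A sketch of assembling a Prompt from the pieces above; only documented keyword arguments are used, and the wording is illustrative:

    from prompter.schemas import Prompt, User

    prompt = Prompt(
        conversation=[User("List three prime numbers.")],
        system="Answer tersely.",
        tool_choice="auto",
    )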
- class prompter.schemas.Message(content: str)[source]
Bases: object
- content: str
- __init__(content)
- Parameters:
content (str)
- Return type:
None
- class prompter.schemas.TextMessage(content: str)[source]
Bases: object
- content: str
- __init__(content)
- Parameters:
content (str)
- Return type:
None
- class prompter.schemas.LLMResponse(raw_response: Any, tools: list[prompter.schemas.Tool], _text_content: str, _tool_calls: list[prompter.schemas.ToolCall])[source]
Bases: object
- raw_response: Any
- __init__(raw_response, tools, _text_content, _tool_calls)
- text_messages()[source]
- Return type:
list[TextMessage]
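Executors produce LLMResponse objects; constructing one by hand is unusual but follows the documented __init__, and the assumption here is that text_messages() surfaces the stored text as TextMessage entries:

    from prompter.schemas import LLMResponse

    response = LLMResponse(raw_response={"id": "resp_1"}, tools=[], _text_content="Hello!", _tool_calls=[])
    for message in response.text_messages():
        print(message.content)
    raw = response.raw_response  # the provider payload stays available untouched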
prompter.base_executor module
prompter.openai_executor module
- class prompter.openai_executor.OpenAIParams(max_tokens: int = 1024, model: str = 'gpt-4o', temperature: float = 0.0)[source]
Bases: object
- max_tokens: int = 1024
- model: str = 'gpt-4o'
- temperature: float = 0.0
- __init__(max_tokens=1024, model='gpt-4o', temperature=0.0)
- Parameters:
max_tokens (int)
model (str)
temperature (float)
- Return type:
None
- class prompter.openai_executor.OpenAIExecutor[source]
Bases: object
- class Converters[source]
Bases: object
- __init__(client=None, params=None)[source]
- Parameters:
params (OpenAIParams | None)
- execute(prompt, params=None)[source]
- Parameters:
prompt (Prompt)
params (OpenAIParams | None)
- Return type:
LLMResponse
- prompter.openai_executor.block_to_openai_messages(block)
- prompter.openai_executor.content_list_to_openai(content)
- Return type:
list[dict[str, Any]]
- Parameters:
content (list)
- prompter.openai_executor.flatten_messages(messages)
- Return type:
list[dict[str, Any]]
- Parameters:
messages (list[dict[str, Any]])
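A sketch of running a prompt through the OpenAI executor; the model name is illustrative, the client argument is left at its default, and the assumption is that execute() returns an LLMResponse as used below:

    from prompter.openai_executor import OpenAIExecutor, OpenAIParams
    from prompter.schemas import Prompt, User

    executor = OpenAIExecutor(params=OpenAIParams(model="gpt-4o", max_tokens=512))
    response = executor.execute(Prompt(conversation=[User("Say hello.")]))
    for message in response.text_messages():
        print(message.content)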
prompter.anthropic_executor module
- class prompter.anthropic_executor.AnthropicParams(max_tokens: int = 1024, model: str = 'claude-3-5-sonnet-latest', temperature: float = 0.0)[source]
Bases: object
- max_tokens: int = 1024
- model: str = 'claude-3-5-sonnet-latest'
- temperature: float = 0.0
- __init__(max_tokens=1024, model='claude-3-5-sonnet-latest', temperature=0.0)
- Parameters:
max_tokens (int)
model (str)
temperature (float)
- Return type:
None
- class prompter.anthropic_executor.ClaudeExecutor[source]
Bases: object
- class Converters[source]
Bases: object
- static parse_anthropic_response(response, tools)[source]
- Parameters:
tools (list[Tool])
- Return type:
LLMResponse
- __init__(client=None, params=None)[source]
- Parameters:
params (AnthropicParams | None)
- execute(prompt, params=None)[source]
- Parameters:
prompt (Prompt)
params (AnthropicParams | None)
- Return type:
LLMResponse
- prompter.anthropic_executor.block_to_anthropic_content(block)
- prompter.anthropic_executor.content_list_to_anthropic(content)
- Return type:
list[dict[str, Any]]
- Parameters:
content (list)
- prompter.anthropic_executor.tool_to_anthropic(tool)
- Return type:
dict[str, Any]
- Parameters:
tool (Tool)
- prompter.anthropic_executor.merge_consecutive_roles(messages)
- Return type:
list[dict[str, Any]]
- Parameters:
messages (list[dict[str, Any]])
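A sketch of the same flow against the Anthropic executor; the model name and prompt are illustrative, and the assumption is again that execute() returns an LLMResponse:

    from prompter.anthropic_executor import AnthropicParams, ClaudeExecutor
    from prompter.schemas import Prompt, User

    params = AnthropicParams(model="claude-3-5-sonnet-latest", temperature=0.0)
    executor = ClaudeExecutor(params=params)
    prompt = Prompt(
        conversation=[User("What is 2 + 2?")],
        system="Answer with just the number.",
    )
    response = executor.execute(prompt)
    print(response.text_messages()[0].content)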