prompter package

Submodules

prompter.schemas module

class prompter.schemas.Text[source]

Bases: object

Text(content: str)

content: str
__init__(content)
Parameters:

content (str)

Return type:

None

class prompter.schemas.Image[source]

Bases: object

Image(source: str, media_type: str = 'image/jpeg', detail: Literal['low', 'high', 'auto'] = 'auto')

source: str
media_type: str = 'image/jpeg'
detail: Literal['low', 'high', 'auto'] = 'auto'
classmethod file(path, media_type='image/jpeg', detail='auto')[source]
Return type:

Image

Parameters:
  • path (str | Path)

  • media_type (str)

  • detail (Literal['low', 'high', 'auto'])

classmethod url(url, detail='auto')[source]
Return type:

Image

Parameters:
  • url (str)

  • detail (Literal['low', 'high', 'auto'])

classmethod base64(data, media_type='image/jpeg')[source]
Return type:

Image

Parameters:
  • data (str)

  • media_type (str)

__init__(source, media_type='image/jpeg', detail='auto')
Parameters:
  • source (str)

  • media_type (str)

  • detail (Literal['low', 'high', 'auto'])

Return type:

None

class prompter.schemas.Document[source]

Bases: object

Document(source: str, doc_type: str, extract_text: bool = True, cache: bool = False)

source: str
doc_type: str
extract_text: bool = True
cache: bool = False
classmethod from_pdf(path, extract_text=True, cache=False)[source]
Return type:

Document

Parameters:
  • path (str | Path)

  • extract_text (bool)

  • cache (bool)

classmethod from_url(url, cache=False)[source]
Return type:

Document

Parameters:
  • url (str)

  • cache (bool)

__init__(source, doc_type, extract_text=True, cache=False)
Parameters:
  • source (str)

  • doc_type (str)

  • extract_text (bool)

  • cache (bool)

Return type:

None

class prompter.schemas.User[source]

Bases: object

User(*content: str | prompter.schemas.Text | prompter.schemas.Image | prompter.schemas.Document)

__init__(*content)[source]
Parameters:

content (str | Text | Image | Document)

content: list[str | Text | Image | Document]
class prompter.schemas.Assistant[source]

Bases: object

Assistant(*content: str | prompter.schemas.Text | prompter.schemas.Image | prompter.schemas.Document)

__init__(*content)[source]
Parameters:

content (str | Text | Image | Document)

content: list[str | Text | Image | Document]
class prompter.schemas.System[source]

Bases: object

System(content: str)

content: str
__init__(content)
Parameters:

content (str)

Return type:

None

class prompter.schemas.ToolCall[source]

Bases: object

ToolCall(name: str, arguments: dict[str, typing.Any], id: str | None = None)

name: str
arguments: dict[str, Any]
id: str | None = None
__init__(name, arguments, id=None)
Parameters:
  • name (str)

  • arguments (dict[str, Any])

  • id (str | None)

Return type:

None

class prompter.schemas.ToolUse[source]

Bases: object

ToolUse(name: str, arguments: Any, result: typing.Any | None = None, error: str | None = None, id: str | None = None)

name: str
arguments: Any
result: Any | None = None
error: str | None = None
id: str | None = None
__init__(name, arguments, result=None, error=None, id=None)
Parameters:
  • name (str)

  • arguments (Any)

  • result (Any | None)

  • error (str | None)

  • id (str | None)

Return type:

None

class prompter.schemas.Tool[source]

Bases: object

Tool(name: str, description: str, params=None)

__init__(name, description, params=None)[source]
Parameters:
  • name (str)

  • description (str)

name: str
description: str
params: Union[Type[BaseModel], dict[str, Any], None] = None
validate_arguments(arguments)[source]
Return type:

dict[str, Any]

Parameters:

arguments (dict[str, Any])

class prompter.schemas.Prompt[source]

Bases: object

Prompt(conversation: Optional[list[prompter.schemas.Text | prompter.schemas.Image | prompter.schemas.Document | prompter.schemas.User | prompter.schemas.Assistant | prompter.schemas.System | prompter.schemas.ToolCall | prompter.schemas.ToolUse]] = None, system: str | None = None, tools: list[prompter.schemas.Tool] | None = None, response_format: Optional[Type[pydantic.main.BaseModel]] = None, tool_choice: Union[Literal['auto', 'none', 'required'], str] = 'auto', **kwargs)

__init__(conversation=None, system=None, tools=None, response_format=None, tool_choice='auto', **kwargs)[source]
Parameters:
  • conversation (list[Text | Image | Document | User | Assistant | System | ToolCall | ToolUse] | None)

  • system (str | None)

  • tools (list[Tool] | None)

  • response_format (Type[BaseModel] | None)

  • tool_choice (Literal['auto', 'none', 'required'] | str)
classmethod simple(system, user)[source]
Return type:

Prompt

Parameters:
  • system (str)

  • user (str)

classmethod with_tools(tools, user, system='You are a helpful assistant')[source]
Return type:

Prompt

Parameters:
  • tools (list[Tool])

  • user (str)

  • system (str)

classmethod with_schema(schema, user, system='Extract structured data')[source]
Return type:

Prompt

Parameters:
  • schema (Type[BaseModel])

  • user (str)

  • system (str)

serialize()[source]
Return type:

dict[str, Any]

classmethod deserialize(data)[source]
Return type:

Prompt

Parameters:

data (dict[str, Any])

class prompter.schemas.Message[source]

Bases: object

Message(content: str)

content: str
__init__(content)
Parameters:

content (str)

Return type:

None

class prompter.schemas.TextMessage[source]

Bases: object

TextMessage(content: str)

content: str
__init__(content)
Parameters:

content (str)

Return type:

None

class prompter.schemas.LLMResponse[source]

Bases: object

LLMResponse(raw_response: Any, tools: list[prompter.schemas.Tool], _text_content: str, _tool_calls: list[prompter.schemas.ToolCall])

raw_response: Any
tools: list[Tool]
raise_for_status()[source]
__init__(raw_response, tools, _text_content, _tool_calls)
Parameters:
  • raw_response (Any)

  • tools (list[Tool])

  • _text_content (str)

  • _tool_calls (list[ToolCall])

Return type:

None

text()[source]
Return type:

str

messages()[source]
Return type:

list[Message]

tool_call()[source]
Return type:

Optional[ToolCall]

tool_calls()[source]
Return type:

list[ToolCall]

text_messages()[source]
Return type:

list[TextMessage]

prompter.base_executor module

prompter.openai_executor module

class prompter.openai_executor.OpenAIParams[source]

Bases: object

OpenAIParams(max_tokens: int = 1024, model: str = 'gpt-4o', temperature: float = 0.0)

max_tokens: int = 1024
model: str = 'gpt-4o'
temperature: float = 0.0
__init__(max_tokens=1024, model='gpt-4o', temperature=0.0)
Parameters:
  • max_tokens (int)

  • model (str)

  • temperature (float)

Return type:

None

class prompter.openai_executor.OpenAIExecutor[source]

Bases: object

class Converters[source]

Bases: object

static block_to_openai_messages(block)[source]
Return type:

list[dict[str, Any]]

Parameters:

block (Text | Image | Document | User | Assistant | System | ToolCall | ToolUse)

static content_list_to_openai(content)[source]
Return type:

list[dict[str, Any]]

Parameters:

content (list)

static tool_to_openai(tool)[source]
Return type:

dict[str, Any]

Parameters:

tool (Tool)

static flatten_messages(messages)[source]
Return type:

list[dict[str, Any]]

Parameters:

messages (list[dict[str, Any]])

static parse_openai_response(response, tools)[source]
Return type:

LLMResponse

Parameters:

tools (list[Tool])

__init__(client=None, params=None)[source]
Parameters:

params (OpenAIParams | None)

execute(prompt, params=None)[source]
Return type:

LLMResponse

Parameters:
  • prompt (Prompt)

  • params (OpenAIParams | None)
prompter.openai_executor.block_to_openai_messages(block)
Return type:

list[dict[str, Any]]

Parameters:

block (Text | Image | Document | User | Assistant | System | ToolCall | ToolUse)

prompter.openai_executor.content_list_to_openai(content)
Return type:

list[dict[str, Any]]

Parameters:

content (list)

prompter.openai_executor.tool_to_openai(tool)
Return type:

dict[str, Any]

Parameters:

tool (Tool)

prompter.openai_executor.flatten_messages(messages)
Return type:

list[dict[str, Any]]

Parameters:

messages (list[dict[str, Any]])

prompter.openai_executor.parse_openai_response(response, tools)
Return type:

LLMResponse

Parameters:

tools (list[Tool])

prompter.anthropic_executor module

class prompter.anthropic_executor.AnthropicParams[source]

Bases: object

AnthropicParams(max_tokens: int = 1024, model: str = 'claude-3-5-sonnet-latest', temperature: float = 0.0)

max_tokens: int = 1024
model: str = 'claude-3-5-sonnet-latest'
temperature: float = 0.0
__init__(max_tokens=1024, model='claude-3-5-sonnet-latest', temperature=0.0)
Parameters:
  • max_tokens (int)

  • model (str)

  • temperature (float)

Return type:

None

class prompter.anthropic_executor.ClaudeExecutor[source]

Bases: object

class Converters[source]

Bases: object

static block_to_anthropic_content(block)[source]
Return type:

list[dict[str, Any]]

Parameters:

block (Text | Image | Document | User | Assistant | System | ToolCall | ToolUse)

static content_list_to_anthropic(content)[source]
Return type:

list[dict[str, Any]]

Parameters:

content (list)

static tool_to_anthropic(tool)[source]
Return type:

dict[str, Any]

Parameters:

tool (Tool)

static merge_consecutive_roles(messages)[source]
Return type:

list[dict[str, Any]]

Parameters:

messages (list[dict[str, Any]])

static parse_anthropic_response(response, tools)[source]
Return type:

LLMResponse

Parameters:

tools (list[Tool])

__init__(client=None, params=None)[source]
Parameters:

params (AnthropicParams | None)

execute(prompt, params=None)[source]
Return type:

LLMResponse

Parameters:
  • prompt (Prompt)

  • params (AnthropicParams | None)
prompter.anthropic_executor.block_to_anthropic_content(block)
Return type:

list[dict[str, Any]]

Parameters:

block (Text | Image | Document | User | Assistant | System | ToolCall | ToolUse)

prompter.anthropic_executor.content_list_to_anthropic(content)
Return type:

list[dict[str, Any]]

Parameters:

content (list)

prompter.anthropic_executor.tool_to_anthropic(tool)
Return type:

dict[str, Any]

Parameters:

tool (Tool)

prompter.anthropic_executor.merge_consecutive_roles(messages)
Return type:

list[dict[str, Any]]

Parameters:

messages (list[dict[str, Any]])

prompter.anthropic_executor.parse_anthropic_response(response, tools)
Return type:

LLMResponse

Parameters:

tools (list[Tool])