Prompt#

ragbits.core.prompt.Prompt #

Prompt(input_data: PromptInputT | None = None, history: ChatFormat | None = None)

Bases: Generic[PromptInputT, PromptOutputT], BasePromptWithParser[PromptOutputT]

Generic class for prompts. It contains the system and user prompts, and additional messages.

To create a new prompt, subclass this class and provide the user prompt, optionally a system prompt, and optionally the input and output types.
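
For illustration, a minimal sketch of a subclass. The import path comes from this page; the Jinja-style {{ animal }} placeholder (suggested by the Template attributes below) and the model names (AnimalInput, AnimalFact, AnimalPrompt) are assumptions for the example, not part of the API reference:

from pydantic import BaseModel

from ragbits.core.prompt import Prompt


class AnimalInput(BaseModel):
    animal: str


class AnimalFact(BaseModel):
    fact: str


class AnimalPrompt(Prompt[AnimalInput, AnimalFact]):
    # The system prompt is optional; the user prompt is required.
    system_prompt = "You are a zoologist who answers concisely."
    user_prompt = "Tell me one fact about {{ animal }}."


prompt = AnimalPrompt(AnimalInput(animal="otter"))
print(prompt.chat)  # the conversation in the standard OpenAI chat format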

Initialize the Prompt instance.

PARAMETER DESCRIPTION
input_data

The input data to render the prompt templates with. Must be a Pydantic model instance if the prompt has an input type defined. If None and input_type is defined, a ValueError will be raised.

TYPE: PromptInputT | None DEFAULT: None

history

Optional conversation history to initialize the prompt with. If provided, should be in the standard OpenAI chat format.

TYPE: ChatFormat | None DEFAULT: None

RAISES DESCRIPTION
ValueError

If input_data is None when input_type is defined, or if input_data is a string instead of a Pydantic model.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def __init__(self, input_data: PromptInputT | None = None, history: ChatFormat | None = None) -> None:
    """
    Initialize the Prompt instance.

    Args:
        input_data: The input data to render the prompt templates with. Must be a Pydantic model
            instance if the prompt has an input type defined. If None and input_type is defined,
            a ValueError will be raised.
        history: Optional conversation history to initialize the prompt with. If provided,
            should be in the standard OpenAI chat format.

    Raises:
        ValueError: If input_data is None when input_type is defined, or if input_data
            is a string instead of a Pydantic model.
    """
    if self.input_type and input_data is None:
        raise ValueError("Input data must be provided")

    if isinstance(input_data, str):
        raise ValueError("Input data must be of pydantic model type")

    self.rendered_system_prompt = (
        self._render_template(self.system_prompt_template, input_data) if self.system_prompt_template else None
    )
    self.rendered_user_prompt = self._render_template(self.user_prompt_template, input_data)
    self.images = self._get_images_from_input_data(input_data)

    # Additional few-shot examples that can be added dynamically using methods
    # (as opposed to the static `few_shots` attribute, which is defined on the class)
    self._instance_few_shots: list[FewShotExample[PromptInputT, PromptOutputT]] = []

    # Additional conversation history that can be added dynamically using methods
    self._conversation_history: list[dict[str, Any]] = history or []
    self.add_user_message(input_data if input_data else self.rendered_user_prompt)
    super().__init__()
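
Continuing the hypothetical AnimalPrompt sketch above, a prompt can be seeded with earlier turns via history:

history = [
    {"role": "user", "content": "Hi there!"},
    {"role": "assistant", "content": "Hello! Ask me about any animal."},
]
prompt = AnimalPrompt(AnimalInput(animal="otter"), history=history)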

system_prompt class-attribute instance-attribute #

system_prompt: str | None = None

user_prompt instance-attribute #

user_prompt: str

few_shots class-attribute instance-attribute #

few_shots: list[FewShotExample[PromptInputT, PromptOutputT]] = []

response_parser instance-attribute #

response_parser: Callable[[str], PromptOutputT | Awaitable[PromptOutputT]]

input_type instance-attribute #

input_type: type[PromptInputT] | None

output_type instance-attribute #

output_type: type[PromptOutputT]

system_prompt_template instance-attribute #

system_prompt_template: Template | None

user_prompt_template instance-attribute #

user_prompt_template: Template

image_input_fields class-attribute instance-attribute #

image_input_fields: list[str] | None = None

rendered_system_prompt instance-attribute #

rendered_system_prompt = _render_template(system_prompt_template, input_data) if system_prompt_template else None

rendered_user_prompt instance-attribute #

rendered_user_prompt = _render_template(user_prompt_template, input_data)

images instance-attribute #

images = _get_images_from_input_data(input_data)

chat property #

chat: ChatFormat

Returns the conversation in the standard OpenAI chat format.

RETURNS DESCRIPTION
ChatFormat

A list of dictionaries, each containing the role and content of a message.

TYPE: ChatFormat

json_mode property #

json_mode: bool

Returns whether the prompt should be sent in JSON mode.

RETURNS DESCRIPTION
bool

Whether the prompt should be sent in JSON mode.

TYPE: bool

add_assistant_message #

add_assistant_message(message: str | PromptOutputT) -> Self

Add an assistant message to the conversation history.

PARAMETER DESCRIPTION
message

The assistant message content.

TYPE: str | PromptOutputT

RETURNS DESCRIPTION
Self

Prompt[PromptInputT, PromptOutputT]: The current prompt instance to allow chaining.

Source code in packages/ragbits-core/src/ragbits/core/prompt/base.py
def add_assistant_message(self, message: str | PromptOutputT) -> Self:
    """
    Add an assistant message to the conversation history.

    Args:
        message (str | PromptOutputT): The assistant message content.

    Returns:
        Prompt[PromptInputT, PromptOutputT]: The current prompt instance to allow chaining.
    """
    if not hasattr(self, "_conversation_history"):
        self._conversation_history = []

    if isinstance(message, BaseModel):
        message = message.model_dump_json()
    self._conversation_history.append({"role": "assistant", "content": str(message)})
    return self
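
For example, on the hypothetical sketch above, a reply can be recorded as a plain string or as the output model (which, per the source, is serialized with model_dump_json):

prompt.add_assistant_message(AnimalFact(fact="Otters hold hands while sleeping."))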

add_tool_use_message #

add_tool_use_message(id: str, name: str, arguments: dict, result: Any) -> Self

Add tool call messages to the conversation history.

PARAMETER DESCRIPTION
id

The id of the tool call.

TYPE: str

name

The name of the tool.

TYPE: str

arguments

The arguments of the tool.

TYPE: dict

result

The tool call result.

TYPE: Any

RETURNS DESCRIPTION
Self

Prompt[PromptInputT, PromptOutputT]: The current prompt instance to allow chaining.

Source code in packages/ragbits-core/src/ragbits/core/prompt/base.py
def add_tool_use_message(
    self,
    id: str,
    name: str,
    arguments: dict,
    result: Any,  # noqa: ANN401
) -> Self:
    """
    Add tool call messages to the conversation history.

    Args:
        id (str): The id of the tool call.
        name (str): The name of the tool.
        arguments (dict): The arguments of the tool.
        result (Any): The tool call result.

    Returns:
        Prompt[PromptInputT, PromptOutputT]: The current prompt instance to allow chaining.
    """
    if not hasattr(self, "_conversation_history"):
        self._conversation_history = []

    self._conversation_history.extend(
        [
            {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": id,
                        "type": "function",
                        "function": {
                            "name": name,
                            "arguments": str(arguments),
                        },
                    }
                ],
            },
            {
                "role": "tool",
                "tool_call_id": id,
                "content": str(result),
            },
        ]
    )

    return self
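
A usage sketch with a hypothetical tool name; per the source above, this appends the assistant's tool call and the tool's result as two consecutive messages:

prompt.add_tool_use_message(
    id="call_1",
    name="get_weather",
    arguments={"city": "Warsaw"},
    result={"temperature_c": 18},
)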

add_few_shot #

add_few_shot(user_message: str | PromptInputT, assistant_message: str | PromptOutputT) -> Prompt[PromptInputT, PromptOutputT]

Add a few-shot example to the conversation.

PARAMETER DESCRIPTION
user_message

The raw user message or input data that will be rendered using the user prompt template.

TYPE: str | PromptInputT

assistant_message

The raw assistant response or output data. It will be cast to a string or, in the case of a Pydantic model, serialized to JSON.

TYPE: str | PromptOutputT

RETURNS DESCRIPTION
Prompt[PromptInputT, PromptOutputT]

Prompt[PromptInputT, PromptOutputT]: The current prompt instance in order to allow chaining.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def add_few_shot(
    self, user_message: str | PromptInputT, assistant_message: str | PromptOutputT
) -> "Prompt[PromptInputT, PromptOutputT]":
    """
    Add a few-shot example to the conversation.

    Args:
        user_message (str | PromptInputT): The raw user message or input data that will be rendered using the
            user prompt template.
        assistant_message (str | PromptOutputT): The raw assistant response or output data that will be cast
            to a string or, in the case of a Pydantic model, serialized to JSON.

    Returns:
        Prompt[PromptInputT, PromptOutputT]: The current prompt instance in order to allow chaining.
    """
    self._instance_few_shots.append((user_message, assistant_message))
    return self
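
Both raw strings and typed models are accepted, and chaining returns the same instance (the models are the hypothetical ones from the sketch above):

prompt.add_few_shot(
    AnimalInput(animal="cats"),
    AnimalFact(fact="Cats sleep for most of the day."),
).add_few_shot("Tell me one fact about dogs.", "Dogs can learn hundreds of words.")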

list_few_shots #

list_few_shots() -> ChatFormat

Returns the few-shot examples in the standard OpenAI chat format.

RETURNS DESCRIPTION
ChatFormat

A list of dictionaries, each containing the role and content of a message.

TYPE: ChatFormat

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def list_few_shots(self) -> ChatFormat:
    """
    Returns the few-shot examples in the standard OpenAI chat format.

    Returns:
        ChatFormat: A list of dictionaries, each containing the role and content of a message.
    """
    result: ChatFormat = []
    user_content: str | list[dict[str, Any]]
    for user_message, assistant_message in self.few_shots + self._instance_few_shots:
        if not isinstance(user_message, str):
            rendered_text_message = self._render_template(self.user_prompt_template, user_message)
            images_in_input_data = self._get_images_from_input_data(user_message)
            if images_in_input_data:
                user_content = [{"type": "text", "text": rendered_text_message}] + [
                    self._create_message_with_image(image) for image in images_in_input_data
                ]
            else:
                user_content = rendered_text_message
        else:
            user_content = user_message

        if isinstance(assistant_message, BaseModel):
            assistant_content = assistant_message.model_dump_json()
        else:
            assistant_content = str(assistant_message)

        result.append({"role": "user", "content": user_content})
        result.append({"role": "assistant", "content": assistant_content})
    return result
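
Continuing the sketch, the few-shot examples added above come back as alternating user/assistant messages, with model inputs rendered through the user prompt template:

for message in prompt.list_few_shots():
    print(message["role"], "->", message["content"])
# user -> Tell me one fact about cats.
# assistant -> {"fact":"Cats sleep for most of the day."}
# user -> Tell me one fact about dogs.
# assistant -> Dogs can learn hundreds of words.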

add_user_message #

add_user_message(message: str | dict[str, Any] | PromptInputT) -> Prompt[PromptInputT, PromptOutputT]

Add a user message to the conversation history.

PARAMETER DESCRIPTION
message

The user message content. Can be:

- A string: used directly as content
- A dictionary: with format {"type": "text", "text": "message"} or image content
- A PromptInputT model: rendered using the user prompt template

TYPE: str | dict[str, Any] | PromptInputT

RETURNS DESCRIPTION
Prompt[PromptInputT, PromptOutputT]

Prompt[PromptInputT, PromptOutputT]: The current prompt instance to allow chaining.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def add_user_message(self, message: str | dict[str, Any] | PromptInputT) -> "Prompt[PromptInputT, PromptOutputT]":  # type: ignore
    """
    Add a user message to the conversation history.

    Args:
        message (str | dict[str, Any] | PromptInputT): The user message content. Can be:
            - A string: Used directly as content
            - A dictionary: With format {"type": "text", "text": "message"} or image content
            - A PromptInputT model: Will be rendered using the user prompt template

    Returns:
        Prompt[PromptInputT, PromptOutputT]: The current prompt instance to allow chaining.
    """
    content: str | list[dict[str, Any]] | dict[str, Any]

    if isinstance(message, BaseModel):
        # Type checking to ensure we're passing PromptInputT to the methods
        input_model: PromptInputT = cast(PromptInputT, message)

        # Render the message using the template if it's an input model
        rendered_text = self._render_template(self.user_prompt_template, input_model)
        images_in_input = self._get_images_from_input_data(input_model)

        if images_in_input:
            content = [{"type": "text", "text": rendered_text}] + [
                self._create_message_with_image(image) for image in images_in_input
            ]
        else:
            content = rendered_text
    else:
        content = cast(str | dict[str, Any], message)

    return super().add_user_message(content)
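
Each accepted form, on the hypothetical sketch:

prompt.add_user_message("What do otters eat?")                     # plain string
prompt.add_user_message({"type": "text", "text": "And beavers?"})  # content dictionary
prompt.add_user_message(AnimalInput(animal="beaver"))              # rendered via the template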

list_images #

list_images() -> list[str]

Returns the images in the form of URLs or base64-encoded strings.

RETURNS DESCRIPTION
list[str]

list of images

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def list_images(self) -> list[str]:
    """
    Returns the images in the form of URLs or base64-encoded strings.

    Returns:
        list of images
    """
    return [
        content["image_url"]["url"]
        for message in self.chat
        if isinstance(message["content"], list)
        for content in message["content"]
        if content["type"] == "image_url"
    ]

output_schema #

output_schema() -> dict | type[BaseModel] | None

Returns the schema of the desired output. Can be used to request structured output from the LLM API or to validate the output. Can return either a Pydantic model or a JSON schema.

RETURNS DESCRIPTION
dict | type[BaseModel] | None

dict | type[BaseModel] | None: The schema of the desired output or the model describing it.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def output_schema(self) -> dict | type[BaseModel] | None:
    """
    Returns the schema of the desired output. Can be used to request structured output from the LLM API
    or to validate the output. Can return either a Pydantic model or a JSON schema.

    Returns:
        dict | type[BaseModel] | None: The schema of the desired output or the model describing it.
    """
    return self.output_type if issubclass(self.output_type, BaseModel) else None
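
In the sketch above the output type is a Pydantic model, so the model class itself is returned (assuming output_type is resolved from the generic parameters, as the attribute list above suggests):

assert AnimalPrompt(AnimalInput(animal="otter")).output_schema() is AnimalFact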

parse_response async #

parse_response(response: str) -> PromptOutputT

Parse the response from the LLM to the desired output type.

PARAMETER DESCRIPTION
response

The response from the LLM.

TYPE: str

RETURNS DESCRIPTION
PromptOutputT

The parsed response.

TYPE: PromptOutputT

RAISES DESCRIPTION
ResponseParsingError

If the response cannot be parsed.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
async def parse_response(self, response: str) -> PromptOutputT:
    """
    Parse the response from the LLM to the desired output type.

    Args:
        response (str): The response from the LLM.

    Returns:
        PromptOutputT: The parsed response.

    Raises:
        ResponseParsingError: If the response cannot be parsed.
    """
    if asyncio.iscoroutinefunction(self.response_parser):
        result = await self.response_parser(response)
    else:
        result = self.response_parser(response)
    return result
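
A hedged usage sketch. parse_response is async, so it must be awaited; that the default parser for a Pydantic output type deserializes JSON is an assumption here:

import asyncio

async def main() -> None:
    fact = await prompt.parse_response('{"fact": "Otters are carnivores."}')
    print(fact)

asyncio.run(main())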

to_promptfoo classmethod #

to_promptfoo(config: dict[str, Any]) -> ChatFormat

Generate a prompt in the promptfoo format from a promptfoo test configuration.

PARAMETER DESCRIPTION
config

The promptfoo test configuration.

TYPE: dict[str, Any]

RETURNS DESCRIPTION
ChatFormat

The prompt in the format used by promptfoo.

TYPE: ChatFormat

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
@classmethod
def to_promptfoo(cls, config: dict[str, Any]) -> ChatFormat:
    """
    Generate a prompt in the promptfoo format from a promptfoo test configuration.

    Args:
        config: The promptfoo test configuration.

    Returns:
        ChatFormat: The prompt in the format used by promptfoo.
    """
    return cls(cls.input_type.model_validate(config["vars"])).chat  # type: ignore
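
Per the source above, config["vars"] is validated against the prompt's input type, so for the hypothetical sketch a promptfoo configuration maps directly onto the input model's fields:

chat = AnimalPrompt.to_promptfoo({"vars": {"animal": "otter"}})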