Skip to content

Prompt#

ragbits.core.prompt.Prompt #

Prompt(*args: Any, **kwargs: Any)

Bases: Generic[InputT, OutputT], BasePromptWithParser[OutputT]

Generic class for prompts. It contains the system and user prompts, and additional messages.

To create a new prompt, subclass this class and provide the system and user prompts, and optionally the input and output types. The system prompt is optional.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def __init__(self, *args: Any, **kwargs: Any) -> None:
    """
    Render the prompt templates for the given input data.

    The input may be passed positionally or as the ``input_data`` keyword.

    Raises:
        ValueError: If the prompt declares an input type but no input data was given.
    """
    if args:
        input_data = args[0]
    else:
        input_data = kwargs.get("input_data")
    if input_data is None and self.input_type:
        raise ValueError("Input data must be provided")

    # Render both templates up front so the prompt is immediately usable.
    if self.system_prompt_template:
        self.rendered_system_prompt = self._render_template(self.system_prompt_template, input_data)
    else:
        self.rendered_system_prompt = None
    self.rendered_user_prompt = self._render_template(self.user_prompt_template, input_data)
    self.images = self._get_images_from_input_data(input_data)

    # Few-shot examples registered at runtime via `add_few_shot`, kept separate
    # from the static class-level `few_shots` attribute.
    self._instace_few_shots: list[FewShotExample[InputT, OutputT]] = []
    super().__init__()

system_prompt class-attribute instance-attribute #

system_prompt: str | None = None

user_prompt instance-attribute #

user_prompt: str

few_shots class-attribute instance-attribute #

few_shots: list[FewShotExample[InputT, OutputT]] = []

response_parser instance-attribute #

response_parser: Callable[[str], OutputT]

input_type instance-attribute #

input_type: type[InputT] | None

output_type instance-attribute #

output_type: type[OutputT]

system_prompt_template instance-attribute #

system_prompt_template: Template | None

user_prompt_template instance-attribute #

user_prompt_template: Template

image_input_fields class-attribute instance-attribute #

image_input_fields: list[str] | None = None

rendered_system_prompt instance-attribute #

rendered_system_prompt = _render_template(system_prompt_template, input_data) if system_prompt_template else None

rendered_user_prompt instance-attribute #

rendered_user_prompt = _render_template(user_prompt_template, input_data)

images instance-attribute #

images = _get_images_from_input_data(input_data)

chat property #

chat: ChatFormat

Returns the conversation in the standard OpenAI chat format.

RETURNS DESCRIPTION
ChatFormat

A list of dictionaries, each containing the role and content of a message.

TYPE: ChatFormat

json_mode property #

json_mode: bool

Returns whether the prompt should be sent in JSON mode.

RETURNS DESCRIPTION
bool

Whether the prompt should be sent in JSON mode.

TYPE: bool

add_few_shot #

add_few_shot(user_message: str | InputT, assistant_message: str | OutputT) -> Prompt[InputT, OutputT]

Add a few-shot example to the conversation.

PARAMETER DESCRIPTION
user_message

The raw user message or input data that will be rendered using the user prompt template.

TYPE: str | InputT

assistant_message

The raw assistant response or output data that will be cast to a string or, in the case of a Pydantic model, serialized to JSON.

TYPE: str | OutputT

RETURNS DESCRIPTION
Prompt[InputT, OutputT]

Prompt[InputT, OutputT]: The current prompt instance in order to allow chaining.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def add_few_shot(self, user_message: str | InputT, assistant_message: str | OutputT) -> "Prompt[InputT, OutputT]":
    """
    Register an additional few-shot example on this prompt instance.

    Args:
        user_message (str | InputT): Either a ready user message or input data
            to be rendered through the user prompt template.
        assistant_message (str | OutputT): Either a ready assistant reply or
            output data (stringified, or JSON-serialized for Pydantic models).

    Returns:
        Prompt[InputT, OutputT]: This instance, to allow call chaining.
    """
    example = (user_message, assistant_message)
    self._instace_few_shots.append(example)
    return self

list_few_shots #

list_few_shots() -> ChatFormat

Returns the few shot examples in the standard OpenAI chat format.

RETURNS DESCRIPTION
ChatFormat

A list of dictionaries, each containing the role and content of a message.

TYPE: ChatFormat

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def list_few_shots(self) -> ChatFormat:
    """
    Return every few-shot example as OpenAI-style chat messages.

    Class-level examples (`few_shots`) come first, followed by examples added
    at runtime; each example yields a user message and an assistant message.

    Returns:
        ChatFormat: A list of dictionaries, each containing the role and content of a message.
    """
    messages: ChatFormat = []
    for user, assistant in [*self.few_shots, *self._instace_few_shots]:
        # Non-string user inputs are rendered through the user prompt template.
        user_text = user if isinstance(user, str) else self._render_template(self.user_prompt_template, user)

        # Pydantic outputs are serialized to JSON; everything else is stringified.
        if isinstance(assistant, BaseModel):
            assistant_text = assistant.model_dump_json()
        else:
            assistant_text = str(assistant)

        messages.extend(
            (
                {"role": "user", "content": user_text},
                {"role": "assistant", "content": assistant_text},
            )
        )
    return messages

list_images #

list_images() -> list[bytes | str]

Returns the list of images attached to the prompt's input data, in a format compatible with LLM APIs. Returns: a list of raw image bytes or URL strings.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def list_images(self) -> list[bytes | str]:
    """
    Returns the schema of the list of images compatible with LLM APIs
    Returns:
        list of dictionaries
    """
    return self.images

output_schema #

output_schema() -> dict | type[BaseModel] | None

Returns the schema of the desired output. Can be used to request structured output from the LLM API or to validate the output. Can return either a Pydantic model or a JSON schema.

RETURNS DESCRIPTION
dict | type[BaseModel] | None

Optional[Dict | Type[BaseModel]]: The schema of the desired output or the model describing it.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def output_schema(self) -> dict | type[BaseModel] | None:
    """
    Return the schema describing the desired output, if one exists.

    Usable for requesting structured output from an LLM API or for validating
    the response. May be a Pydantic model or a JSON schema.

    Returns:
        Optional[Dict | Type[BaseModel]]: The schema of the desired output or the model describing it.
    """
    if issubclass(self.output_type, BaseModel):
        return self.output_type
    return None

parse_response #

parse_response(response: str) -> OutputT

Parse the response from the LLM to the desired output type.

PARAMETER DESCRIPTION
response

The response from the LLM.

TYPE: str

RETURNS DESCRIPTION
OutputT

The parsed response.

TYPE: OutputT

RAISES DESCRIPTION
ResponseParsingError

If the response cannot be parsed.

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
def parse_response(self, response: str) -> OutputT:
    """
    Convert a raw LLM response into the prompt's output type.

    Args:
        response (str): Raw text returned by the LLM.

    Returns:
        OutputT: The value produced by the configured response parser.

    Raises:
        ResponseParsingError: If the response cannot be parsed.
    """
    parser = self.response_parser
    return parser(response)

to_promptfoo classmethod #

to_promptfoo(config: dict[str, Any]) -> ChatFormat

Generate a prompt in the promptfoo format from a promptfoo test configuration.

PARAMETER DESCRIPTION
config

The promptfoo test configuration.

TYPE: dict[str, Any]

RETURNS DESCRIPTION
ChatFormat

The prompt in the format used by promptfoo.

TYPE: ChatFormat

Source code in packages/ragbits-core/src/ragbits/core/prompt/prompt.py
@classmethod
def to_promptfoo(cls, config: dict[str, Any]) -> ChatFormat:
    """
    Generate a prompt in the promptfoo format from a promptfoo test configuration.

    Args:
        config: The promptfoo test configuration; the prompt input is read
            from its "vars" entry.

    Returns:
        ChatFormat: The prompt in the format used by promptfoo.

    Raises:
        ValueError: If the prompt class does not declare an input type.
    """
    # input_type is declared as `type[InputT] | None`; without this guard a
    # prompt with no input type would fail with an opaque AttributeError.
    if cls.input_type is None:
        raise ValueError("Prompt has no input_type; cannot build it from promptfoo vars")
    return cls(cls.input_type.model_validate(config["vars"])).chat