Skip to content

OpenAI

OpenAIChatSettings

OpenAI chat completion settings configuration.

This class provides configuration options for OpenAI chat completions, including model parameters, tool usage, and request options.

Examples:

>>> settings = OpenAIChatSettings(
...     temperature=0.7,
...     max_completion_tokens=1000,
...     stream=True
... )
>>> settings.temperature = 0.5
Source code in python/potato_head/openai/_openai.pyi
class OpenAIChatSettings:
    """Configuration container for OpenAI chat completion requests.

    Groups every tunable knob of a chat completion call in one place:
    sampling controls, tool usage, streaming behaviour, and assorted
    request options.

    Examples:
        >>> settings = OpenAIChatSettings(
        ...     temperature=0.7,
        ...     max_completion_tokens=1000,
        ...     stream=True
        ... )
        >>> settings.temperature = 0.5
    """

    def __init__(
        self,
        *,
        max_completion_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        top_k: Optional[int] = None,
        frequency_penalty: Optional[float] = None,
        timeout: Optional[float] = None,
        parallel_tool_calls: Optional[bool] = None,
        seed: Optional[int] = None,
        logit_bias: Optional[Dict[str, int]] = None,
        stop_sequences: Optional[List[str]] = None,
        logprobs: Optional[bool] = None,
        audio: Optional[AudioParam] = None,
        metadata: Optional[Dict[str, str]] = None,
        modalities: Optional[List[str]] = None,
        n: Optional[int] = None,
        prediction: Optional[Prediction] = None,
        presence_penalty: Optional[float] = None,
        prompt_cache_key: Optional[str] = None,
        reasoning_effort: Optional[str] = None,
        safety_identifier: Optional[str] = None,
        service_tier: Optional[str] = None,
        store: Optional[bool] = None,
        stream: Optional[bool] = None,
        stream_options: Optional[StreamOptions] = None,
        tool_choice: Optional[ToolChoice] = None,
        tools: Optional[List[Tool]] = None,
        top_logprobs: Optional[int] = None,
        verbosity: Optional[str] = None,
        extra_body: Optional[Any] = None,
    ) -> None:
        """Create a new chat settings object.

        Args:
            max_completion_tokens (Optional[int]):
                Upper bound on the number of generated tokens
            temperature (Optional[float]):
                Sampling temperature in the range 0.0 to 2.0
            top_p (Optional[float]):
                Probability-mass cutoff for nucleus sampling
            top_k (Optional[int]):
                Restrict sampling to the k most likely tokens
            frequency_penalty (Optional[float]):
                Penalty for token frequency, between -2.0 and 2.0
            timeout (Optional[float]):
                Maximum time in seconds to wait for the request
            parallel_tool_calls (Optional[bool]):
                Allow the model to invoke several tools at once
            seed (Optional[int]):
                Seed used to make sampling reproducible
            logit_bias (Optional[Dict[str, int]]):
                Per-token likelihood adjustments
            stop_sequences (Optional[List[str]]):
                Strings that terminate generation when produced
            logprobs (Optional[bool]):
                Include log probabilities in the response
            audio (Optional[AudioParam]):
                Settings controlling audio output
            metadata (Optional[Dict[str, str]]):
                Arbitrary key/value pairs attached to the request
            modalities (Optional[List[str]]):
                Output modalities requested from the model
            n (Optional[int]):
                How many completions to produce
            prediction (Optional[Prediction]):
                Predicted-output configuration
            presence_penalty (Optional[float]):
                Penalty for tokens already present, between -2.0 and 2.0
            prompt_cache_key (Optional[str]):
                Identifier used for prompt caching
            reasoning_effort (Optional[str]):
                How much effort the model spends on reasoning
            safety_identifier (Optional[str]):
                Identifier for the safety configuration
            service_tier (Optional[str]):
                Which service tier should handle the request
            store (Optional[bool]):
                Persist the conversation on the server
            stream (Optional[bool]):
                Deliver the response incrementally
            stream_options (Optional[StreamOptions]):
                Extra options applied when streaming
            tool_choice (Optional[ToolChoice]):
                How the model selects among tools
            tools (Optional[List[Tool]]):
                Tools the model is allowed to call
            top_logprobs (Optional[int]):
                How many top log probabilities to include
            verbosity (Optional[str]):
                Desired verbosity of the response
            extra_body (Optional[Any]):
                Extra fields merged into the request body
        """

    def __str__(self) -> str:
        """Render these settings as a human-readable string."""
__init__(*, max_completion_tokens=None, temperature=None, top_p=None, top_k=None, frequency_penalty=None, timeout=None, parallel_tool_calls=None, seed=None, logit_bias=None, stop_sequences=None, logprobs=None, audio=None, metadata=None, modalities=None, n=None, prediction=None, presence_penalty=None, prompt_cache_key=None, reasoning_effort=None, safety_identifier=None, service_tier=None, store=None, stream=None, stream_options=None, tool_choice=None, tools=None, top_logprobs=None, verbosity=None, extra_body=None)

Initialize OpenAI chat settings.

Parameters:

Name Type Description Default
max_completion_tokens Optional[int]

Maximum number of tokens to generate

None
temperature Optional[float]

Sampling temperature (0.0 to 2.0)

None
top_p Optional[float]

Nucleus sampling parameter

None
top_k Optional[int]

Top-k sampling parameter

None
frequency_penalty Optional[float]

Frequency penalty (-2.0 to 2.0)

None
timeout Optional[float]

Request timeout in seconds

None
parallel_tool_calls Optional[bool]

Whether to enable parallel tool calls

None
seed Optional[int]

Random seed for deterministic outputs

None
logit_bias Optional[Dict[str, int]]

Token bias modifications

None
stop_sequences Optional[List[str]]

Sequences where generation should stop

None
logprobs Optional[bool]

Whether to return log probabilities

None
audio Optional[AudioParam]

Audio generation parameters

None
metadata Optional[Dict[str, str]]

Additional metadata for the request

None
modalities Optional[List[str]]

List of modalities to use

None
n Optional[int]

Number of completions to generate

None
prediction Optional[Prediction]

Prediction configuration

None
presence_penalty Optional[float]

Presence penalty (-2.0 to 2.0)

None
prompt_cache_key Optional[str]

Key for prompt caching

None
reasoning_effort Optional[str]

Reasoning effort level

None
safety_identifier Optional[str]

Safety configuration identifier

None
service_tier Optional[str]

Service tier to use

None
store Optional[bool]

Whether to store the conversation

None
stream Optional[bool]

Whether to stream the response

None
stream_options Optional[StreamOptions]

Streaming configuration options

None
tool_choice Optional[ToolChoice]

Tool choice configuration

None
tools Optional[List[Tool]]

Available tools for the model

None
top_logprobs Optional[int]

Number of top log probabilities to return

None
verbosity Optional[str]

Verbosity level for the response

None
extra_body Optional[Any]

Additional request body parameters

None
Source code in python/potato_head/openai/_openai.pyi
def __init__(
    self,
    *,
    max_completion_tokens: Optional[int] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    top_k: Optional[int] = None,
    frequency_penalty: Optional[float] = None,
    timeout: Optional[float] = None,
    parallel_tool_calls: Optional[bool] = None,
    seed: Optional[int] = None,
    logit_bias: Optional[Dict[str, int]] = None,
    stop_sequences: Optional[List[str]] = None,
    logprobs: Optional[bool] = None,
    audio: Optional[AudioParam] = None,
    metadata: Optional[Dict[str, str]] = None,
    modalities: Optional[List[str]] = None,
    n: Optional[int] = None,
    prediction: Optional[Prediction] = None,
    presence_penalty: Optional[float] = None,
    prompt_cache_key: Optional[str] = None,
    reasoning_effort: Optional[str] = None,
    safety_identifier: Optional[str] = None,
    service_tier: Optional[str] = None,
    store: Optional[bool] = None,
    stream: Optional[bool] = None,
    stream_options: Optional[StreamOptions] = None,
    tool_choice: Optional[ToolChoice] = None,
    tools: Optional[List[Tool]] = None,
    top_logprobs: Optional[int] = None,
    verbosity: Optional[str] = None,
    extra_body: Optional[Any] = None,
) -> None:
    """Create a new chat settings object.

    Args:
        max_completion_tokens (Optional[int]):
            Upper bound on the number of generated tokens
        temperature (Optional[float]):
            Sampling temperature in the range 0.0 to 2.0
        top_p (Optional[float]):
            Probability-mass cutoff for nucleus sampling
        top_k (Optional[int]):
            Restrict sampling to the k most likely tokens
        frequency_penalty (Optional[float]):
            Penalty for token frequency, between -2.0 and 2.0
        timeout (Optional[float]):
            Maximum time in seconds to wait for the request
        parallel_tool_calls (Optional[bool]):
            Allow the model to invoke several tools at once
        seed (Optional[int]):
            Seed used to make sampling reproducible
        logit_bias (Optional[Dict[str, int]]):
            Per-token likelihood adjustments
        stop_sequences (Optional[List[str]]):
            Strings that terminate generation when produced
        logprobs (Optional[bool]):
            Include log probabilities in the response
        audio (Optional[AudioParam]):
            Settings controlling audio output
        metadata (Optional[Dict[str, str]]):
            Arbitrary key/value pairs attached to the request
        modalities (Optional[List[str]]):
            Output modalities requested from the model
        n (Optional[int]):
            How many completions to produce
        prediction (Optional[Prediction]):
            Predicted-output configuration
        presence_penalty (Optional[float]):
            Penalty for tokens already present, between -2.0 and 2.0
        prompt_cache_key (Optional[str]):
            Identifier used for prompt caching
        reasoning_effort (Optional[str]):
            How much effort the model spends on reasoning
        safety_identifier (Optional[str]):
            Identifier for the safety configuration
        service_tier (Optional[str]):
            Which service tier should handle the request
        store (Optional[bool]):
            Persist the conversation on the server
        stream (Optional[bool]):
            Deliver the response incrementally
        stream_options (Optional[StreamOptions]):
            Extra options applied when streaming
        tool_choice (Optional[ToolChoice]):
            How the model selects among tools
        tools (Optional[List[Tool]]):
            Tools the model is allowed to call
        top_logprobs (Optional[int]):
            How many top log probabilities to include
        verbosity (Optional[str]):
            Desired verbosity of the response
        extra_body (Optional[Any]):
            Extra fields merged into the request body
    """

__str__()

Return string representation of the settings.

Source code in python/potato_head/openai/_openai.pyi
def __str__(self) -> str:
    """Render these settings as a human-readable string."""

OpenAIEmbeddingConfig

OpenAI embedding configuration settings.

Source code in python/potato_head/openai/_openai.pyi
class OpenAIEmbeddingConfig:
    """Settings for OpenAI embedding requests."""

    def __init__(
        self,
        model: str,
        dimensions: Optional[int] = None,
        encoding_format: Optional[str] = None,
        user: Optional[str] = None,
    ) -> None:
        """Create an embedding configuration.

        Args:
            model (str):
                Name of the embedding model.
            dimensions (Optional[int]):
                Desired dimensionality of the returned vectors.
            encoding_format (Optional[str]):
                Output encoding for the embeddings; either
                "float" or "base64".
            user (Optional[str]):
                Identifier of the end user issuing the request.
        """

    @property
    def model(self) -> str: ...
    @property
    def dimensions(self) -> Optional[int]: ...
    @property
    def encoding_format(self) -> Optional[str]: ...
    @property
    def user(self) -> Optional[str]: ...

__init__(model, dimensions=None, encoding_format=None, user=None)

Initialize OpenAI embedding configuration.

Parameters:

Name Type Description Default
model str

The embedding model to use.

required
dimensions Optional[int]

The output dimensionality of the embeddings.

None
encoding_format Optional[str]

The encoding format to use for the embeddings. Can be either "float" or "base64".

None
user Optional[str]

The user ID for the embedding request.

None
Source code in python/potato_head/openai/_openai.pyi
def __init__(
    self,
    model: str,
    dimensions: Optional[int] = None,
    encoding_format: Optional[str] = None,
    user: Optional[str] = None,
) -> None:
    """Create an embedding configuration.

    Args:
        model (str):
            Name of the embedding model.
        dimensions (Optional[int]):
            Desired dimensionality of the returned vectors.
        encoding_format (Optional[str]):
            Output encoding for the embeddings; either
            "float" or "base64".
        user (Optional[str]):
            Identifier of the end user issuing the request.
    """