vllm.entrypoints.openai.completion.protocol

_LONG_INFO module-attribute

_LONG_INFO = iinfo(long)

logger module-attribute

logger = init_logger(__name__)

CompletionLogProbs

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/completion/protocol.py
class CompletionLogProbs(OpenAIBaseModel):
    text_offset: list[int] = Field(default_factory=list)
    token_logprobs: list[float | None] = Field(default_factory=list)
    tokens: list[str] = Field(default_factory=list)
    top_logprobs: list[dict[str, float] | None] = Field(default_factory=list)

text_offset class-attribute instance-attribute

text_offset: list[int] = Field(default_factory=list)

token_logprobs class-attribute instance-attribute

token_logprobs: list[float | None] = Field(
    default_factory=list
)

tokens class-attribute instance-attribute

tokens: list[str] = Field(default_factory=list)

top_logprobs class-attribute instance-attribute

top_logprobs: list[dict[str, float] | None] = Field(
    default_factory=list
)
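
Example: the four lists are parallel, with one entry per generated token, mirroring the OpenAI completions logprobs payload. A minimal construction sketch, assuming the module path shown in the title is importable:

from vllm.entrypoints.openai.completion.protocol import CompletionLogProbs

logprobs = CompletionLogProbs(
    text_offset=[0, 5],                  # character offset of each token in the text
    token_logprobs=[-0.12, -1.87],       # log-probability of each sampled token
    tokens=["Hello", " world"],          # decoded token strings
    top_logprobs=[                       # per-position alternatives, or None
        {"Hello": -0.12, "Hi": -2.31},
        {" world": -1.87, " there": -2.94},
    ],
)
print(logprobs.model_dump_json(indent=2))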

CompletionRequest

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/completion/protocol.py
class CompletionRequest(OpenAIBaseModel):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/completions/create
    model: str | None = None
    prompt: list[int] | list[list[int]] | str | list[str] | None = None
    echo: bool | None = False
    frequency_penalty: float | None = 0.0
    logit_bias: dict[str, float] | None = None
    logprobs: int | None = None
    max_tokens: int | None = 16
    n: int = 1
    presence_penalty: float | None = 0.0
    seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
    stop: str | list[str] | None = []
    stream: bool | None = False
    stream_options: StreamOptions | None = None
    suffix: str | None = None
    temperature: float | None = None
    top_p: float | None = None
    user: str | None = None

    # --8<-- [start:completion-sampling-params]
    use_beam_search: bool = False
    top_k: int | None = None
    min_p: float | None = None
    repetition_penalty: float | None = None
    length_penalty: float = 1.0
    stop_token_ids: list[int] | None = []
    include_stop_str_in_output: bool = False
    ignore_eos: bool = False
    min_tokens: int = 0
    skip_special_tokens: bool = True
    spaces_between_special_tokens: bool = True
    truncate_prompt_tokens: Annotated[int, Field(ge=-1, le=_LONG_INFO.max)] | None = (
        None
    )
    allowed_token_ids: list[int] | None = None
    prompt_logprobs: int | None = None
    # --8<-- [end:completion-sampling-params]

    # --8<-- [start:completion-extra-params]
    prompt_embeds: bytes | list[bytes] | None = None
    add_special_tokens: bool = Field(
        default=True,
        description=(
            "If true (the default), special tokens (e.g. BOS) will be added to "
            "the prompt."
        ),
    )
    response_format: AnyResponseFormat | None = Field(
        default=None,
        description=(
            "Similar to chat completion, this parameter specifies the format "
            "of output. Only {'type': 'json_object'}, {'type': 'json_schema'}"
            ", {'type': 'structural_tag'}, or {'type': 'text' } is supported."
        ),
    )
    structured_outputs: StructuredOutputsParams | None = Field(
        default=None,
        description="Additional kwargs for structured outputs",
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    logits_processors: LogitsProcessors | None = Field(
        default=None,
        description=(
            "A list of either qualified names of logits processors, or "
            "constructor objects, to apply when sampling. A constructor is "
            "a JSON object with a required 'qualname' field specifying the "
            "qualified name of the processor class/factory, and optional "
            "'args' and 'kwargs' fields containing positional and keyword "
            "arguments. For example: {'qualname': "
            "'my_module.MyLogitsProcessor', 'args': [1, 2], 'kwargs': "
            "{'param': 'value'}}."
        ),
    )

    return_tokens_as_token_ids: bool | None = Field(
        default=None,
        description=(
            "If specified with 'logprobs', tokens are represented "
            " as strings of the form 'token_id:{token_id}' so that tokens "
            "that are not JSON-encodable can be identified."
        ),
    )
    return_token_ids: bool | None = Field(
        default=None,
        description=(
            "If specified, the result will include token IDs alongside the "
            "generated text. In streaming mode, prompt_token_ids is included "
            "only in the first chunk, and token_ids contains the delta tokens "
            "for each chunk. This is useful for debugging or when you "
            "need to map generated text back to input tokens."
        ),
    )

    cache_salt: str | None = Field(
        default=None,
        description=(
            "If specified, the prefix cache will be salted with the provided "
            "string to prevent an attacker to guess prompts in multi-user "
            "environments. The salt should be random, protected from "
            "access by 3rd parties, and long enough to be "
            "unpredictable (e.g., 43 characters base64-encoded, corresponding "
            "to 256 bit)."
        ),
    )

    kv_transfer_params: dict[str, Any] | None = Field(
        default=None,
        description="KVTransfer parameters used for disaggregated serving.",
    )

    vllm_xargs: dict[str, str | int | float] | None = Field(
        default=None,
        description=(
            "Additional request parameters with string or "
            "numeric values, used by custom extensions."
        ),
    )

    # --8<-- [end:completion-extra-params]

    # Default sampling parameters for completion requests
    _DEFAULT_SAMPLING_PARAMS: dict = {
        "repetition_penalty": 1.0,
        "temperature": 1.0,
        "top_p": 1.0,
        "top_k": 0,
        "min_p": 0.0,
    }

    def to_beam_search_params(
        self,
        max_tokens: int,
        default_sampling_params: dict | None = None,
    ) -> BeamSearchParams:
        if default_sampling_params is None:
            default_sampling_params = {}
        n = self.n if self.n is not None else 1

        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get("temperature", 1.0)

        return BeamSearchParams(
            beam_width=n,
            max_tokens=max_tokens,
            ignore_eos=self.ignore_eos,
            temperature=temperature,
            length_penalty=self.length_penalty,
            include_stop_str_in_output=self.include_stop_str_in_output,
        )

    def to_sampling_params(
        self,
        max_tokens: int,
        logits_processor_pattern: str | None,
        default_sampling_params: dict | None = None,
    ) -> SamplingParams:
        if default_sampling_params is None:
            default_sampling_params = {}

        # Default parameters
        if (repetition_penalty := self.repetition_penalty) is None:
            repetition_penalty = default_sampling_params.get(
                "repetition_penalty",
                self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
            )
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
            )
        if (top_p := self.top_p) is None:
            top_p = default_sampling_params.get(
                "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
            )
        if (top_k := self.top_k) is None:
            top_k = default_sampling_params.get(
                "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
            )
        if (min_p := self.min_p) is None:
            min_p = default_sampling_params.get(
                "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
            )

        prompt_logprobs = self.prompt_logprobs
        if prompt_logprobs is None and self.echo:
            prompt_logprobs = self.logprobs

        echo_without_generation = self.echo and self.max_tokens == 0

        response_format = self.response_format
        if response_format is not None:
            structured_outputs_kwargs = dict[str, Any]()

            # Set structured output params for response format
            if response_format.type == "json_object":
                structured_outputs_kwargs["json_object"] = True
            elif response_format.type == "json_schema":
                json_schema = response_format.json_schema
                assert json_schema is not None
                structured_outputs_kwargs["json"] = json_schema.json_schema
            elif response_format.type == "structural_tag":
                structural_tag = response_format
                assert structural_tag is not None and isinstance(
                    structural_tag,
                    (
                        LegacyStructuralTagResponseFormat,
                        StructuralTagResponseFormat,
                    ),
                )
                s_tag_obj = structural_tag.model_dump(by_alias=True)
                structured_outputs_kwargs["structural_tag"] = json.dumps(s_tag_obj)

            # If structured outputs wasn't already enabled,
            # we must enable it for these features to work
            if len(structured_outputs_kwargs) > 0:
                self.structured_outputs = (
                    StructuredOutputsParams(**structured_outputs_kwargs)
                    if self.structured_outputs is None
                    else replace(self.structured_outputs, **structured_outputs_kwargs)
                )

        extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {}
        if self.kv_transfer_params:
            # Pass in kv_transfer_params via extra_args
            extra_args["kv_transfer_params"] = self.kv_transfer_params
        return SamplingParams.from_optional(
            n=self.n,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            repetition_penalty=repetition_penalty,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            min_p=min_p,
            seed=self.seed,
            stop=self.stop,
            stop_token_ids=self.stop_token_ids,
            logprobs=self.logprobs,
            ignore_eos=self.ignore_eos,
            max_tokens=max_tokens if not echo_without_generation else 1,
            min_tokens=self.min_tokens,
            prompt_logprobs=prompt_logprobs,
            skip_special_tokens=self.skip_special_tokens,
            spaces_between_special_tokens=self.spaces_between_special_tokens,
            include_stop_str_in_output=self.include_stop_str_in_output,
            logits_processors=get_logits_processors(
                self.logits_processors, logits_processor_pattern
            ),
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            output_kind=RequestOutputKind.DELTA
            if self.stream
            else RequestOutputKind.FINAL_ONLY,
            structured_outputs=self.structured_outputs,
            logit_bias=self.logit_bias,
            allowed_token_ids=self.allowed_token_ids,
            extra_args=extra_args or None,
            skip_clone=True,  # Created fresh per request, safe to skip clone
        )

    @model_validator(mode="before")
    @classmethod
    def check_structured_outputs_count(cls, data):
        if data.get("structured_outputs", None) is None:
            return data

        structured_outputs_kwargs = data["structured_outputs"]
        count = sum(
            structured_outputs_kwargs.get(k) is not None
            for k in ("json", "regex", "choice")
        )
        if count > 1:
            raise VLLMValidationError(
                "You can only use one kind of constraints for structured "
                "outputs ('json', 'regex' or 'choice').",
                parameter="structured_outputs",
            )
        return data

    @model_validator(mode="before")
    @classmethod
    def check_logprobs(cls, data):
        if (prompt_logprobs := data.get("prompt_logprobs")) is not None:
            if data.get("stream") and (prompt_logprobs > 0 or prompt_logprobs == -1):
                raise VLLMValidationError(
                    "`prompt_logprobs` are not available when `stream=True`.",
                    parameter="prompt_logprobs",
                )

            if prompt_logprobs < 0 and prompt_logprobs != -1:
                raise VLLMValidationError(
                    "`prompt_logprobs` must be a positive value or -1.",
                    parameter="prompt_logprobs",
                    value=prompt_logprobs,
                )
        if (logprobs := data.get("logprobs")) is not None and logprobs < 0:
            raise VLLMValidationError(
                "`logprobs` must be a positive value.",
                parameter="logprobs",
                value=logprobs,
            )

        return data

    @model_validator(mode="before")
    @classmethod
    def validate_stream_options(cls, data):
        if data.get("stream_options") and not data.get("stream"):
            raise VLLMValidationError(
                "Stream options can only be defined when `stream=True`.",
                parameter="stream_options",
            )

        return data

    @model_validator(mode="before")
    @classmethod
    def validate_prompt_and_prompt_embeds(cls, data):
        prompt = data.get("prompt")
        prompt_embeds = data.get("prompt_embeds")

        prompt_is_empty = prompt is None or (isinstance(prompt, str) and prompt == "")
        embeds_is_empty = prompt_embeds is None or (
            isinstance(prompt_embeds, list) and len(prompt_embeds) == 0
        )

        if prompt_is_empty and embeds_is_empty:
            raise ValueError(
                "Either prompt or prompt_embeds must be provided and non-empty."
            )

        return data

    @model_validator(mode="before")
    @classmethod
    def check_cache_salt_support(cls, data):
        if data.get("cache_salt") is not None and (
            not isinstance(data["cache_salt"], str) or not data["cache_salt"]
        ):
            raise ValueError(
                "Parameter 'cache_salt' must be a non-empty string if provided."
            )
        return data

_DEFAULT_SAMPLING_PARAMS class-attribute instance-attribute

_DEFAULT_SAMPLING_PARAMS: dict = {
    "repetition_penalty": 1.0,
    "temperature": 1.0,
    "top_p": 1.0,
    "top_k": 0,
    "min_p": 0.0,
}

add_special_tokens class-attribute instance-attribute

add_special_tokens: bool = Field(
    default=True,
    description="If true (the default), special tokens (e.g. BOS) will be added to the prompt.",
)

allowed_token_ids class-attribute instance-attribute

allowed_token_ids: list[int] | None = None

cache_salt class-attribute instance-attribute

cache_salt: str | None = Field(
    default=None,
    description="If specified, the prefix cache will be salted with the provided string to prevent an attacker to guess prompts in multi-user environments. The salt should be random, protected from access by 3rd parties, and long enough to be unpredictable (e.g., 43 characters base64-encoded, corresponding to 256 bit).",
)
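
One way to produce a salt with the properties the description asks for (a sketch, not part of this module): secrets.token_urlsafe(32) encodes 32 random bytes, i.e. 256 bits, as a 43-character URL-safe base64 string.

import secrets

cache_salt = secrets.token_urlsafe(32)  # 43-character URL-safe base64 of 256 random bits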

echo class-attribute instance-attribute

echo: bool | None = False

frequency_penalty class-attribute instance-attribute

frequency_penalty: float | None = 0.0

ignore_eos class-attribute instance-attribute

ignore_eos: bool = False

include_stop_str_in_output class-attribute instance-attribute

include_stop_str_in_output: bool = False

kv_transfer_params class-attribute instance-attribute

kv_transfer_params: dict[str, Any] | None = Field(
    default=None,
    description="KVTransfer parameters used for disaggregated serving.",
)

length_penalty class-attribute instance-attribute

length_penalty: float = 1.0

logit_bias class-attribute instance-attribute

logit_bias: dict[str, float] | None = None

logits_processors class-attribute instance-attribute

logits_processors: LogitsProcessors | None = Field(
    default=None,
    description="A list of either qualified names of logits processors, or constructor objects, to apply when sampling. A constructor is a JSON object with a required 'qualname' field specifying the qualified name of the processor class/factory, and optional 'args' and 'kwargs' fields containing positional and keyword arguments. For example: {'qualname': 'my_module.MyLogitsProcessor', 'args': [1, 2], 'kwargs': {'param': 'value'}}.",
)
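
A hypothetical request body illustrating both accepted forms from the description above; the processor names are placeholders, not real classes:

request_body = {
    "model": "my-model",
    "prompt": "Hello",
    "logits_processors": [
        "other_module.SimpleProcessor",           # qualified-name form
        {                                         # constructor-object form
            "qualname": "my_module.MyLogitsProcessor",
            "args": [1, 2],
            "kwargs": {"param": "value"},
        },
    ],
}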

logprobs class-attribute instance-attribute

logprobs: int | None = None

max_tokens class-attribute instance-attribute

max_tokens: int | None = 16

min_p class-attribute instance-attribute

min_p: float | None = None

min_tokens class-attribute instance-attribute

min_tokens: int = 0

model class-attribute instance-attribute

model: str | None = None

n class-attribute instance-attribute

n: int = 1

presence_penalty class-attribute instance-attribute

presence_penalty: float | None = 0.0

priority class-attribute instance-attribute

priority: int = Field(
    default=0,
    description="The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.",
)

prompt class-attribute instance-attribute

prompt: (
    list[int] | list[list[int]] | str | list[str] | None
) = None

prompt_embeds class-attribute instance-attribute

prompt_embeds: bytes | list[bytes] | None = None

prompt_logprobs class-attribute instance-attribute

prompt_logprobs: int | None = None

repetition_penalty class-attribute instance-attribute

repetition_penalty: float | None = None

request_id class-attribute instance-attribute

request_id: str = Field(
    default_factory=random_uuid,
    description="The request_id related to this request. If the caller does not set it, a random_uuid will be generated. This id is used through out the inference process and return in response.",
)

response_format class-attribute instance-attribute

response_format: AnyResponseFormat | None = Field(
    default=None,
    description="Similar to chat completion, this parameter specifies the format of output. Only {'type': 'json_object'}, {'type': 'json_schema'}, {'type': 'structural_tag'}, or {'type': 'text' } is supported.",
)
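
Sketches of the supported values named in the description; the json_schema wrapper follows the OpenAI-style shape, so treat its exact field names as an assumption:

response_format_json_object = {"type": "json_object"}
response_format_text = {"type": "text"}
response_format_json_schema = {
    "type": "json_schema",
    "json_schema": {"name": "answer", "schema": {"type": "object"}},
}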

return_token_ids class-attribute instance-attribute

return_token_ids: bool | None = Field(
    default=None,
    description="If specified, the result will include token IDs alongside the generated text. In streaming mode, prompt_token_ids is included only in the first chunk, and token_ids contains the delta tokens for each chunk. This is useful for debugging or when you need to map generated text back to input tokens.",
)

return_tokens_as_token_ids class-attribute instance-attribute

return_tokens_as_token_ids: bool | None = Field(
    default=None,
    description="If specified with 'logprobs', tokens are represented  as strings of the form 'token_id:{token_id}' so that tokens that are not JSON-encodable can be identified.",
)

seed class-attribute instance-attribute

seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)

skip_special_tokens class-attribute instance-attribute

skip_special_tokens: bool = True

spaces_between_special_tokens class-attribute instance-attribute

spaces_between_special_tokens: bool = True

stop class-attribute instance-attribute

stop: str | list[str] | None = []

stop_token_ids class-attribute instance-attribute

stop_token_ids: list[int] | None = []

stream class-attribute instance-attribute

stream: bool | None = False

stream_options class-attribute instance-attribute

stream_options: StreamOptions | None = None

structured_outputs class-attribute instance-attribute

structured_outputs: StructuredOutputsParams | None = Field(
    default=None,
    description="Additional kwargs for structured outputs",
)

suffix class-attribute instance-attribute

suffix: str | None = None

temperature class-attribute instance-attribute

temperature: float | None = None

top_k class-attribute instance-attribute

top_k: int | None = None

top_p class-attribute instance-attribute

top_p: float | None = None

truncate_prompt_tokens class-attribute instance-attribute

truncate_prompt_tokens: (
    Annotated[int, Field(ge=-1, le=_LONG_INFO.max)] | None
) = None

use_beam_search class-attribute instance-attribute

use_beam_search: bool = False

user class-attribute instance-attribute

user: str | None = None

vllm_xargs class-attribute instance-attribute

vllm_xargs: dict[str, str | int | float] | None = Field(
    default=None,
    description="Additional request parameters with string or numeric values, used by custom extensions.",
)
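
A hypothetical payload using vllm_xargs; the keys are placeholders, and only str, int, and float values are accepted:

request_body = {
    "model": "my-model",
    "prompt": "Hello",
    "vllm_xargs": {"my_plugin_flag": 1, "my_plugin_scale": 0.5},
}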

check_cache_salt_support classmethod

check_cache_salt_support(data)
Source code in vllm/entrypoints/openai/completion/protocol.py
@model_validator(mode="before")
@classmethod
def check_cache_salt_support(cls, data):
    if data.get("cache_salt") is not None and (
        not isinstance(data["cache_salt"], str) or not data["cache_salt"]
    ):
        raise ValueError(
            "Parameter 'cache_salt' must be a non-empty string if provided."
        )
    return data

check_logprobs classmethod

check_logprobs(data)
Source code in vllm/entrypoints/openai/completion/protocol.py
@model_validator(mode="before")
@classmethod
def check_logprobs(cls, data):
    if (prompt_logprobs := data.get("prompt_logprobs")) is not None:
        if data.get("stream") and (prompt_logprobs > 0 or prompt_logprobs == -1):
            raise VLLMValidationError(
                "`prompt_logprobs` are not available when `stream=True`.",
                parameter="prompt_logprobs",
            )

        if prompt_logprobs < 0 and prompt_logprobs != -1:
            raise VLLMValidationError(
                "`prompt_logprobs` must be a positive value or -1.",
                parameter="prompt_logprobs",
                value=prompt_logprobs,
            )
    if (logprobs := data.get("logprobs")) is not None and logprobs < 0:
        raise VLLMValidationError(
            "`logprobs` must be a positive value.",
            parameter="logprobs",
            value=logprobs,
        )

    return data

check_structured_outputs_count classmethod

check_structured_outputs_count(data)
Source code in vllm/entrypoints/openai/completion/protocol.py
@model_validator(mode="before")
@classmethod
def check_structured_outputs_count(cls, data):
    if data.get("structured_outputs", None) is None:
        return data

    structured_outputs_kwargs = data["structured_outputs"]
    count = sum(
        structured_outputs_kwargs.get(k) is not None
        for k in ("json", "regex", "choice")
    )
    if count > 1:
        raise VLLMValidationError(
            "You can only use one kind of constraints for structured "
            "outputs ('json', 'regex' or 'choice').",
            parameter="structured_outputs",
        )
    return data
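
A usage sketch, assuming CompletionRequest is importable from this module: supplying more than one of 'json', 'regex', or 'choice' is rejected during validation.

from vllm.entrypoints.openai.completion.protocol import CompletionRequest

try:
    CompletionRequest.model_validate(
        {
            "prompt": "Hello",
            "structured_outputs": {"json": {"type": "object"}, "regex": "[a-z]+"},
        }
    )
except Exception as exc:  # pydantic surfaces the VLLMValidationError as a validation error
    print(exc)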

to_beam_search_params

to_beam_search_params(
    max_tokens: int,
    default_sampling_params: dict | None = None,
) -> BeamSearchParams
Source code in vllm/entrypoints/openai/completion/protocol.py
def to_beam_search_params(
    self,
    max_tokens: int,
    default_sampling_params: dict | None = None,
) -> BeamSearchParams:
    if default_sampling_params is None:
        default_sampling_params = {}
    n = self.n if self.n is not None else 1

    if (temperature := self.temperature) is None:
        temperature = default_sampling_params.get("temperature", 1.0)

    return BeamSearchParams(
        beam_width=n,
        max_tokens=max_tokens,
        ignore_eos=self.ignore_eos,
        temperature=temperature,
        length_penalty=self.length_penalty,
        include_stop_str_in_output=self.include_stop_str_in_output,
    )
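
A usage sketch, assuming CompletionRequest is importable from this module: the request's n becomes the beam width, and an unset temperature falls back to the provided defaults (or 1.0).

from vllm.entrypoints.openai.completion.protocol import CompletionRequest

req = CompletionRequest(prompt="Once upon a time", n=4, use_beam_search=True)
beam_params = req.to_beam_search_params(max_tokens=64)
print(beam_params.beam_width, beam_params.max_tokens)  # expected: 4 64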

to_sampling_params

to_sampling_params(
    max_tokens: int,
    logits_processor_pattern: str | None,
    default_sampling_params: dict | None = None,
) -> SamplingParams
Source code in vllm/entrypoints/openai/completion/protocol.py
def to_sampling_params(
    self,
    max_tokens: int,
    logits_processor_pattern: str | None,
    default_sampling_params: dict | None = None,
) -> SamplingParams:
    if default_sampling_params is None:
        default_sampling_params = {}

    # Default parameters
    if (repetition_penalty := self.repetition_penalty) is None:
        repetition_penalty = default_sampling_params.get(
            "repetition_penalty",
            self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
        )
    if (temperature := self.temperature) is None:
        temperature = default_sampling_params.get(
            "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
        )
    if (top_p := self.top_p) is None:
        top_p = default_sampling_params.get(
            "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
        )
    if (top_k := self.top_k) is None:
        top_k = default_sampling_params.get(
            "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
        )
    if (min_p := self.min_p) is None:
        min_p = default_sampling_params.get(
            "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
        )

    prompt_logprobs = self.prompt_logprobs
    if prompt_logprobs is None and self.echo:
        prompt_logprobs = self.logprobs

    echo_without_generation = self.echo and self.max_tokens == 0

    response_format = self.response_format
    if response_format is not None:
        structured_outputs_kwargs = dict[str, Any]()

        # Set structured output params for response format
        if response_format.type == "json_object":
            structured_outputs_kwargs["json_object"] = True
        elif response_format.type == "json_schema":
            json_schema = response_format.json_schema
            assert json_schema is not None
            structured_outputs_kwargs["json"] = json_schema.json_schema
        elif response_format.type == "structural_tag":
            structural_tag = response_format
            assert structural_tag is not None and isinstance(
                structural_tag,
                (
                    LegacyStructuralTagResponseFormat,
                    StructuralTagResponseFormat,
                ),
            )
            s_tag_obj = structural_tag.model_dump(by_alias=True)
            structured_outputs_kwargs["structural_tag"] = json.dumps(s_tag_obj)

        # If structured outputs wasn't already enabled,
        # we must enable it for these features to work
        if len(structured_outputs_kwargs) > 0:
            self.structured_outputs = (
                StructuredOutputsParams(**structured_outputs_kwargs)
                if self.structured_outputs is None
                else replace(self.structured_outputs, **structured_outputs_kwargs)
            )

    extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {}
    if self.kv_transfer_params:
        # Pass in kv_transfer_params via extra_args
        extra_args["kv_transfer_params"] = self.kv_transfer_params
    return SamplingParams.from_optional(
        n=self.n,
        presence_penalty=self.presence_penalty,
        frequency_penalty=self.frequency_penalty,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        min_p=min_p,
        seed=self.seed,
        stop=self.stop,
        stop_token_ids=self.stop_token_ids,
        logprobs=self.logprobs,
        ignore_eos=self.ignore_eos,
        max_tokens=max_tokens if not echo_without_generation else 1,
        min_tokens=self.min_tokens,
        prompt_logprobs=prompt_logprobs,
        skip_special_tokens=self.skip_special_tokens,
        spaces_between_special_tokens=self.spaces_between_special_tokens,
        include_stop_str_in_output=self.include_stop_str_in_output,
        logits_processors=get_logits_processors(
            self.logits_processors, logits_processor_pattern
        ),
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        output_kind=RequestOutputKind.DELTA
        if self.stream
        else RequestOutputKind.FINAL_ONLY,
        structured_outputs=self.structured_outputs,
        logit_bias=self.logit_bias,
        allowed_token_ids=self.allowed_token_ids,
        extra_args=extra_args or None,
        skip_clone=True,  # Created fresh per request, safe to skip clone
    )
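
A usage sketch, assuming CompletionRequest is importable from this module: values the request leaves unset fall back first to default_sampling_params and then to _DEFAULT_SAMPLING_PARAMS.

from vllm.entrypoints.openai.completion.protocol import CompletionRequest

req = CompletionRequest(prompt="Once upon a time", top_p=0.9)
params = req.to_sampling_params(
    max_tokens=32,
    logits_processor_pattern=None,
    default_sampling_params={"temperature": 0.7},
)
print(params.temperature, params.top_p)  # expected: 0.7 0.9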

validate_prompt_and_prompt_embeds classmethod

validate_prompt_and_prompt_embeds(data)
Source code in vllm/entrypoints/openai/completion/protocol.py
@model_validator(mode="before")
@classmethod
def validate_prompt_and_prompt_embeds(cls, data):
    prompt = data.get("prompt")
    prompt_embeds = data.get("prompt_embeds")

    prompt_is_empty = prompt is None or (isinstance(prompt, str) and prompt == "")
    embeds_is_empty = prompt_embeds is None or (
        isinstance(prompt_embeds, list) and len(prompt_embeds) == 0
    )

    if prompt_is_empty and embeds_is_empty:
        raise ValueError(
            "Either prompt or prompt_embeds must be provided and non-empty."
        )

    return data

validate_stream_options classmethod

validate_stream_options(data)
Source code in vllm/entrypoints/openai/completion/protocol.py
@model_validator(mode="before")
@classmethod
def validate_stream_options(cls, data):
    if data.get("stream_options") and not data.get("stream"):
        raise VLLMValidationError(
            "Stream options can only be defined when `stream=True`.",
            parameter="stream_options",
        )

    return data

CompletionResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/completion/protocol.py
class CompletionResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: Literal["text_completion"] = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[CompletionResponseChoice]
    service_tier: Literal["auto", "default", "flex", "scale", "priority"] | None = None
    system_fingerprint: str | None = None
    usage: UsageInfo

    # vLLM-specific fields that are not in OpenAI spec
    kv_transfer_params: dict[str, Any] | None = Field(
        default=None, description="KVTransfer parameters."
    )

choices instance-attribute

choices: list[CompletionResponseChoice]

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time()))

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"cmpl-{random_uuid()}"
)

kv_transfer_params class-attribute instance-attribute

kv_transfer_params: dict[str, Any] | None = Field(
    default=None, description="KVTransfer parameters."
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: Literal['text_completion'] = 'text_completion'

service_tier class-attribute instance-attribute

service_tier: (
    Literal["auto", "default", "flex", "scale", "priority"]
    | None
) = None

system_fingerprint class-attribute instance-attribute

system_fingerprint: str | None = None

usage instance-attribute

usage: UsageInfo
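
An illustrative (non-normative) example of the JSON a CompletionResponse serializes to; optional fields that default to None are omitted, and the usage field names follow the standard OpenAI usage object:

completion_response_json = {
    "id": "cmpl-1234",
    "object": "text_completion",
    "created": 1700000000,
    "model": "my-model",
    "choices": [
        {
            "index": 0,
            "text": " world",
            "logprobs": None,
            "finish_reason": "length",
            "stop_reason": None,
        }
    ],
    "usage": {"prompt_tokens": 1, "completion_tokens": 1, "total_tokens": 2},
}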

CompletionResponseChoice

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/completion/protocol.py
class CompletionResponseChoice(OpenAIBaseModel):
    index: int
    text: str
    logprobs: CompletionLogProbs | None = None
    finish_reason: str | None = None
    stop_reason: int | str | None = Field(
        default=None,
        description=(
            "The stop string or token id that caused the completion "
            "to stop, None if the completion finished for some other reason "
            "including encountering the EOS token"
        ),
    )
    token_ids: list[int] | None = None  # For response
    prompt_logprobs: list[dict[int, Logprob] | None] | None = None
    prompt_token_ids: list[int] | None = None  # For prompt

finish_reason class-attribute instance-attribute

finish_reason: str | None = None

index instance-attribute

index: int

logprobs class-attribute instance-attribute

logprobs: CompletionLogProbs | None = None

prompt_logprobs class-attribute instance-attribute

prompt_logprobs: list[dict[int, Logprob] | None] | None = (
    None
)

prompt_token_ids class-attribute instance-attribute

prompt_token_ids: list[int] | None = None

stop_reason class-attribute instance-attribute

stop_reason: int | str | None = Field(
    default=None,
    description="The stop string or token id that caused the completion to stop, None if the completion finished for some other reason including encountering the EOS token",
)

text instance-attribute

text: str

token_ids class-attribute instance-attribute

token_ids: list[int] | None = None

CompletionResponseStreamChoice

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/completion/protocol.py
class CompletionResponseStreamChoice(OpenAIBaseModel):
    index: int
    text: str
    logprobs: CompletionLogProbs | None = None
    finish_reason: str | None = None
    stop_reason: int | str | None = Field(
        default=None,
        description=(
            "The stop string or token id that caused the completion "
            "to stop, None if the completion finished for some other reason "
            "including encountering the EOS token"
        ),
    )
    # not part of the OpenAI spec but for tracing the tokens
    # prompt tokens is put into choice to align with CompletionResponseChoice
    prompt_token_ids: list[int] | None = None
    token_ids: list[int] | None = None

finish_reason class-attribute instance-attribute

finish_reason: str | None = None

index instance-attribute

index: int

logprobs class-attribute instance-attribute

logprobs: CompletionLogProbs | None = None

prompt_token_ids class-attribute instance-attribute

prompt_token_ids: list[int] | None = None

stop_reason class-attribute instance-attribute

stop_reason: int | str | None = Field(
    default=None,
    description="The stop string or token id that caused the completion to stop, None if the completion finished for some other reason including encountering the EOS token",
)

text instance-attribute

text: str

token_ids class-attribute instance-attribute

token_ids: list[int] | None = None

CompletionStreamResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/completion/protocol.py
class CompletionStreamResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: str = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[CompletionResponseStreamChoice]
    usage: UsageInfo | None = Field(default=None)

choices instance-attribute

choices: list[CompletionResponseStreamChoice]

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time()))

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"cmpl-{random_uuid()}"
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: str = 'text_completion'

usage class-attribute instance-attribute

usage: UsageInfo | None = Field(default=None)
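
An illustrative streaming chunk as a CompletionStreamResponse would serialize it; usage is typically None on intermediate chunks and populated only when requested via stream_options:

completion_stream_chunk = {
    "id": "cmpl-1234",
    "object": "text_completion",
    "created": 1700000000,
    "model": "my-model",
    "choices": [
        {
            "index": 0,
            "text": " wor",
            "logprobs": None,
            "finish_reason": None,
            "stop_reason": None,
        }
    ],
    "usage": None,
}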