vllm.entrypoints.openai.chat_completion.api_router

ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL module-attribute

ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL = (
    "endpoint-load-metrics-format"
)
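
Clients can opt into per-response load metrics by sending this header on their requests; when the non-streaming branch of create_chat_completion responds, the metrics are attached as response headers. A hedged client-side sketch using the requests library; the server URL, model name, and the "json" format value are placeholders, not values defined by this module:

import requests

# Ask the server to attach endpoint load metrics to the response headers.
# "json" is a placeholder format value; consult the server configuration
# for the formats it actually supports.
resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    headers={"endpoint-load-metrics-format": "json"},
    json={
        "model": "my-model",
        "messages": [{"role": "user", "content": "Hello"}],
    },
)
print(resp.headers)  # load metrics, if enabled, arrive as response headers
print(resp.json())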

logger module-attribute

logger = init_logger(__name__)

router module-attribute

router = APIRouter()

attach_router

attach_router(app: FastAPI)
Source code in vllm/entrypoints/openai/chat_completion/api_router.py
def attach_router(app: FastAPI):
    app.include_router(router)
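
attach_router registers the chat completion routes on an existing FastAPI application. A minimal sketch, assuming the surrounding server later populates the application state that the routes read:

from fastapi import FastAPI

from vllm.entrypoints.openai.chat_completion.api_router import attach_router

app = FastAPI()
attach_router(app)  # registers /v1/chat/completions and /v1/chat/completions/render
# The routes expect app.state.openai_serving_chat (and the tokenization
# fallback handler) to be set during server startup; they are left unset here.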

chat

chat(request: Request) -> OpenAIServingChat | None
Source code in vllm/entrypoints/openai/chat_completion/api_router.py
def chat(request: Request) -> OpenAIServingChat | None:
    return request.app.state.openai_serving_chat
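
This accessor only reads application state; the serving handler must be installed at startup and may be None for models without chat support. A hedged sketch of how a test or embedding application might wire that state up, with mocks standing in for the real handlers:

from unittest.mock import MagicMock

from fastapi import FastAPI

app = FastAPI()
app.state.openai_serving_chat = MagicMock()  # stands in for OpenAIServingChat
app.state.openai_serving_tokenization = MagicMock()  # fallback used for error responses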

create_chat_completion async

create_chat_completion(
    request: ChatCompletionRequest, raw_request: Request
)
Source code in vllm/entrypoints/openai/chat_completion/api_router.py
@router.post(
    "/v1/chat/completions",
    dependencies=[Depends(validate_json_request)],
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_chat_completion(request: ChatCompletionRequest, raw_request: Request):
    metrics_header_format = raw_request.headers.get(
        ENDPOINT_LOAD_METRICS_FORMAT_HEADER_LABEL, ""
    )
    handler = chat(raw_request)
    if handler is None:
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Chat Completions API"
        )

    try:
        generator = await handler.create_chat_completion(request, raw_request)
    except Exception as e:
        return handler.create_error_response(e)

    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )

    elif isinstance(generator, ChatCompletionResponse):
        return JSONResponse(
            content=generator.model_dump(),
            headers=metrics_header(metrics_header_format),
        )

    return StreamingResponse(content=generator, media_type="text/event-stream")
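
A hedged usage sketch for this endpoint using the official openai Python client pointed at a vLLM server; the base URL, API key, and model name are placeholders:

from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

# Non-streaming: the route returns a JSONResponse built from ChatCompletionResponse.
completion = client.chat.completions.create(
    model="my-model",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(completion.choices[0].message.content)

# stream=True exercises the StreamingResponse branch (text/event-stream).
stream = client.chat.completions.create(
    model="my-model",
    messages=[{"role": "user", "content": "Say hello"}],
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")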

render_chat_completion async

render_chat_completion(
    request: ChatCompletionRequest, raw_request: Request
)

Render a chat completion request and return the conversation and engine prompts without generating.

Source code in vllm/entrypoints/openai/chat_completion/api_router.py
@router.post(
    "/v1/chat/completions/render",
    dependencies=[Depends(validate_json_request)],
    response_model=list,
    responses={
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.NOT_FOUND.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
async def render_chat_completion(request: ChatCompletionRequest, raw_request: Request):
    """Render chat completion request and return conversation and engine
    prompts without generating."""
    handler = chat(raw_request)
    if handler is None:
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Chat Completions API"
        )

    try:
        result = await handler.render_chat_request(request)
    except Exception as e:
        return handler.create_error_response(e)

    if isinstance(result, ErrorResponse):
        return JSONResponse(content=result.model_dump(), status_code=result.error.code)

    return JSONResponse(content=result)
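
A hedged client sketch for the render endpoint, which returns the rendered conversation and engine prompts without running generation; the URL and model name are placeholders, and the exact response shape beyond response_model=list is not specified here:

import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions/render",
    json={
        "model": "my-model",
        "messages": [{"role": "user", "content": "Hello"}],
    },
)
print(resp.json())  # rendered conversation and engine prompts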