
vllm.tokenizers

Modules:

deepseek_v32_encoding
deepseekv32
detokenizer_utils
hf
mistral
protocol
registry

TokenizerRegistry module-attribute

TokenizerRegistry = _TokenizerRegistry(
    {
        mode: (f"vllm.tokenizers.{mod_relname}", cls_name)
        # The name of the mapping iterated here was lost in rendering;
        # _TOKENIZER_MODES is a hypothetical stand-in for the module's
        # mode -> (module name, class name) dict.
        for mode, (mod_relname, cls_name) in _TOKENIZER_MODES.items()
    }
)
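
The registry resolves a tokenizer mode string to its backing class. A minimal sketch; the mode name "hf" is an assumption here, inferred from the hf module listed above.

from vllm.tokenizers import TokenizerRegistry

# Resolve the class registered for the (assumed) "hf" mode.
tokenizer_cls = TokenizerRegistry.load_tokenizer_cls("hf")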

__all__ module-attribute

__all__ = [
    "TokenizerLike",
    "TokenizerRegistry",
    "cached_get_tokenizer",
    "get_tokenizer",
    "cached_tokenizer_from_config",
    "init_tokenizer_from_config",
]

cached_get_tokenizer module-attribute

cached_get_tokenizer = lru_cache(get_tokenizer)
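
Because cached_get_tokenizer wraps get_tokenizer in functools.lru_cache, repeated calls with the same (hashable) arguments return the same tokenizer instance instead of re-loading it. A minimal sketch, with "gpt2" standing in for any model repo id:

from vllm.tokenizers import cached_get_tokenizer

t1 = cached_get_tokenizer("gpt2")
t2 = cached_get_tokenizer("gpt2")
assert t1 is t2  # served from the cache, not re-loaded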

TokenizerLike

Bases: Protocol

Source code in vllm/tokenizers/protocol.py
class TokenizerLike(Protocol):
    @classmethod
    def from_pretrained(
        cls,
        path_or_repo_id: str | Path,
        *args,
        trust_remote_code: bool = False,
        revision: str | None = None,
        download_dir: str | None = None,
        **kwargs,
    ) -> "TokenizerLike":
        raise NotImplementedError

    def num_special_tokens_to_add(self) -> int:
        raise NotImplementedError

    @property
    def all_special_tokens(self) -> list[str]:
        raise NotImplementedError

    @property
    def all_special_ids(self) -> list[int]:
        raise NotImplementedError

    @property
    def bos_token_id(self) -> int:
        raise NotImplementedError

    @property
    def eos_token_id(self) -> int:
        raise NotImplementedError

    @property
    def pad_token_id(self) -> int:
        raise NotImplementedError

    @property
    def is_fast(self) -> bool:
        raise NotImplementedError

    @property
    def vocab_size(self) -> int:
        raise NotImplementedError

    @property
    def max_token_id(self) -> int:
        raise NotImplementedError

    @property
    def truncation_side(self) -> str:
        raise NotImplementedError

    def __hash__(self) -> int:
        return hash(id(self))

    def __len__(self) -> int:
        return self.vocab_size

    def __call__(
        self,
        text: str | list[str],
        text_pair: str | None = None,
        add_special_tokens: bool = True,
        truncation: bool = False,
        max_length: int | None = None,
    ) -> "BatchEncoding":
        raise NotImplementedError

    def get_vocab(self) -> dict[str, int]:
        raise NotImplementedError

    def get_added_vocab(self) -> dict[str, int]:
        raise NotImplementedError

    def encode(
        self,
        text: str,
        truncation: bool | None = None,
        max_length: int | None = None,
        add_special_tokens: bool = True,
    ) -> list[int]:
        raise NotImplementedError

    def apply_chat_template(
        self,
        messages: list["ChatCompletionMessageParam"],
        tools: list[dict[str, Any]] | None = None,
        **kwargs,
    ) -> str | list[int]:
        raise NotImplementedError

    def convert_tokens_to_string(self, tokens: list[str]) -> str:
        raise NotImplementedError

    def decode(self, ids: list[int] | int, skip_special_tokens: bool = False) -> str:
        raise NotImplementedError

    def convert_ids_to_tokens(
        self,
        ids: list[int],
        skip_special_tokens: bool = False,
    ) -> list[str]:
        raise NotImplementedError
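
Since TokenizerLike is a Protocol, any backend that implements these methods can be used interchangeably, so code should type against the protocol rather than a concrete tokenizer class. A minimal sketch:

from vllm.tokenizers import TokenizerLike

def count_tokens(tokenizer: TokenizerLike, text: str) -> int:
    # Works with any registered backend (HF, Mistral, ...).
    return len(tokenizer.encode(text, add_special_tokens=False))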

all_special_ids property

all_special_ids: list[int]

all_special_tokens property

all_special_tokens: list[str]

bos_token_id property

bos_token_id: int

eos_token_id property

eos_token_id: int

is_fast property

is_fast: bool

max_token_id property

max_token_id: int

pad_token_id property

pad_token_id: int

truncation_side property

truncation_side: str

vocab_size property

vocab_size: int

__call__

__call__(
    text: str | list[str],
    text_pair: str | None = None,
    add_special_tokens: bool = True,
    truncation: bool = False,
    max_length: int | None = None,
) -> BatchEncoding
Source code in vllm/tokenizers/protocol.py
def __call__(
    self,
    text: str | list[str],
    text_pair: str | None = None,
    add_special_tokens: bool = True,
    truncation: bool = False,
    max_length: int | None = None,
) -> "BatchEncoding":
    raise NotImplementedError
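
Calling the tokenizer directly returns a BatchEncoding (the Hugging Face container type named in the signature), which supports dict-style access. A minimal sketch, assuming tokenizer is any TokenizerLike instance:

enc = tokenizer("Hello, world!", truncation=True, max_length=16)
input_ids = enc["input_ids"]  # dict-style access on the BatchEncoding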

__hash__

__hash__() -> int
Source code in vllm/tokenizers/protocol.py
def __hash__(self) -> int:
    return hash(id(self))

__len__

__len__() -> int
Source code in vllm/tokenizers/protocol.py
def __len__(self) -> int:
    return self.vocab_size

apply_chat_template

apply_chat_template(
    messages: list[ChatCompletionMessageParam],
    tools: list[dict[str, Any]] | None = None,
    **kwargs,
) -> str | list[int]
Source code in vllm/tokenizers/protocol.py
def apply_chat_template(
    self,
    messages: list["ChatCompletionMessageParam"],
    tools: list[dict[str, Any]] | None = None,
    **kwargs,
) -> str | list[int]:
    raise NotImplementedError
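
A minimal sketch of rendering a chat prompt. Extra keyword arguments are forwarded to the backend, so add_generation_prompt is an assumption that holds for Hugging Face-style chat templates but may not apply to every backend:

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hi!"},
]
# May return a string or a list of token ids, depending on backend and kwargs.
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)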

convert_ids_to_tokens

convert_ids_to_tokens(
    ids: list[int], skip_special_tokens: bool = False
) -> list[str]
Source code in vllm/tokenizers/protocol.py
def convert_ids_to_tokens(
    self,
    ids: list[int],
    skip_special_tokens: bool = False,
) -> list[str]:
    raise NotImplementedError

convert_tokens_to_string

convert_tokens_to_string(tokens: list[str]) -> str
Source code in vllm/tokenizers/protocol.py
def convert_tokens_to_string(self, tokens: list[str]) -> str:
    raise NotImplementedError

decode

decode(
    ids: list[int] | int, skip_special_tokens: bool = False
) -> str
Source code in vllm/tokenizers/protocol.py
def decode(self, ids: list[int] | int, skip_special_tokens: bool = False) -> str:
    raise NotImplementedError

encode

encode(
    text: str,
    truncation: bool | None = None,
    max_length: int | None = None,
    add_special_tokens: bool = True,
) -> list[int]
Source code in vllm/tokenizers/protocol.py
def encode(
    self,
    text: str,
    truncation: bool | None = None,
    max_length: int | None = None,
    add_special_tokens: bool = True,
) -> list[int]:
    raise NotImplementedError
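
encode and decode are inverses for plain text with most tokenizers, though exact round-tripping is not guaranteed for every input. A minimal sketch:

ids = tokenizer.encode("vLLM is fast", add_special_tokens=False)
text = tokenizer.decode(ids, skip_special_tokens=True)
assert text == "vLLM is fast"  # typically holds for simple ASCII text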

from_pretrained classmethod

from_pretrained(
    path_or_repo_id: str | Path,
    *args,
    trust_remote_code: bool = False,
    revision: str | None = None,
    download_dir: str | None = None,
    **kwargs,
) -> TokenizerLike
Source code in vllm/tokenizers/protocol.py
@classmethod
def from_pretrained(
    cls,
    path_or_repo_id: str | Path,
    *args,
    trust_remote_code: bool = False,
    revision: str | None = None,
    download_dir: str | None = None,
    **kwargs,
) -> "TokenizerLike":
    raise NotImplementedError
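
from_pretrained is the single construction entry point each backend must implement; most callers go through get_tokenizer instead, which resolves the mode first. A direct-construction sketch, with the repo id purely illustrative and "hf" assumed as a mode name:

from vllm.tokenizers import TokenizerRegistry

cls = TokenizerRegistry.load_tokenizer_cls("hf")
tokenizer = cls.from_pretrained(
    "gpt2",
    revision=None,
    trust_remote_code=False,
)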

get_added_vocab

get_added_vocab() -> dict[str, int]
Source code in vllm/tokenizers/protocol.py
def get_added_vocab(self) -> dict[str, int]:
    raise NotImplementedError

get_vocab

get_vocab() -> dict[str, int]
Source code in vllm/tokenizers/protocol.py
def get_vocab(self) -> dict[str, int]:
    raise NotImplementedError
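
get_vocab returns the full token-to-id mapping, while get_added_vocab returns only the tokens added on top of the base vocabulary. A minimal sketch; the token spelling looked up here is model-specific:

vocab = tokenizer.get_vocab()        # token string -> token id
added = tokenizer.get_added_vocab()  # subset: tokens added after training
eos_id = vocab.get("</s>")           # None if this model spells EOS differently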

num_special_tokens_to_add

num_special_tokens_to_add() -> int
Source code in vllm/tokenizers/protocol.py
def num_special_tokens_to_add(self) -> int:
    raise NotImplementedError

cached_tokenizer_from_config

cached_tokenizer_from_config(
    model_config: ModelConfig, **kwargs
)
Source code in vllm/tokenizers/registry.py
def cached_tokenizer_from_config(model_config: "ModelConfig", **kwargs):
    if model_config.skip_tokenizer_init:
        return None

    return cached_get_tokenizer(
        model_config.tokenizer,
        runner_type=model_config.runner_type,
        tokenizer_mode=model_config.tokenizer_mode,
        revision=model_config.tokenizer_revision,
        trust_remote_code=model_config.trust_remote_code,
        **kwargs,
    )
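
A minimal sketch; it assumes a ModelConfig is already in hand (for example, from an initialized engine), since constructing one directly varies by vLLM version:

tokenizer = cached_tokenizer_from_config(model_config)
if tokenizer is None:
    # model_config.skip_tokenizer_init was set
    ...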

get_tokenizer

get_tokenizer(
    tokenizer_name: str | Path,
    *args,
    tokenizer_cls: type[_T] = TokenizerLike,
    trust_remote_code: bool = False,
    revision: str | None = None,
    download_dir: str | None = None,
    **kwargs,
) -> _T

Gets a tokenizer for the given model name via HuggingFace or ModelScope.

Source code in vllm/tokenizers/registry.py
def get_tokenizer(
    tokenizer_name: str | Path,
    *args,
    tokenizer_cls: type[_T] = TokenizerLike,  # type: ignore[assignment]
    trust_remote_code: bool = False,
    revision: str | None = None,
    download_dir: str | None = None,
    **kwargs,
) -> _T:
    """Gets a tokenizer for the given model name via HuggingFace or ModelScope."""
    tokenizer_mode, tokenizer_name, args, kwargs = cached_resolve_tokenizer_args(
        tokenizer_name,
        *args,
        trust_remote_code=trust_remote_code,
        revision=revision,
        download_dir=download_dir,
        **kwargs,
    )

    if tokenizer_cls == TokenizerLike:
        tokenizer_cls_ = TokenizerRegistry.load_tokenizer_cls(tokenizer_mode)
    else:
        tokenizer_cls_ = tokenizer_cls

    tokenizer = tokenizer_cls_.from_pretrained(tokenizer_name, *args, **kwargs)
    if not tokenizer.is_fast:
        logger.warning(
            "Using a slow tokenizer. This might cause a significant "
            "slowdown. Consider using a fast tokenizer instead."
        )

    return tokenizer  # type: ignore
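
A minimal usage sketch; the repo id is illustrative, and tokenizer_mode is forwarded through the argument-resolution step (as cached_tokenizer_from_config does above):

from vllm.tokenizers import get_tokenizer

tokenizer = get_tokenizer(
    "mistralai/Mistral-7B-Instruct-v0.3",  # illustrative repo id
    tokenizer_mode="mistral",
)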

init_tokenizer_from_config

init_tokenizer_from_config(model_config: ModelConfig)
Source code in vllm/tokenizers/registry.py
@deprecated(
    "Renamed to `cached_tokenizer_from_config`. The old name will be removed in v0.14."
)
def init_tokenizer_from_config(model_config: "ModelConfig"):
    return cached_tokenizer_from_config(model_config)
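
Migration is a rename only; a sketch:

# Before (deprecated, removed in v0.14):
tokenizer = init_tokenizer_from_config(model_config)
# After:
tokenizer = cached_tokenizer_from_config(model_config)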