Skip to content

vllm.v1.kv_offload.spec

OffloadingSpec

Bases: ABC

Spec for an offloading connector

Source code in vllm/v1/kv_offload/spec.py
class OffloadingSpec(ABC):
    """Abstract specification for a KV offloading connector.

    Pulls offloading-related settings out of the vLLM configuration and
    exposes factories for the scheduler-side manager and the worker-side
    transfer handlers.
    """

    def __init__(self, vllm_config: "VllmConfig", kv_cache_config: "KVCacheConfig"):
        logger.warning(
            "Initializing OffloadingSpec. This API is experimental and "
            "subject to change in the future as we iterate the design."
        )
        self.vllm_config = vllm_config
        self.kv_cache_config = kv_cache_config

        transfer_cfg = vllm_config.kv_transfer_config
        assert transfer_cfg is not None
        self.extra_config = transfer_cfg.kv_connector_extra_config

        # Token-block granularity vLLM uses when hashing request tokens
        # to enable prefix caching.
        self.hash_block_size = vllm_config.cache_config.block_size
        # One GPU block size per KV cache group.
        self.gpu_block_size: tuple[int, ...] = tuple(
            group.kv_cache_spec.block_size
            for group in kv_cache_config.kv_cache_groups
        )

        # Each group's GPU block size must be a whole multiple of the
        # hash block size so hashed token blocks align with GPU blocks.
        for size in self.gpu_block_size:
            assert size % self.hash_block_size == 0

        # Ratio offloaded_block_size / gpu_block_size; stays 1 unless an
        # explicit offloaded block size is configured below.
        self.block_size_factor: int = 1

        raw_block_size = self.extra_config.get("block_size")
        if raw_block_size is not None:
            offloaded_size = int(raw_block_size)
            unique_sizes = set(self.gpu_block_size)
            assert len(unique_sizes) == 1, (
                "If 'block_size' is specified in kv_connector_extra_config, "
                "there must be at least one KV cache group, "
                "and all groups must have the same block size."
            )
            gpu_size = unique_sizes.pop()

            # The offloaded block size must be a whole multiple of the
            # single shared GPU block size.
            assert offloaded_size % gpu_size == 0
            self.block_size_factor = offloaded_size // gpu_size

    @abstractmethod
    def get_manager(self) -> OffloadingManager:
        """
        Return the OffloadingManager that the scheduler-side offloading
        connector uses to track offloaded blocks and manage evictions.
        """
        ...

    @abstractmethod
    def get_handlers(
        self,
        kv_caches: dict[str, torch.Tensor],
        attn_backends: dict[str, type[AttentionBackend]],
    ) -> Iterator[tuple[type[LoadStoreSpec], type[LoadStoreSpec], OffloadingHandler]]:
        """
        Yield the offloading handlers together with their src/dst spec types.

        Args:
            kv_caches: Maps each layer name to its GPU KV-cache tensor.
            attn_backends: Maps each layer name to its AttentionBackend class.

        Yields:
            Tuples of (src_type, dst_type, offloading_handler).
        """
        ...

get_handlers abstractmethod

get_handlers(
    kv_caches: dict[str, Tensor],
    attn_backends: dict[str, type[AttentionBackend]],
) -> Iterator[
    tuple[
        type[LoadStoreSpec],
        type[LoadStoreSpec],
        OffloadingHandler,
    ]
]

Get offloading handlers along with their respective src and dst types.

Parameters:

Name Type Description Default
kv_caches dict[str, Tensor]

A dictionary of layer_name -> gpu_kv_cache tensor.

required
attn_backends dict[str, type[AttentionBackend]]

A dictionary of layer_name -> AttentionBackend.

required

Yields:

Type Description
tuple[type[LoadStoreSpec], type[LoadStoreSpec], OffloadingHandler]

Tuples of (src_type, dst_type, offloading_handler).

Source code in vllm/v1/kv_offload/spec.py
@abstractmethod
def get_handlers(
    self,
    kv_caches: dict[str, torch.Tensor],
    attn_backends: dict[str, type[AttentionBackend]],
) -> Iterator[tuple[type[LoadStoreSpec], type[LoadStoreSpec], OffloadingHandler]]:
    """
    Yield the offloading handlers together with their src/dst spec types.

    Args:
        kv_caches: Maps each layer name to its GPU KV-cache tensor.
        attn_backends: Maps each layer name to its AttentionBackend class.

    Yields:
        Tuples of (src_type, dst_type, offloading_handler).
    """
    ...

get_manager abstractmethod

get_manager() -> OffloadingManager

Get an OffloadingManager that will be used by the scheduler-side offloading connector to track offloaded blocks and manage evictions.

Source code in vllm/v1/kv_offload/spec.py
@abstractmethod
def get_manager(self) -> OffloadingManager:
    """
    Return the OffloadingManager that the scheduler-side offloading
    connector uses to track offloaded blocks and manage evictions.
    """
    ...