vllm.model_executor.layers.quantization.gptq_marlin

logger module-attribute

logger = init_logger(__name__)

GPTQMarlinConfig

Bases: QuantizationConfig

Config class for GPTQ Marlin

Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
class GPTQMarlinConfig(QuantizationConfig):
    """Config class for GPTQ Marlin"""

    # (num_bits, is_sym) -> quant_type
    TYPE_MAP = {
        (4, True): scalar_types.uint4b8,
        (8, True): scalar_types.uint8b128,
    }

    def __init__(
        self,
        weight_bits: int,
        group_size: int,
        desc_act: bool,
        is_sym: bool,
        lm_head_quantized: bool,
        dynamic: dict[str, dict[str, int | bool]],
        full_config: dict[str, Any],
        modules_in_block_to_quantize: list[str] | None = None,
    ) -> None:
        super().__init__()
        if desc_act and group_size == -1:
            # In this case, act_order == True is the same as act_order == False
            # (since we have only one group per output channel)
            desc_act = False

        # GPTQModel uses the `dynamic` config property to allow per-module
        # quantization config, so each module can be individually optimized.
        # The format is dict[str, dict], where the key is a regex string that
        # performs either positive ("+:" prefixed) or negative ("-:" prefixed)
        # matching of a module. If no prefix is used, it defaults to a
        # positive match that overrides the base quant config. The value is a
        # dict of field keys and override values.
        # Negative matching skips quantization init for the matched module
        # entirely: it runs non-quantized inference. More details and
        # quantization examples can be found at:
        # https://github.com/ModelCloud/GPTQModel
        # Example:
        #  # layers 10-21 (the last ~1/2) use 8-bit vs 4-bit for layers 0-9
        #  # layers 16-21 (the last ~1/4) use 8-bit and group_size 64
        # dynamic = {
        #  # `.*\.` matches the layers_node prefix
        #  # positive match layers 10-15
        #  r"+:.*\.(?:1[0-5])\..*": {"bits": 8,},
        #  # positive match layers 16-21
        #  r"+:.*\.(?:1[6-9]|20|21)\..*": {"bits": 8, "group_size": 64,},
        #  r"-:.*\.moe\..*": {}, # negative match (skip) all `moe` layers
        # }
        self.dynamic = dynamic

        self.weight_bits = weight_bits
        self.is_sym = is_sym

        self.pack_factor = 32 // weight_bits  # packed into int32
        self.group_size = group_size
        self.desc_act = desc_act
        self.lm_head_quantized = lm_head_quantized
        self.full_config = full_config

        if (weight_bits, is_sym) not in self.TYPE_MAP:
            raise ValueError(
                f"Unsupported quantization config: bits={weight_bits}, sym={is_sym}"
            )

        self.quant_type = self.TYPE_MAP[(weight_bits, is_sym)]

        self.modules_in_block_to_quantize = modules_in_block_to_quantize or []
        # used to identify GPTQ model quantized by autoround
        self.autoround_version = full_config.get("autoround_version", "")

    def __repr__(self) -> str:
        return (
            f"GPTQMarlinConfig(quant_type={self.quant_type}, "
            f"group_size={self.group_size}, "
            f"desc_act={self.desc_act}, "
            f"lm_head_quantized={self.lm_head_quantized}, "
            f"dynamic={self.dynamic}, "
            f"modules_in_block_to_quantize={self.modules_in_block_to_quantize})"
        )

    @classmethod
    def get_name(cls) -> QuantizationMethods:
        return "gptq_marlin"

    @classmethod
    def get_supported_act_dtypes(cls) -> list[torch.dtype]:
        return [torch.half, torch.bfloat16]

    @classmethod
    def get_min_capability(cls) -> int:
        return 80

    @classmethod
    def get_config_filenames(cls) -> list[str]:
        return ["quantize_config.json"]

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> "GPTQMarlinConfig":
        dynamic = cls.get_from_keys_or(config, ["dynamic"], default={})
        dynamic = {} if dynamic is None else dynamic

        weight_bits = cls.get_from_keys(config, ["bits"])
        group_size = cls.get_from_keys(config, ["group_size"])
        desc_act = cls.get_from_keys(config, ["desc_act"])
        is_sym = cls.get_from_keys(config, ["sym"])
        lm_head_quantized = cls.get_from_keys_or(config, ["lm_head"], default=False)
        modules_in_block_to_quantize = cls.get_from_keys_or(
            config, ["modules_in_block_to_quantize"], default=None
        )
        return cls(
            weight_bits,
            group_size,
            desc_act,
            is_sym,
            lm_head_quantized,
            dynamic,
            config,
            modules_in_block_to_quantize,
        )

    @classmethod
    def override_quantization_method(
        cls, hf_quant_cfg, user_quant
    ) -> QuantizationMethods | None:
        can_convert = cls.is_gptq_marlin_compatible(hf_quant_cfg)

        is_valid_user_quant = (
            user_quant is None or user_quant == "marlin" or user_quant == "gptq_marlin"
        )

        if can_convert and is_valid_user_quant:
            msg = (
                "The model is convertible to {} during runtime."
                " Using {} kernel.".format(cls.get_name(), cls.get_name())
            )
            logger.info(msg)
            return cls.get_name()

        if can_convert and user_quant == "gptq":
            logger.info(
                "Detected that the model can run with gptq_marlin"
                ", however you specified quantization=gptq explicitly,"
                " so forcing gptq. Use quantization=gptq_marlin for"
                " faster inference"
            )
        return None

    def get_quant_method(
        self, layer: torch.nn.Module, prefix: str
    ) -> Optional["QuantizeMethodBase"]:
        if isinstance(layer, FusedMoE):
            from vllm.model_executor.layers.quantization.moe_wna16 import MoeWNA16Config

            if not check_moe_marlin_supports_layer(layer, self.group_size):
                logger.warning_once(
                    f"Layer '{prefix}' is not supported by GPTQMoeMarlin. "
                    "Falling back to Moe WNA16 kernels."
                )
                return MoeWNA16Config.from_config(self.full_config).get_quant_method(
                    layer, prefix
                )
            moe_quant_method = get_moe_quant_method(
                self, layer, prefix, GPTQMarlinMoEMethod
            )
            if moe_quant_method is None:
                return None
            moe_quant_method.input_dtype = get_marlin_input_dtype(prefix)
            return moe_quant_method

        quant_method = get_linear_quant_method(
            self, layer, prefix, GPTQMarlinLinearMethod
        )
        if quant_method is None:
            return None
        quant_method.input_dtype = get_marlin_input_dtype(prefix)
        return quant_method

    @classmethod
    def is_gptq_marlin_compatible(cls, quant_config: dict[str, Any]):
        quant_method = quant_config.get("quant_method", "").lower()
        num_bits = quant_config.get("bits")
        group_size = quant_config.get("group_size")
        sym = quant_config.get("sym")
        desc_act = quant_config.get("desc_act")

        if not current_platform.is_cuda():
            return False

        if quant_method != "gptq":
            return False

        # Marlin conversion is only valid if required properties are found
        if num_bits is None or group_size is None or sym is None or desc_act is None:
            return False

        if (num_bits, sym) not in cls.TYPE_MAP:
            return False

        return check_marlin_supported(
            quant_type=cls.TYPE_MAP[(num_bits, sym)], group_size=group_size
        )

    def apply_vllm_mapper(self, hf_to_vllm_mapper):
        if self.modules_in_block_to_quantize is not None:
            self.modules_in_block_to_quantize = hf_to_vllm_mapper.apply_list(
                self.modules_in_block_to_quantize
            )

    def maybe_update_config(self, model_name: str, revision: str | None = None):
        if self.modules_in_block_to_quantize:
            if is_list_of(self.modules_in_block_to_quantize, list):
                # original modules_in_block_to_quantize: list[list[str]]
                # flatten original modules_in_block_to_quantize
                self.modules_in_block_to_quantize = [
                    item
                    for sublist in self.modules_in_block_to_quantize
                    for item in sublist
                ]
            return

        unquant_dtypes = [torch.float16, torch.bfloat16, torch.float32]
        metadata = get_safetensors_params_metadata(model_name, revision=revision)
        quant_layers: set[str] = {
            param_name.rsplit(".", 1)[0]
            for param_name, info in metadata.items()
            if (dtype := info.get("dtype", None))
            and _SAFETENSORS_TO_TORCH_DTYPE[dtype] not in unquant_dtypes
        }
        self.modules_in_block_to_quantize = list(quant_layers)
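
As a usage sketch (the field values are illustrative; the keys are the ones from_config reads), a GPTQ quantize_config.json-style dict can be turned into a config:

from vllm.model_executor.layers.quantization.gptq_marlin import (
    GPTQMarlinConfig,
)

# Illustrative quantize_config.json contents.
quantize_config = {
    "bits": 4,
    "group_size": 128,
    "desc_act": False,
    "sym": True,
    "lm_head": False,
}

cfg = GPTQMarlinConfig.from_config(quantize_config)
print(cfg.pack_factor)  # 8: eight 4-bit values per int32
print(cfg.quant_type)   # scalar_types.uint4b8, via TYPE_MAP[(4, True)]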

TYPE_MAP class-attribute instance-attribute

TYPE_MAP = {(4, True): uint4b8, (8, True): uint8b128}

autoround_version instance-attribute

autoround_version = get('autoround_version', '')

desc_act instance-attribute

desc_act = desc_act

dynamic instance-attribute

dynamic = dynamic

full_config instance-attribute

full_config = full_config

group_size instance-attribute

group_size = group_size

is_sym instance-attribute

is_sym = is_sym

lm_head_quantized instance-attribute

lm_head_quantized = lm_head_quantized

modules_in_block_to_quantize instance-attribute

modules_in_block_to_quantize = (
    modules_in_block_to_quantize or []
)

pack_factor instance-attribute

pack_factor = 32 // weight_bits

quant_type instance-attribute

quant_type = TYPE_MAP[weight_bits, is_sym]

weight_bits instance-attribute

weight_bits = weight_bits
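
The pack_factor above is the number of quantized values stored in each int32. A minimal sketch of that arithmetic (illustrative only; the actual bit layout used by the Marlin kernels may differ):

weight_bits = 4
pack_factor = 32 // weight_bits             # 8 values per int32
vals = [3, 7, 0, 15, 1, 8, 2, 5]            # eight 4-bit values
packed = 0
for i, v in enumerate(vals):
    packed |= v << (i * weight_bits)
# Unpack to verify the round trip.
unpacked = [(packed >> (i * weight_bits)) & 0xF for i in range(pack_factor)]
assert unpacked == vals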

__init__

__init__(
    weight_bits: int,
    group_size: int,
    desc_act: bool,
    is_sym: bool,
    lm_head_quantized: bool,
    dynamic: dict[str, dict[str, int | bool]],
    full_config: dict[str, Any],
    modules_in_block_to_quantize: list[str] | None = None,
) -> None
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def __init__(
    self,
    weight_bits: int,
    group_size: int,
    desc_act: bool,
    is_sym: bool,
    lm_head_quantized: bool,
    dynamic: dict[str, dict[str, int | bool]],
    full_config: dict[str, Any],
    modules_in_block_to_quantize: list[str] | None = None,
) -> None:
    super().__init__()
    if desc_act and group_size == -1:
        # In this case, act_order == True is the same as act_order == False
        # (since we have only one group per output channel)
        desc_act = False

    # GPTQModel uses the `dynamic` config property to allow per-module
    # quantization config, so each module can be individually optimized.
    # The format is dict[str, dict], where the key is a regex string that
    # performs either positive ("+:" prefixed) or negative ("-:" prefixed)
    # matching of a module. If no prefix is used, it defaults to a
    # positive match that overrides the base quant config. The value is a
    # dict of field keys and override values.
    # Negative matching skips quantization init for the matched module
    # entirely: it runs non-quantized inference. More details and
    # quantization examples can be found at:
    # https://github.com/ModelCloud/GPTQModel
    # Example:
    #  # layers 10-21 (the last ~1/2) use 8-bit vs 4-bit for layers 0-9
    #  # layers 16-21 (the last ~1/4) use 8-bit and group_size 64
    # dynamic = {
    #  # `.*\.` matches the layers_node prefix
    #  # positive match layers 10-15
    #  r"+:.*\.(?:1[0-5])\..*": {"bits": 8,},
    #  # positive match layers 16-21
    #  r"+:.*\.(?:1[6-9]|20|21)\..*": {"bits": 8, "group_size": 64,},
    #  r"-:.*\.moe\..*": {}, # negative match (skip) all `moe` layers
    # }
    self.dynamic = dynamic

    self.weight_bits = weight_bits
    self.is_sym = is_sym

    self.pack_factor = 32 // weight_bits  # packed into int32
    self.group_size = group_size
    self.desc_act = desc_act
    self.lm_head_quantized = lm_head_quantized
    self.full_config = full_config

    if (weight_bits, is_sym) not in self.TYPE_MAP:
        raise ValueError(
            f"Unsupported quantization config: bits={weight_bits}, sym={is_sym}"
        )

    self.quant_type = self.TYPE_MAP[(weight_bits, is_sym)]

    self.modules_in_block_to_quantize = modules_in_block_to_quantize or []
    # used to identify GPTQ model quantized by autoround
    self.autoround_version = full_config.get("autoround_version", "")
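
A standalone sketch of how the "+:"/"-:" prefixes in dynamic could resolve a per-module override (illustrative only; resolve_dynamic is a hypothetical helper, and the real matching is done by vLLM's GPTQ utilities and GPTQModel):

import re

def resolve_dynamic(dynamic, module_name):
    """Return an override dict, {} for no match, or None to skip the module."""
    for pattern, overrides in dynamic.items():
        negative = pattern.startswith("-:")
        regex = pattern[2:] if pattern[:2] in ("+:", "-:") else pattern
        if re.fullmatch(regex, module_name):
            return None if negative else overrides
    return {}

dynamic = {
    r"+:.*\.(?:1[0-5])\..*": {"bits": 8},
    r"-:.*\.moe\..*": {},  # negative match: skip all `moe` modules
}
assert resolve_dynamic(dynamic, "model.layers.12.self_attn.q_proj") == {"bits": 8}
assert resolve_dynamic(dynamic, "model.layers.3.moe.gate") is None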

__repr__

__repr__() -> str
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def __repr__(self) -> str:
    return (
        f"GPTQMarlinConfig(quant_type={self.quant_type}, "
        f"group_size={self.group_size}, "
        f"desc_act={self.desc_act}, "
        f"lm_head_quantized={self.lm_head_quantized}, "
        f"dynamic={self.dynamic}, "
        f"modules_in_block_to_quantize={self.modules_in_block_to_quantize})"
    )

apply_vllm_mapper

apply_vllm_mapper(hf_to_vllm_mapper)
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def apply_vllm_mapper(self, hf_to_vllm_mapper):
    if self.modules_in_block_to_quantize is not None:
        self.modules_in_block_to_quantize = hf_to_vllm_mapper.apply_list(
            self.modules_in_block_to_quantize
        )

from_config classmethod

from_config(config: dict[str, Any]) -> GPTQMarlinConfig
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
@classmethod
def from_config(cls, config: dict[str, Any]) -> "GPTQMarlinConfig":
    dynamic = cls.get_from_keys_or(config, ["dynamic"], default={})
    dynamic = {} if dynamic is None else dynamic

    weight_bits = cls.get_from_keys(config, ["bits"])
    group_size = cls.get_from_keys(config, ["group_size"])
    desc_act = cls.get_from_keys(config, ["desc_act"])
    is_sym = cls.get_from_keys(config, ["sym"])
    lm_head_quantized = cls.get_from_keys_or(config, ["lm_head"], default=False)
    modules_in_block_to_quantize = cls.get_from_keys_or(
        config, ["modules_in_block_to_quantize"], default=None
    )
    return cls(
        weight_bits,
        group_size,
        desc_act,
        is_sym,
        lm_head_quantized,
        dynamic,
        config,
        modules_in_block_to_quantize,
    )

get_config_filenames classmethod

get_config_filenames() -> list[str]
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
@classmethod
def get_config_filenames(cls) -> list[str]:
    return ["quantize_config.json"]

get_min_capability classmethod

get_min_capability() -> int
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
@classmethod
def get_min_capability(cls) -> int:
    return 80

get_name classmethod

get_name() -> QuantizationMethods
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
@classmethod
def get_name(cls) -> QuantizationMethods:
    return "gptq_marlin"

get_quant_method

get_quant_method(
    layer: Module, prefix: str
) -> Optional[QuantizeMethodBase]
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def get_quant_method(
    self, layer: torch.nn.Module, prefix: str
) -> Optional["QuantizeMethodBase"]:
    if isinstance(layer, FusedMoE):
        from vllm.model_executor.layers.quantization.moe_wna16 import MoeWNA16Config

        if not check_moe_marlin_supports_layer(layer, self.group_size):
            logger.warning_once(
                f"Layer '{prefix}' is not supported by GPTQMoeMarlin. "
                "Falling back to Moe WNA16 kernels."
            )
            return MoeWNA16Config.from_config(self.full_config).get_quant_method(
                layer, prefix
            )
        moe_quant_method = get_moe_quant_method(
            self, layer, prefix, GPTQMarlinMoEMethod
        )
        if moe_quant_method is None:
            return None
        moe_quant_method.input_dtype = get_marlin_input_dtype(prefix)
        return moe_quant_method

    quant_method = get_linear_quant_method(
        self, layer, prefix, GPTQMarlinLinearMethod
    )
    if quant_method is None:
        return None
    quant_method.input_dtype = get_marlin_input_dtype(prefix)
    return quant_method

get_supported_act_dtypes classmethod

get_supported_act_dtypes() -> list[dtype]
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
@classmethod
def get_supported_act_dtypes(cls) -> list[torch.dtype]:
    return [torch.half, torch.bfloat16]

is_gptq_marlin_compatible classmethod

is_gptq_marlin_compatible(quant_config: dict[str, Any])
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
@classmethod
def is_gptq_marlin_compatible(cls, quant_config: dict[str, Any]):
    quant_method = quant_config.get("quant_method", "").lower()
    num_bits = quant_config.get("bits")
    group_size = quant_config.get("group_size")
    sym = quant_config.get("sym")
    desc_act = quant_config.get("desc_act")

    if not current_platform.is_cuda():
        return False

    if quant_method != "gptq":
        return False

    # Marlin conversion is only valid if required properties are found
    if num_bits is None or group_size is None or sym is None or desc_act is None:
        return False

    if (num_bits, sym) not in cls.TYPE_MAP:
        return False

    return check_marlin_supported(
        quant_type=cls.TYPE_MAP[(num_bits, sym)], group_size=group_size
    )
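
For instance (illustrative values; the result also depends on running on a CUDA platform that passes check_marlin_supported):

hf_quant_cfg = {
    "quant_method": "gptq",
    "bits": 4,
    "group_size": 128,
    "sym": True,
    "desc_act": False,
}
# True on a supported CUDA platform; False for e.g. bits=3 or any
# (num_bits, sym) pair missing from TYPE_MAP.
GPTQMarlinConfig.is_gptq_marlin_compatible(hf_quant_cfg)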

maybe_update_config

maybe_update_config(
    model_name: str, revision: str | None = None
)
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def maybe_update_config(self, model_name: str, revision: str | None = None):
    if self.modules_in_block_to_quantize:
        if is_list_of(self.modules_in_block_to_quantize, list):
            # original modules_in_block_to_quantize: list[list[str]]
            # flatten original modules_in_block_to_quantize
            self.modules_in_block_to_quantize = [
                item
                for sublist in self.modules_in_block_to_quantize
                for item in sublist
            ]
        return

    unquant_dtypes = [torch.float16, torch.bfloat16, torch.float32]
    metadata = get_safetensors_params_metadata(model_name, revision=revision)
    quant_layers: set[str] = {
        param_name.rsplit(".", 1)[0]
        for param_name, info in metadata.items()
        if (dtype := info.get("dtype", None))
        and _SAFETENSORS_TO_TORCH_DTYPE[dtype] not in unquant_dtypes
    }
    self.modules_in_block_to_quantize = list(quant_layers)
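
A self-contained sketch of the filtering step above, using hypothetical safetensors metadata (the real dtype strings come from the checkpoint headers via get_safetensors_params_metadata):

unquant_dtypes = {"F16", "BF16", "F32"}  # safetensors names for fp16/bf16/fp32

metadata = {
    "model.layers.0.mlp.gate_proj.qweight": {"dtype": "I32"},
    "model.layers.0.mlp.gate_proj.scales": {"dtype": "F16"},
    "model.layers.0.input_layernorm.weight": {"dtype": "F16"},
}

quant_layers = {
    name.rsplit(".", 1)[0]
    for name, info in metadata.items()
    if (dtype := info.get("dtype")) and dtype not in unquant_dtypes
}
# {"model.layers.0.mlp.gate_proj"}: only modules with non-float params remain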

override_quantization_method classmethod

override_quantization_method(
    hf_quant_cfg, user_quant
) -> QuantizationMethods | None
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
@classmethod
def override_quantization_method(
    cls, hf_quant_cfg, user_quant
) -> QuantizationMethods | None:
    can_convert = cls.is_gptq_marlin_compatible(hf_quant_cfg)

    is_valid_user_quant = (
        user_quant is None or user_quant == "marlin" or user_quant == "gptq_marlin"
    )

    if can_convert and is_valid_user_quant:
        msg = (
            "The model is convertible to {} during runtime."
            " Using {} kernel.".format(cls.get_name(), cls.get_name())
        )
        logger.info(msg)
        return cls.get_name()

    if can_convert and user_quant == "gptq":
        logger.info(
            "Detected that the model can run with gptq_marlin"
            ", however you specified quantization=gptq explicitly,"
            " so forcing gptq. Use quantization=gptq_marlin for"
            " faster inference"
        )
    return None
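
In practice this means a GPTQ checkpoint picks up the Marlin kernels automatically when quantization is left unset, e.g. (a usage sketch; the model id is only an example):

from vllm import LLM

# override_quantization_method selects "gptq_marlin" when convertible.
llm = LLM(model="TheBloke/Llama-2-7B-GPTQ")
# Forcing the slower non-Marlin path instead:
# llm = LLM(model="TheBloke/Llama-2-7B-GPTQ", quantization="gptq")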

GPTQMarlinLinearMethod

Bases: LinearMethodBase

Linear method for GPTQ Marlin.

Parameters:

    quant_config (GPTQMarlinConfig): The GPTQ Marlin quantization config (required).
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
class GPTQMarlinLinearMethod(LinearMethodBase):
    """Linear method for GPTQ Marlin.

    Args:
        quant_config: The GPTQ Marlin quantization config.
    """

    _kernel_backends_being_used: set[str] = set()

    def __init__(self, quant_config: GPTQMarlinConfig) -> None:
        self.quant_config = quant_config
        self.input_dtype = None
        self.quant_type = self.quant_config.quant_type

        # Verify supported on platform.
        verify_marlin_supported(
            quant_type=self.quant_config.quant_type,
            group_size=self.quant_config.group_size,
        )

    def create_weights(
        self,
        layer: torch.nn.Module,
        input_size_per_partition: int,
        output_partition_sizes: list[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ) -> None:
        output_size_per_partition = sum(output_partition_sizes)
        is_row_parallel = input_size != input_size_per_partition
        weight_loader = extra_weight_attrs.get("weight_loader")
        input_dtype = self.input_dtype

        mp_linear_kernel_config = MPLinearLayerConfig(
            full_weight_shape=(input_size, output_size),
            partition_weight_shape=(
                input_size_per_partition,
                output_size_per_partition,
            ),
            weight_type=self.quant_config.quant_type,
            act_type=params_dtype if input_dtype is None else input_dtype,
            group_size=self.quant_config.group_size,
            zero_points=False,
            has_g_idx=self.quant_config.desc_act,
        )

        kernel_type = choose_mp_linear_kernel(mp_linear_kernel_config)

        if kernel_type.__name__ not in self._kernel_backends_being_used:
            logger.info("Using %s for GPTQMarlinLinearMethod", kernel_type.__name__)
            self._kernel_backends_being_used.add(kernel_type.__name__)

        # Normalize group_size
        if self.quant_config.group_size != -1:
            group_size = self.quant_config.group_size
        else:
            group_size = input_size

        # Determine sharding
        if marlin_repeat_scales_on_all_ranks(
            self.quant_config.desc_act, self.quant_config.group_size, is_row_parallel
        ):
            # By setting scale_dim == None, weight_loader will
            # repeat the scales on each GPU in TP>1 case.
            scales_and_zp_input_dim = None
            scales_and_zp_size = input_size // group_size
        else:
            # By setting scale_dim == 0, weight_loader will
            # shard the scales in TP>1 case.
            scales_and_zp_input_dim = 0
            scales_and_zp_size = input_size_per_partition // group_size

        # Quantized weights
        qweight = PackedvLLMParameter(
            data=torch.empty(
                input_size_per_partition // self.quant_config.pack_factor,
                output_size_per_partition,
                dtype=torch.int32,
            ),
            input_dim=0,
            output_dim=1,
            packed_dim=0,
            packed_factor=self.quant_config.pack_factor,
            weight_loader=weight_loader,
        )

        # Activation order
        g_idx = RowvLLMParameter(
            data=torch.empty(
                input_size_per_partition,
                dtype=torch.int32,
            ),
            input_dim=0,
            weight_loader=weight_loader,
        )

        qzeros_args = {
            "data": torch.empty(
                scales_and_zp_size,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            "weight_loader": weight_loader,
        }
        weight_scale_args = {
            "data": torch.empty(
                scales_and_zp_size,
                output_size_per_partition,
                dtype=params_dtype,
            ),
            "weight_loader": weight_loader,
        }

        if scales_and_zp_input_dim is None:
            scales = ChannelQuantScaleParameter(output_dim=1, **weight_scale_args)
            qzeros = PackedColumnParameter(
                output_dim=1,
                packed_dim=1,
                packed_factor=self.quant_config.pack_factor,
                **qzeros_args,
            )

        else:
            scales = GroupQuantScaleParameter(
                output_dim=1, input_dim=0, **weight_scale_args
            )
            qzeros = PackedvLLMParameter(
                input_dim=0,
                output_dim=1,
                packed_dim=1,
                packed_factor=self.quant_config.pack_factor,
                **qzeros_args,
            )

        layer.register_parameter("qweight", qweight)
        layer.register_parameter("g_idx", g_idx)
        layer.register_parameter("scales", scales)
        layer.register_parameter("qzeros", qzeros)

        self.kernel = kernel_type(
            mp_linear_kernel_config,
            w_q_param_name="qweight",
            w_s_param_name="scales",
            w_zp_param_name="qzeros",
            w_gidx_param_name="g_idx",
        )

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        self.kernel.process_weights_after_loading(layer)

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: torch.Tensor | None = None,
    ) -> torch.Tensor:
        return self.kernel.apply_weights(layer, x, bias)
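
As a worked example of the shapes create_weights registers (assuming 4-bit weights, group_size=128, a 4096x4096 partition, and replicated scales, i.e. scales_and_zp_input_dim is None):

input_size = output_size = 4096          # per-partition == full size here
weight_bits, group_size = 4, 128
pack_factor = 32 // weight_bits          # 8

qweight_shape = (input_size // pack_factor, output_size)  # (512, 4096) int32
scales_shape = (input_size // group_size, output_size)    # (32, 4096) fp16/bf16
qzeros_shape = (input_size // group_size,
                output_size // pack_factor)               # (32, 512) int32
g_idx_shape = (input_size,)                               # (4096,) int32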

_kernel_backends_being_used class-attribute instance-attribute

_kernel_backends_being_used: set[str] = set()

input_dtype instance-attribute

input_dtype = None

quant_config instance-attribute

quant_config = quant_config

quant_type instance-attribute

quant_type = quant_type

__init__

__init__(quant_config: GPTQMarlinConfig) -> None
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def __init__(self, quant_config: GPTQMarlinConfig) -> None:
    self.quant_config = quant_config
    self.input_dtype = None
    self.quant_type = self.quant_config.quant_type

    # Verify supported on platform.
    verify_marlin_supported(
        quant_type=self.quant_config.quant_type,
        group_size=self.quant_config.group_size,
    )

apply

apply(
    layer: Module, x: Tensor, bias: Tensor | None = None
) -> Tensor
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def apply(
    self,
    layer: torch.nn.Module,
    x: torch.Tensor,
    bias: torch.Tensor | None = None,
) -> torch.Tensor:
    return self.kernel.apply_weights(layer, x, bias)

create_weights

create_weights(
    layer: Module,
    input_size_per_partition: int,
    output_partition_sizes: list[int],
    input_size: int,
    output_size: int,
    params_dtype: dtype,
    **extra_weight_attrs,
) -> None
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def create_weights(
    self,
    layer: torch.nn.Module,
    input_size_per_partition: int,
    output_partition_sizes: list[int],
    input_size: int,
    output_size: int,
    params_dtype: torch.dtype,
    **extra_weight_attrs,
) -> None:
    output_size_per_partition = sum(output_partition_sizes)
    is_row_parallel = input_size != input_size_per_partition
    weight_loader = extra_weight_attrs.get("weight_loader")
    input_dtype = self.input_dtype

    mp_linear_kernel_config = MPLinearLayerConfig(
        full_weight_shape=(input_size, output_size),
        partition_weight_shape=(
            input_size_per_partition,
            output_size_per_partition,
        ),
        weight_type=self.quant_config.quant_type,
        act_type=params_dtype if input_dtype is None else input_dtype,
        group_size=self.quant_config.group_size,
        zero_points=False,
        has_g_idx=self.quant_config.desc_act,
    )

    kernel_type = choose_mp_linear_kernel(mp_linear_kernel_config)

    if kernel_type.__name__ not in self._kernel_backends_being_used:
        logger.info("Using %s for GPTQMarlinLinearMethod", kernel_type.__name__)
        self._kernel_backends_being_used.add(kernel_type.__name__)

    # Normalize group_size
    if self.quant_config.group_size != -1:
        group_size = self.quant_config.group_size
    else:
        group_size = input_size

    # Determine sharding
    if marlin_repeat_scales_on_all_ranks(
        self.quant_config.desc_act, self.quant_config.group_size, is_row_parallel
    ):
        # By setting scale_dim == None, weight_loader will
        # repeat the scales on each GPU in TP>1 case.
        scales_and_zp_input_dim = None
        scales_and_zp_size = input_size // group_size
    else:
        # By setting scale_dim == 0, weight_loader will
        # shard the scales in TP>1 case.
        scales_and_zp_input_dim = 0
        scales_and_zp_size = input_size_per_partition // group_size

    # Quantized weights
    qweight = PackedvLLMParameter(
        data=torch.empty(
            input_size_per_partition // self.quant_config.pack_factor,
            output_size_per_partition,
            dtype=torch.int32,
        ),
        input_dim=0,
        output_dim=1,
        packed_dim=0,
        packed_factor=self.quant_config.pack_factor,
        weight_loader=weight_loader,
    )

    # Activation order
    g_idx = RowvLLMParameter(
        data=torch.empty(
            input_size_per_partition,
            dtype=torch.int32,
        ),
        input_dim=0,
        weight_loader=weight_loader,
    )

    qzeros_args = {
        "data": torch.empty(
            scales_and_zp_size,
            output_size_per_partition // self.quant_config.pack_factor,
            dtype=torch.int32,
        ),
        "weight_loader": weight_loader,
    }
    weight_scale_args = {
        "data": torch.empty(
            scales_and_zp_size,
            output_size_per_partition,
            dtype=params_dtype,
        ),
        "weight_loader": weight_loader,
    }

    if scales_and_zp_input_dim is None:
        scales = ChannelQuantScaleParameter(output_dim=1, **weight_scale_args)
        qzeros = PackedColumnParameter(
            output_dim=1,
            packed_dim=1,
            packed_factor=self.quant_config.pack_factor,
            **qzeros_args,
        )

    else:
        scales = GroupQuantScaleParameter(
            output_dim=1, input_dim=0, **weight_scale_args
        )
        qzeros = PackedvLLMParameter(
            input_dim=0,
            output_dim=1,
            packed_dim=1,
            packed_factor=self.quant_config.pack_factor,
            **qzeros_args,
        )

    layer.register_parameter("qweight", qweight)
    layer.register_parameter("g_idx", g_idx)
    layer.register_parameter("scales", scales)
    layer.register_parameter("qzeros", qzeros)

    self.kernel = kernel_type(
        mp_linear_kernel_config,
        w_q_param_name="qweight",
        w_s_param_name="scales",
        w_zp_param_name="qzeros",
        w_gidx_param_name="g_idx",
    )

process_weights_after_loading

process_weights_after_loading(layer: Module) -> None
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    self.kernel.process_weights_after_loading(layer)

GPTQMarlinMoEMethod

Bases: FusedMoEMethodBase

MoE Marlin method with quantization.

Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
class GPTQMarlinMoEMethod(FusedMoEMethodBase):
    """MoE Marlin method with quantization."""

    def __init__(
        self,
        quant_config: GPTQMarlinConfig,
        moe: FusedMoEConfig,
    ) -> None:
        super().__init__(moe)
        self.quant_config = quant_config
        if self.quant_config.quant_type.size_bits == 4:
            self.quant_type = scalar_types.uint4b8
        elif self.quant_config.quant_type.size_bits == 8:
            self.quant_type = scalar_types.uint8b128
        else:
            raise ValueError("GPTQMarlinMoEMethod only supports int4 and int8 now.")
        self.input_dtype = None
        self.use_marlin = True

    def create_weights(
        self,
        layer: torch.nn.Module,
        num_experts: int,
        hidden_size: int,
        intermediate_size_per_partition: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        layer.input_dtype = self.input_dtype
        is_a_8bit = self.input_dtype is not None and self.input_dtype.itemsize == 1

        if is_a_8bit:
            assert self.quant_type == scalar_types.uint4b8, (
                "W8A8-INT8 is not supported by marlin kernel."
            )

        intermediate_size_full = extra_weight_attrs.pop("intermediate_size_full")

        self.is_k_full = (not self.quant_config.desc_act) or (
            intermediate_size_per_partition == intermediate_size_full
        )

        if self.quant_config.group_size != -1:
            scales_size13 = hidden_size // self.quant_config.group_size
            w2_scales_size = (
                intermediate_size_full
                if self.quant_config.desc_act
                else intermediate_size_per_partition
            )
            scales_size2 = w2_scales_size // self.quant_config.group_size
            strategy = FusedMoeWeightScaleSupported.GROUP.value
        else:
            scales_size13 = 1
            scales_size2 = 1
            strategy = FusedMoeWeightScaleSupported.CHANNEL.value

        layer.num_groups_w13 = scales_size13
        layer.num_groups_w2 = scales_size2

        extra_weight_attrs.update({"quant_method": strategy, "is_transposed": True})
        # Fused gate_up_proj (column parallel)
        w13_qweight = torch.nn.Parameter(
            torch.empty(
                num_experts,
                hidden_size // self.quant_config.pack_factor,
                2 * intermediate_size_per_partition,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w13_qweight", w13_qweight)
        set_weight_attrs(w13_qweight, extra_weight_attrs)
        # down_proj (row parallel)
        w2_qweight = torch.nn.Parameter(
            torch.empty(
                num_experts,
                intermediate_size_per_partition // self.quant_config.pack_factor,
                hidden_size,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w2_qweight", w2_qweight)
        set_weight_attrs(w2_qweight, extra_weight_attrs)
        # up_proj scales
        w13_scales = torch.nn.Parameter(
            torch.empty(
                num_experts,
                scales_size13,
                2 * intermediate_size_per_partition,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w13_scales", w13_scales)
        set_weight_attrs(w13_scales, extra_weight_attrs)
        # down_proj scales
        w2_scales = torch.nn.Parameter(
            torch.empty(num_experts, scales_size2, hidden_size, dtype=params_dtype),
            requires_grad=False,
        )
        layer.register_parameter("w2_scales", w2_scales)
        set_weight_attrs(w2_scales, extra_weight_attrs)
        # don't shard the w2 scales when running act order
        set_weight_attrs(w2_scales, {"load_full_w2": self.quant_config.desc_act})
        # up_proj qzeros
        w13_qzeros = torch.nn.Parameter(
            torch.empty(
                num_experts,
                scales_size13,
                2 * intermediate_size_per_partition // self.quant_config.pack_factor,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w13_qzeros", w13_qzeros)
        set_weight_attrs(w13_qzeros, extra_weight_attrs)
        # down_proj qzeros
        w2_qzeros = torch.nn.Parameter(
            torch.empty(
                num_experts,
                scales_size2,
                hidden_size // self.quant_config.pack_factor,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w2_qzeros", w2_qzeros)
        set_weight_attrs(w2_qzeros, extra_weight_attrs)
        # don't shard the w2 qzeros when running act order
        set_weight_attrs(w2_qzeros, {"load_full_w2": self.quant_config.desc_act})
        w13_g_idx = torch.nn.Parameter(
            torch.empty(
                num_experts,
                hidden_size,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w13_g_idx", w13_g_idx)
        set_weight_attrs(w13_g_idx, extra_weight_attrs)
        w2_g_idx = torch.nn.Parameter(
            torch.empty(
                num_experts,
                intermediate_size_per_partition,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w2_g_idx", w2_g_idx)
        set_weight_attrs(w2_g_idx, extra_weight_attrs)
        w13_g_idx_sort_indices = torch.nn.Parameter(
            torch.empty(
                num_experts,
                hidden_size,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w13_g_idx_sort_indices", w13_g_idx_sort_indices)
        set_weight_attrs(w13_g_idx_sort_indices, extra_weight_attrs)
        w2_g_idx_sort_indices = torch.nn.Parameter(
            torch.empty(
                num_experts,
                intermediate_size_per_partition,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        layer.register_parameter("w2_g_idx_sort_indices", w2_g_idx_sort_indices)
        set_weight_attrs(w2_g_idx_sort_indices, extra_weight_attrs)

        device = layer.w13_qweight.device
        layer.workspace = marlin_make_workspace_new(device, 4)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        is_a_8bit = self.input_dtype is not None and self.input_dtype.itemsize == 1

        if is_a_8bit:
            assert self.quant_type == scalar_types.uint4b8, (
                "W8A8-INT8 is not supported by marlin kernel."
            )

        if self.input_dtype == torch.float8_e4m3fn:
            ops.marlin_int4_fp8_preprocess(layer.w13_qweight, inplace=True)
            ops.marlin_int4_fp8_preprocess(layer.w2_qweight, inplace=True)
            layer.w13_scales.data = layer.w13_scales.data * 512
            layer.w2_scales.data = layer.w2_scales.data * 512

        # Process act_order
        if self.quant_config.desc_act:
            # Get sorting based on g_idx
            num_experts = layer.w13_g_idx.shape[0]
            w13_g_idx_sort_indices = torch.empty_like(layer.w13_g_idx)
            w2_g_idx_sort_indices = torch.empty_like(layer.w2_g_idx)
            w13_sorted_g_idx = torch.empty_like(layer.w13_g_idx)
            w2_sorted_g_idx = torch.empty_like(layer.w2_g_idx)
            for e in range(num_experts):
                w13_g_idx_sort_indices[e] = torch.argsort(layer.w13_g_idx[e]).to(
                    torch.int32
                )
                w2_g_idx_sort_indices[e] = torch.argsort(layer.w2_g_idx[e]).to(
                    torch.int32
                )
                w13_sorted_g_idx[e] = layer.w13_g_idx[e][w13_g_idx_sort_indices[e]]
                w2_sorted_g_idx[e] = layer.w2_g_idx[e][w2_g_idx_sort_indices[e]]
            replace_parameter(layer, "w13_g_idx", w13_sorted_g_idx)
            replace_parameter(layer, "w2_g_idx", w2_sorted_g_idx)
            replace_parameter(layer, "w13_g_idx_sort_indices", w13_g_idx_sort_indices)
            replace_parameter(layer, "w2_g_idx_sort_indices", w2_g_idx_sort_indices)
        else:
            # Reset g_idx related tensors
            num_experts = layer.w13_g_idx.shape[0]
            device = layer.w13_g_idx.device
            layer.w13_g_idx = torch.nn.Parameter(
                torch.empty((num_experts, 0), dtype=torch.int32, device=device),
                requires_grad=False,
            )
            layer.w2_g_idx = torch.nn.Parameter(
                torch.empty((num_experts, 0), dtype=torch.int32, device=device),
                requires_grad=False,
            )
            layer.w13_g_idx_sort_indices = torch.nn.Parameter(
                torch.empty((num_experts, 0), dtype=torch.int32, device=device),
                requires_grad=False,
            )
            layer.w2_g_idx_sort_indices = torch.nn.Parameter(
                torch.empty((num_experts, 0), dtype=torch.int32, device=device),
                requires_grad=False,
            )
        # Repack weights
        marlin_w13_qweight = ops.gptq_marlin_moe_repack(
            layer.w13_qweight,
            layer.w13_g_idx_sort_indices,
            layer.w13_qweight.shape[1] * self.quant_config.pack_factor,
            layer.w13_qweight.shape[2],
            self.quant_config.quant_type.size_bits,
            is_a_8bit=is_a_8bit,
        )
        replace_parameter(layer, "w13_qweight", marlin_w13_qweight)
        marlin_w2_qweight = ops.gptq_marlin_moe_repack(
            layer.w2_qweight,
            layer.w2_g_idx_sort_indices,
            layer.w2_qweight.shape[1] * self.quant_config.pack_factor,
            layer.w2_qweight.shape[2],
            self.quant_config.quant_type.size_bits,
            is_a_8bit=is_a_8bit,
        )
        replace_parameter(layer, "w2_qweight", marlin_w2_qweight)

        # The modular kernel expects w13_weight and w2_weight,
        # but GPTQ uses w13_qweight and w2_qweight
        # Alias for modular kernel
        layer.w13_weight = layer.w13_qweight
        # Alias for modular kernel
        layer.w2_weight = layer.w2_qweight

        # Repack scales
        marlin_w13_scales = marlin_moe_permute_scales(
            s=layer.w13_scales,
            size_k=layer.intermediate_size_per_partition,
            size_n=layer.w13_scales.shape[2],
            group_size=self.quant_config.group_size,
            is_a_8bit=is_a_8bit,
        )
        if self.input_dtype == torch.int8 and layer.num_groups_w13 > 1:
            marlin_w13_scales, w13_input_global_scale = marlin_act_int8_process_scales(
                marlin_w13_scales
            )
            layer.register_parameter(
                "w13_input_global_scale",
                torch.nn.Parameter(w13_input_global_scale, requires_grad=False),
            )

        replace_parameter(layer, "w13_scales", marlin_w13_scales)
        marlin_w2_scales = marlin_moe_permute_scales(
            s=layer.w2_scales,
            size_k=layer.w2_scales.shape[1]
            * (
                self.quant_config.group_size
                if self.quant_config.group_size != -1
                else self.quant_config.pack_factor
            ),
            size_n=layer.w2_scales.shape[2],
            group_size=self.quant_config.group_size,
            is_a_8bit=is_a_8bit,
        )
        if self.input_dtype == torch.int8 and layer.num_groups_w2 > 1:
            marlin_w2_scales, w2_input_global_scale = marlin_act_int8_process_scales(
                marlin_w2_scales
            )
            layer.register_parameter(
                "w2_input_global_scale",
                torch.nn.Parameter(w2_input_global_scale, requires_grad=False),
            )

        replace_parameter(layer, "w2_scales", marlin_w2_scales)

        if hasattr(layer, "w13_bias") and layer.w13_bias is not None:
            layer.w13_bias.data = marlin_permute_bias(layer.w13_bias)

        if hasattr(layer, "w2_bias") and layer.w2_bias is not None:
            layer.w2_bias.data = marlin_permute_bias(layer.w2_bias)

    def get_fused_moe_quant_config(
        self, layer: torch.nn.Module
    ) -> FusedMoEQuantConfig | None:
        from vllm.model_executor.layers.fused_moe.config import (
            gptq_marlin_moe_quant_config,
        )

        return gptq_marlin_moe_quant_config(
            w1_scale=layer.w13_scales,
            w2_scale=layer.w2_scales,
            weight_bits=self.quant_config.weight_bits,
            group_size=self.quant_config.group_size,
            w1_zp=getattr(layer, "w13_qzeros", None)
            if not self.quant_config.is_sym
            else None,
            w2_zp=getattr(layer, "w2_qzeros", None)
            if not self.quant_config.is_sym
            else None,
            w1_bias=getattr(layer, "w13_bias", None),
            w2_bias=getattr(layer, "w2_bias", None),
        )

    def select_gemm_impl(
        self,
        prepare_finalize,
        layer: torch.nn.Module,
    ):
        """
        Select the GEMM implementation for GPTQ-Marlin MoE.

        Returns MarlinExperts configured for GPTQ quantization.
        This is ONLY used when LoRA is enabled.
        Without LoRA, GPTQ uses its own apply() method.
        """
        # Only use modular kernels when LoRA is enabled
        # Without LoRA, GPTQ's own apply() method works fine and is more efficient
        if not self.moe.is_lora_enabled:
            raise NotImplementedError(
                "GPTQ-Marlin uses its own apply() method when LoRA is not enabled. "
                "Modular kernels are only used for LoRA support."
            )

        # The modular marlin kernels do not support 8-bit weights.
        if self.quant_config.weight_bits == 8:
            raise NotImplementedError(
                "GPTQ-Marlin kernel does not support 8-bit weights."
            )

        from vllm.model_executor.layers.fused_moe import modular_kernel as mk
        from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
            BatchedMarlinExperts,
            MarlinExperts,
        )

        # Ensure quant config is initialized
        assert self.moe_quant_config is not None, (
            "moe_quant_config must be initialized before select_gemm_impl"
        )

        w13_g_idx = (
            getattr(layer, "w13_g_idx", None) if self.quant_config.desc_act else None
        )
        w2_g_idx = (
            getattr(layer, "w2_g_idx", None) if self.quant_config.desc_act else None
        )
        w13_g_idx_sort_indices = (
            getattr(layer, "w13_g_idx_sort_indices", None)
            if self.quant_config.desc_act
            else None
        )
        w2_g_idx_sort_indices = (
            getattr(layer, "w2_g_idx_sort_indices", None)
            if self.quant_config.desc_act
            else None
        )

        # Check if using batched expert format (for Expert Parallelism)
        if (
            prepare_finalize.activation_format
            == mk.FusedMoEActivationFormat.BatchedExperts
        ):
            # For batched format, use BatchedMarlinExperts
            max_num_tokens_per_rank = prepare_finalize.max_num_tokens_per_rank()
            assert max_num_tokens_per_rank is not None
            return BatchedMarlinExperts(
                max_num_tokens=max_num_tokens_per_rank,
                num_dispatchers=prepare_finalize.num_dispatchers(),
                quant_config=self.moe_quant_config,
                w13_g_idx=w13_g_idx,
                w2_g_idx=w2_g_idx,
                w13_g_idx_sort_indices=w13_g_idx_sort_indices,
                w2_g_idx_sort_indices=w2_g_idx_sort_indices,
                is_k_full=self.is_k_full,
            )
        else:
            # Standard Marlin experts for GPTQ
            return MarlinExperts(
                quant_config=self.moe_quant_config,
                w13_g_idx=w13_g_idx,
                w2_g_idx=w2_g_idx,
                w13_g_idx_sort_indices=w13_g_idx_sort_indices,
                w2_g_idx_sort_indices=w2_g_idx_sort_indices,
                is_k_full=self.is_k_full,
            )

    def apply(
        self,
        layer: FusedMoE,
        x: torch.Tensor,
        router_logits: torch.Tensor,
    ) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
        assert layer.activation == "silu", "Only SiLU activation is supported."

        topk_weights, topk_ids, _ = layer.select_experts(
            hidden_states=x,
            router_logits=router_logits,
        )

        return fused_marlin_moe(
            x,
            layer.w13_qweight,
            layer.w2_qweight,
            getattr(layer, "w13_bias", None),
            getattr(layer, "w2_bias", None),
            layer.w13_scales,
            layer.w2_scales,
            router_logits,
            topk_weights,
            topk_ids,
            input_global_scale1=getattr(layer, "w13_input_global_scale", None),
            input_global_scale2=getattr(layer, "w2_input_global_scale", None),
            quant_type_id=self.quant_type.id,
            apply_router_weight_on_input=layer.apply_router_weight_on_input,
            global_num_experts=layer.global_num_experts,
            expert_map=layer.expert_map,
            g_idx1=layer.w13_g_idx,
            g_idx2=layer.w2_g_idx,
            sort_indices1=layer.w13_g_idx_sort_indices,
            sort_indices2=layer.w2_g_idx_sort_indices,
            workspace=layer.workspace,
            is_k_full=self.is_k_full,
            input_dtype=self.input_dtype,
        )
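
A worked example of the MoE tensor shapes created above (illustrative sizes; assumes desc_act=False, so scales_size2 uses the per-partition intermediate size):

num_experts, hidden, inter = 8, 4096, 11008   # illustrative sizes
weight_bits, group_size = 4, 128
pack = 32 // weight_bits                      # 8

w13_qweight = (num_experts, hidden // pack, 2 * inter)       # (8, 512, 22016)
w2_qweight = (num_experts, inter // pack, hidden)            # (8, 1376, 4096)
w13_scales = (num_experts, hidden // group_size, 2 * inter)  # (8, 32, 22016)
w2_scales = (num_experts, inter // group_size, hidden)       # (8, 86, 4096)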

input_dtype instance-attribute

input_dtype = None

quant_config instance-attribute

quant_config = quant_config

quant_type instance-attribute

quant_type = uint4b8

use_marlin instance-attribute

use_marlin = True

__init__

__init__(
    quant_config: GPTQMarlinConfig, moe: FusedMoEConfig
) -> None
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def __init__(
    self,
    quant_config: GPTQMarlinConfig,
    moe: FusedMoEConfig,
) -> None:
    super().__init__(moe)
    self.quant_config = quant_config
    if self.quant_config.quant_type.size_bits == 4:
        self.quant_type = scalar_types.uint4b8
    elif self.quant_config.quant_type.size_bits == 8:
        self.quant_type = scalar_types.uint8b128
    else:
        raise ValueError("GPTQMarlinMoEMethod only supports int4 and int8 now.")
    self.input_dtype = None
    self.use_marlin = True

apply

apply(
    layer: FusedMoE, x: Tensor, router_logits: Tensor
) -> Tensor | tuple[Tensor, Tensor]
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def apply(
    self,
    layer: FusedMoE,
    x: torch.Tensor,
    router_logits: torch.Tensor,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
    assert layer.activation == "silu", "Only SiLU activation is supported."

    topk_weights, topk_ids, _ = layer.select_experts(
        hidden_states=x,
        router_logits=router_logits,
    )

    return fused_marlin_moe(
        x,
        layer.w13_qweight,
        layer.w2_qweight,
        getattr(layer, "w13_bias", None),
        getattr(layer, "w2_bias", None),
        layer.w13_scales,
        layer.w2_scales,
        router_logits,
        topk_weights,
        topk_ids,
        input_global_scale1=getattr(layer, "w13_input_global_scale", None),
        input_global_scale2=getattr(layer, "w2_input_global_scale", None),
        quant_type_id=self.quant_type.id,
        apply_router_weight_on_input=layer.apply_router_weight_on_input,
        global_num_experts=layer.global_num_experts,
        expert_map=layer.expert_map,
        g_idx1=layer.w13_g_idx,
        g_idx2=layer.w2_g_idx,
        sort_indices1=layer.w13_g_idx_sort_indices,
        sort_indices2=layer.w2_g_idx_sort_indices,
        workspace=layer.workspace,
        is_k_full=self.is_k_full,
        input_dtype=self.input_dtype,
    )

create_weights

create_weights(
    layer: Module,
    num_experts: int,
    hidden_size: int,
    intermediate_size_per_partition: int,
    params_dtype: dtype,
    **extra_weight_attrs,
)
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def create_weights(
    self,
    layer: torch.nn.Module,
    num_experts: int,
    hidden_size: int,
    intermediate_size_per_partition: int,
    params_dtype: torch.dtype,
    **extra_weight_attrs,
):
    layer.input_dtype = self.input_dtype
    is_a_8bit = self.input_dtype is not None and self.input_dtype.itemsize == 1

    if is_a_8bit:
        assert self.quant_type == scalar_types.uint4b8, (
            "W8A8-INT8 is not supported by marlin kernel."
        )

    intermediate_size_full = extra_weight_attrs.pop("intermediate_size_full")

    self.is_k_full = (not self.quant_config.desc_act) or (
        intermediate_size_per_partition == intermediate_size_full
    )

    if self.quant_config.group_size != -1:
        scales_size13 = hidden_size // self.quant_config.group_size
        w2_scales_size = (
            intermediate_size_full
            if self.quant_config.desc_act
            else intermediate_size_per_partition
        )
        scales_size2 = w2_scales_size // self.quant_config.group_size
        strategy = FusedMoeWeightScaleSupported.GROUP.value
    else:
        scales_size13 = 1
        scales_size2 = 1
        strategy = FusedMoeWeightScaleSupported.CHANNEL.value

    layer.num_groups_w13 = scales_size13
    layer.num_groups_w2 = scales_size2

    extra_weight_attrs.update({"quant_method": strategy, "is_transposed": True})
    # Fused gate_up_proj (column parallel)
    w13_qweight = torch.nn.Parameter(
        torch.empty(
            num_experts,
            hidden_size // self.quant_config.pack_factor,
            2 * intermediate_size_per_partition,
            dtype=torch.int32,
        ),
        requires_grad=False,
    )
    layer.register_parameter("w13_qweight", w13_qweight)
    set_weight_attrs(w13_qweight, extra_weight_attrs)
    # down_proj (row parallel)
    w2_qweight = torch.nn.Parameter(
        torch.empty(
            num_experts,
            intermediate_size_per_partition // self.quant_config.pack_factor,
            hidden_size,
            dtype=torch.int32,
        ),
        requires_grad=False,
    )
    layer.register_parameter("w2_qweight", w2_qweight)
    set_weight_attrs(w2_qweight, extra_weight_attrs)
    # up_proj scales
    w13_scales = torch.nn.Parameter(
        torch.empty(
            num_experts,
            scales_size13,
            2 * intermediate_size_per_partition,
            dtype=params_dtype,
        ),
        requires_grad=False,
    )
    layer.register_parameter("w13_scales", w13_scales)
    set_weight_attrs(w13_scales, extra_weight_attrs)
    # down_proj scales
    w2_scales = torch.nn.Parameter(
        torch.empty(num_experts, scales_size2, hidden_size, dtype=params_dtype),
        requires_grad=False,
    )
    layer.register_parameter("w2_scales", w2_scales)
    set_weight_attrs(w2_scales, extra_weight_attrs)
    # don't shard the w2 scales when running act order
    set_weight_attrs(w2_scales, {"load_full_w2": self.quant_config.desc_act})
    # up_proj qzeros
    w13_qzeros = torch.nn.Parameter(
        torch.empty(
            num_experts,
            scales_size13,
            2 * intermediate_size_per_partition // self.quant_config.pack_factor,
            dtype=params_dtype,
        ),
        requires_grad=False,
    )
    layer.register_parameter("w13_qzeros", w13_qzeros)
    set_weight_attrs(w13_qzeros, extra_weight_attrs)
    # down_proj zero points (qzeros)
    w2_qzeros = torch.nn.Parameter(
        torch.empty(
            num_experts,
            scales_size2,
            hidden_size // self.quant_config.pack_factor,
            dtype=params_dtype,
        ),
        requires_grad=False,
    )
    layer.register_parameter("w2_qzeros", w2_qzeros)
    set_weight_attrs(w2_qzeros, extra_weight_attrs)
    # don't shard the w2 zero points when running act order
    set_weight_attrs(w2_qzeros, {"load_full_w2": self.quant_config.desc_act})
    w13_g_idx = torch.nn.Parameter(
        torch.empty(
            num_experts,
            hidden_size,
            dtype=torch.int32,
        ),
        requires_grad=False,
    )
    layer.register_parameter("w13_g_idx", w13_g_idx)
    set_weight_attrs(w13_g_idx, extra_weight_attrs)
    w2_g_idx = torch.nn.Parameter(
        torch.empty(
            num_experts,
            intermediate_size_per_partition,
            dtype=torch.int32,
        ),
        requires_grad=False,
    )
    layer.register_parameter("w2_g_idx", w2_g_idx)
    set_weight_attrs(w2_g_idx, extra_weight_attrs)
    w13_g_idx_sort_indices = torch.nn.Parameter(
        torch.empty(
            num_experts,
            hidden_size,
            dtype=torch.int32,
        ),
        requires_grad=False,
    )
    layer.register_parameter("w13_g_idx_sort_indices", w13_g_idx_sort_indices)
    set_weight_attrs(w13_g_idx_sort_indices, extra_weight_attrs)
    w2_g_idx_sort_indices = torch.nn.Parameter(
        torch.empty(
            num_experts,
            intermediate_size_per_partition,
            dtype=torch.int32,
        ),
        requires_grad=False,
    )
    layer.register_parameter("w2_g_idx_sort_indices", w2_g_idx_sort_indices)
    set_weight_attrs(w2_g_idx_sort_indices, extra_weight_attrs)

    device = layer.w13_qweight.device
    layer.workspace = marlin_make_workspace_new(device, 4)
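
The parameter shapes above follow directly from the packing arithmetic: 4-bit values are packed eight-per-int32 along the input dimension, scale tensors carry one row per quantization group, and is_k_full holds whenever act_order is off or the partition spans the full intermediate size. A minimal sketch of the shape math, assuming a hypothetical 4-bit, group-size-128 configuration (all sizes below are illustrative, not taken from any real model):

weight_bits = 4
pack_factor = 32 // weight_bits  # 8 quantized values per int32
num_experts, hidden_size, intermediate = 8, 4096, 11008
group_size = 128

# Fused gate_up_proj: input dim is packed, output dim is doubled
w13_qweight_shape = (num_experts, hidden_size // pack_factor, 2 * intermediate)
# down_proj: the partitioned intermediate size is the packed input dim
w2_qweight_shape = (num_experts, intermediate // pack_factor, hidden_size)
# One scale row per group of input channels
w13_scales_shape = (num_experts, hidden_size // group_size, 2 * intermediate)

print(w13_qweight_shape)  # (8, 512, 22016)
print(w2_qweight_shape)   # (8, 1376, 4096)
print(w13_scales_shape)   # (8, 32, 22016)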

get_fused_moe_quant_config

get_fused_moe_quant_config(
    layer: Module,
) -> FusedMoEQuantConfig | None
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def get_fused_moe_quant_config(
    self, layer: torch.nn.Module
) -> FusedMoEQuantConfig | None:
    from vllm.model_executor.layers.fused_moe.config import (
        gptq_marlin_moe_quant_config,
    )

    return gptq_marlin_moe_quant_config(
        w1_scale=layer.w13_scales,
        w2_scale=layer.w2_scales,
        weight_bits=self.quant_config.weight_bits,
        group_size=self.quant_config.group_size,
        w1_zp=getattr(layer, "w13_qzeros", None)
        if not self.quant_config.is_sym
        else None,
        w2_zp=getattr(layer, "w2_qzeros", None)
        if not self.quant_config.is_sym
        else None,
        w1_bias=getattr(layer, "w13_bias", None),
        w2_bias=getattr(layer, "w2_bias", None),
    )
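
The zero-point gating in the call above only forwards qzeros for asymmetric checkpoints; with is_sym=True the Marlin kernel needs no zero points. A minimal sketch of that conditional (the helper name is illustrative, not vLLM API):

def zp_arg(qzeros, is_sym: bool):
    # Mirror the conditional above: forward zero points only when asymmetric
    return qzeros if not is_sym else None

assert zp_arg("w13_qzeros", is_sym=True) is None
assert zp_arg("w13_qzeros", is_sym=False) == "w13_qzeros"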

process_weights_after_loading

process_weights_after_loading(layer: Module) -> None
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    is_a_8bit = self.input_dtype is not None and self.input_dtype.itemsize == 1

    if is_a_8bit:
        assert self.quant_type == scalar_types.uint4b8, (
            "W8A8-INT8 is not supported by marlin kernel."
        )

    if self.input_dtype == torch.float8_e4m3fn:
        ops.marlin_int4_fp8_preprocess(layer.w13_qweight, inplace=True)
        ops.marlin_int4_fp8_preprocess(layer.w2_qweight, inplace=True)
        layer.w13_scales.data = layer.w13_scales.data * 512
        layer.w2_scales.data = layer.w2_scales.data * 512

    # Process act_order
    if self.quant_config.desc_act:
        # Get sorting based on g_idx
        num_experts = layer.w13_g_idx.shape[0]
        w13_g_idx_sort_indices = torch.empty_like(layer.w13_g_idx)
        w2_g_idx_sort_indices = torch.empty_like(layer.w2_g_idx)
        w13_sorted_g_idx = torch.empty_like(layer.w13_g_idx)
        w2_sorted_g_idx = torch.empty_like(layer.w2_g_idx)
        for e in range(num_experts):
            w13_g_idx_sort_indices[e] = torch.argsort(layer.w13_g_idx[e]).to(
                torch.int32
            )
            w2_g_idx_sort_indices[e] = torch.argsort(layer.w2_g_idx[e]).to(
                torch.int32
            )
            w13_sorted_g_idx[e] = layer.w13_g_idx[e][w13_g_idx_sort_indices[e]]
            w2_sorted_g_idx[e] = layer.w2_g_idx[e][w2_g_idx_sort_indices[e]]
        replace_parameter(layer, "w13_g_idx", w13_sorted_g_idx)
        replace_parameter(layer, "w2_g_idx", w2_sorted_g_idx)
        replace_parameter(layer, "w13_g_idx_sort_indices", w13_g_idx_sort_indices)
        replace_parameter(layer, "w2_g_idx_sort_indices", w2_g_idx_sort_indices)
    else:
        # Reset g_idx related tensors
        num_experts = layer.w13_g_idx.shape[0]
        device = layer.w13_g_idx.device
        layer.w13_g_idx = torch.nn.Parameter(
            torch.empty((num_experts, 0), dtype=torch.int32, device=device),
            requires_grad=False,
        )
        layer.w2_g_idx = torch.nn.Parameter(
            torch.empty((num_experts, 0), dtype=torch.int32, device=device),
            requires_grad=False,
        )
        layer.w13_g_idx_sort_indices = torch.nn.Parameter(
            torch.empty((num_experts, 0), dtype=torch.int32, device=device),
            requires_grad=False,
        )
        layer.w2_g_idx_sort_indices = torch.nn.Parameter(
            torch.empty((num_experts, 0), dtype=torch.int32, device=device),
            requires_grad=False,
        )
    # Repack weights
    marlin_w13_qweight = ops.gptq_marlin_moe_repack(
        layer.w13_qweight,
        layer.w13_g_idx_sort_indices,
        layer.w13_qweight.shape[1] * self.quant_config.pack_factor,
        layer.w13_qweight.shape[2],
        self.quant_config.quant_type.size_bits,
        is_a_8bit=is_a_8bit,
    )
    replace_parameter(layer, "w13_qweight", marlin_w13_qweight)
    marlin_w2_qweight = ops.gptq_marlin_moe_repack(
        layer.w2_qweight,
        layer.w2_g_idx_sort_indices,
        layer.w2_qweight.shape[1] * self.quant_config.pack_factor,
        layer.w2_qweight.shape[2],
        self.quant_config.quant_type.size_bits,
        is_a_8bit=is_a_8bit,
    )
    replace_parameter(layer, "w2_qweight", marlin_w2_qweight)

    # The modular kernel expects w13_weight and w2_weight, but GPTQ names
    # them w13_qweight and w2_qweight; alias them for the modular kernel.
    layer.w13_weight = layer.w13_qweight
    layer.w2_weight = layer.w2_qweight

    # Repack scales
    marlin_w13_scales = marlin_moe_permute_scales(
        s=layer.w13_scales,
        size_k=layer.intermediate_size_per_partition,
        size_n=layer.w13_scales.shape[2],
        group_size=self.quant_config.group_size,
        is_a_8bit=is_a_8bit,
    )
    if self.input_dtype == torch.int8 and layer.num_groups_w13 > 1:
        marlin_w13_scales, w13_input_global_scale = marlin_act_int8_process_scales(
            marlin_w13_scales
        )
        layer.register_parameter(
            "w13_input_global_scale",
            torch.nn.Parameter(w13_input_global_scale, requires_grad=False),
        )

    replace_parameter(layer, "w13_scales", marlin_w13_scales)
    marlin_w2_scales = marlin_moe_permute_scales(
        s=layer.w2_scales,
        size_k=layer.w2_scales.shape[1]
        * (
            self.quant_config.group_size
            if self.quant_config.group_size != -1
            else self.quant_config.pack_factor
        ),
        size_n=layer.w2_scales.shape[2],
        group_size=self.quant_config.group_size,
        is_a_8bit=is_a_8bit,
    )
    if self.input_dtype == torch.int8 and layer.num_groups_w2 > 1:
        marlin_w2_scales, w2_input_global_scale = marlin_act_int8_process_scales(
            marlin_w2_scales
        )
        layer.register_parameter(
            "w2_input_global_scale",
            torch.nn.Parameter(w2_input_global_scale, requires_grad=False),
        )

    replace_parameter(layer, "w2_scales", marlin_w2_scales)

    if hasattr(layer, "w13_bias") and layer.w13_bias is not None:
        layer.w13_bias.data = marlin_permute_bias(layer.w13_bias)

    if hasattr(layer, "w2_bias") and layer.w2_bias is not None:
        layer.w2_bias.data = marlin_permute_bias(layer.w2_bias)
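
The act-order branch above argsorts each expert's g_idx independently so the repacked weights can be traversed in group order. A standalone sketch of that per-expert step, using made-up shapes and plain torch (not vLLM code):

import torch

num_experts, k = 2, 8
g_idx = torch.randint(0, 4, (num_experts, k), dtype=torch.int32)

sort_indices = torch.empty_like(g_idx)
sorted_g_idx = torch.empty_like(g_idx)
for e in range(num_experts):
    order = torch.argsort(g_idx[e])          # int64 permutation
    sort_indices[e] = order.to(torch.int32)  # the Marlin kernel expects int32
    sorted_g_idx[e] = g_idx[e][order]

# After sorting, every expert's group indices are non-decreasing
assert all(bool((sorted_g_idx[e].diff() >= 0).all()) for e in range(num_experts))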

select_gemm_impl

select_gemm_impl(prepare_finalize, layer: Module)

Select the GEMM implementation for GPTQ-Marlin MoE.

Returns MarlinExperts configured for GPTQ quantization. This is ONLY used when LoRA is enabled. Without LoRA, GPTQ uses its own apply() method.

Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def select_gemm_impl(
    self,
    prepare_finalize,
    layer: torch.nn.Module,
):
    """
    Select the GEMM implementation for GPTQ-Marlin MoE.

    Returns MarlinExperts configured for GPTQ quantization.
    This is ONLY used when LoRA is enabled.
    Without LoRA, GPTQ uses its own apply() method.
    """
    # Only use modular kernels when LoRA is enabled
    # Without LoRA, GPTQ's own apply() method works fine and is more efficient
    if not self.moe.is_lora_enabled:
        raise NotImplementedError(
            "GPTQ-Marlin uses its own apply() method when LoRA is not enabled. "
            "Modular kernels are only used for LoRA support."
        )

    # The modular marlin kernels do not support 8-bit weights.
    if self.quant_config.weight_bits == 8:
        raise NotImplementedError(
            "GPTQ-Marlin kernel does not support 8-bit weights."
        )

    from vllm.model_executor.layers.fused_moe import modular_kernel as mk
    from vllm.model_executor.layers.fused_moe.fused_marlin_moe import (
        BatchedMarlinExperts,
        MarlinExperts,
    )

    # Ensure quant config is initialized
    assert self.moe_quant_config is not None, (
        "moe_quant_config must be initialized before select_gemm_impl"
    )

    w13_g_idx = (
        getattr(layer, "w13_g_idx", None) if self.quant_config.desc_act else None
    )
    w2_g_idx = (
        getattr(layer, "w2_g_idx", None) if self.quant_config.desc_act else None
    )
    w13_g_idx_sort_indices = (
        getattr(layer, "w13_g_idx_sort_indices", None)
        if self.quant_config.desc_act
        else None
    )
    w2_g_idx_sort_indices = (
        getattr(layer, "w2_g_idx_sort_indices", None)
        if self.quant_config.desc_act
        else None
    )

    # Check if using batched expert format (for Expert Parallelism)
    if (
        prepare_finalize.activation_format
        == mk.FusedMoEActivationFormat.BatchedExperts
    ):
        # For batched format, use BatchedMarlinExperts
        max_num_tokens_per_rank = prepare_finalize.max_num_tokens_per_rank()
        assert max_num_tokens_per_rank is not None
        return BatchedMarlinExperts(
            max_num_tokens=max_num_tokens_per_rank,
            num_dispatchers=prepare_finalize.num_dispatchers(),
            quant_config=self.moe_quant_config,
            w13_g_idx=w13_g_idx,
            w2_g_idx=w2_g_idx,
            w13_g_idx_sort_indices=w13_g_idx_sort_indices,
            w2_g_idx_sort_indices=w2_g_idx_sort_indices,
            is_k_full=self.is_k_full,
        )
    else:
        # Standard Marlin experts for GPTQ
        return MarlinExperts(
            quant_config=self.moe_quant_config,
            w13_g_idx=w13_g_idx,
            w2_g_idx=w2_g_idx,
            w13_g_idx_sort_indices=w13_g_idx_sort_indices,
            w2_g_idx_sort_indices=w2_g_idx_sort_indices,
            is_k_full=self.is_k_full,
        )
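
The dispatch at the end of select_gemm_impl reduces to a two-way choice on the prepare/finalize activation format: batched-expert layouts (used for expert parallelism) get BatchedMarlinExperts, everything else gets MarlinExperts. A minimal sketch of that choice (the enum and return strings below are stand-ins, not the vLLM types):

from enum import Enum, auto

class ActivationFormat(Enum):  # stand-in for mk.FusedMoEActivationFormat
    Standard = auto()
    BatchedExperts = auto()

def pick_experts(fmt: ActivationFormat) -> str:
    # Batched expert layouts (expert parallelism) need the batched kernel
    if fmt is ActivationFormat.BatchedExperts:
        return "BatchedMarlinExperts"
    return "MarlinExperts"

assert pick_experts(ActivationFormat.Standard) == "MarlinExperts"
assert pick_experts(ActivationFormat.BatchedExperts) == "BatchedMarlinExperts"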

get_moe_quant_method

get_moe_quant_method(
    config: GPTQMarlinConfig,
    layer: Module,
    prefix: str,
    moe_method_cls: type,
)
Source code in vllm/model_executor/layers/quantization/gptq_marlin.py
def get_moe_quant_method(
    config: "GPTQMarlinConfig",
    layer: torch.nn.Module,
    prefix: str,
    moe_method_cls: type,
):
    cloned_config = deepcopy(config)

    if isinstance(layer, FusedMoE):
        # False = skip module, None = no override, else = positive match
        dynamic_override = get_dynamic_override(cloned_config, layer_name=prefix)
        if dynamic_override == False:  # noqa: E712 -- None must not match
            return UnquantizedFusedMoEMethod(layer.moe_config)

        if prefix:
            # Dynamic per module/layer rules may override base config
            override_config(cloned_config, prefix=prefix)

        return moe_method_cls(cloned_config, layer.moe_config)
    return None
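
The == False comparison above is deliberate: get_dynamic_override can return False (skip quantization for this module), None (no matching rule), or an override value, so a truthiness test or an is-None check alone would conflate the first two cases. A minimal sketch of that three-way contract (the handler name is illustrative):

def handle(dynamic_override):
    if dynamic_override == False:  # noqa: E712 -- None must not match here
        return "unquantized"
    if dynamic_override is None:
        return "base config"
    return "override applied"

assert handle(False) == "unquantized"
assert handle(None) == "base config"
assert handle({"bits": 8}) == "override applied"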