vllm.platforms.rocm

_ROCM_DEVICE_ID_NAME_MAP module-attribute

_ROCM_DEVICE_ID_NAME_MAP: dict[str, str] = {
    "0x74a0": "AMD_Instinct_MI300A",
    "0x74a1": "AMD_Instinct_MI300X",
    "0x74b5": "AMD_Instinct_MI300X",
    "0x74a2": "AMD_Instinct_MI308X",
    "0x74a5": "AMD_Instinct_MI325X",
    "0x74b9": "AMD_Instinct_MI325X",
    "0x74a9": "AMD_Instinct_MI300X_HF",
    "0x74bd": "AMD_Instinct_MI300X_HF",
    "0x744c": "AMD_Radeon_RX7900XTX",
}
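
For illustration, a minimal sketch of how a map like this is consulted: prefer the curated name keyed by the hex PCI device ID, and fall back to the ASIC's market name otherwise. The helper below is hypothetical and not part of vllm; the actual lookup happens in RocmPlatform.get_device_name.

# Hypothetical lookup mirroring the fallback logic used by get_device_name().
_DEVICE_ID_NAME_MAP = {
    "0x74a1": "AMD_Instinct_MI300X",
    "0x744c": "AMD_Radeon_RX7900XTX",
}

def resolve_device_name(pci_device_id: str, market_name: str) -> str:
    # Curated name if the PCI ID is known, otherwise the reported market name.
    return _DEVICE_ID_NAME_MAP.get(pci_device_id, market_name)

print(resolve_device_name("0x74a1", "Instinct MI300X"))  # AMD_Instinct_MI300X
print(resolve_device_name("0xffff", "Some GPU"))         # Some GPU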

_ROCM_PARTIALLY_SUPPORTED_MODELS module-attribute

_ROCM_PARTIALLY_SUPPORTED_MODELS: dict[str, str] = {}

_ROCM_SWA_REASON module-attribute

_ROCM_SWA_REASON = ()

_ROCM_UNSUPPORTED_MODELS module-attribute

_ROCM_UNSUPPORTED_MODELS: list[str] = []

logger module-attribute

logger = init_logger(__name__)

val module-attribute

val = environ['HIP_VISIBLE_DEVICES']

RocmPlatform

Bases: Platform

Source code in vllm/platforms/rocm.py
class RocmPlatform(Platform):
    _enum = PlatformEnum.ROCM
    device_name: str = "rocm"
    device_type: str = "cuda"
    dispatch_key: str = "CUDA"
    ray_device_key: str = "GPU"
    dist_backend: str = "nccl"
    # rocm shares the same device control env var as CUDA
    device_control_env_var: str = "CUDA_VISIBLE_DEVICES"

    supported_quantization: list[str] = [
        "awq",
        "gptq",
        "fp8",
        "compressed-tensors",
        "fbgemm_fp8",
        "gguf",
        "quark",
        "ptpc_fp8",
        "mxfp4",
        "petit_nvfp4",
        "torchao",
    ]
    # bitsandbytes not supported on gfx9 (warp size 64 limitation)
    if not on_gfx9():
        supported_quantization += ["bitsandbytes"]

    @classmethod
    def get_vit_attn_backend(
        cls, head_size: int, dtype: torch.dtype
    ) -> AttentionBackendEnum:
        from importlib.util import find_spec

        from vllm._aiter_ops import rocm_aiter_ops

        if rocm_aiter_ops.is_mha_enabled():
            # Note: AITER FA is only supported for Qwen-VL models.
            # TODO: Add support for other VL models in their model class.
            return AttentionBackendEnum.ROCM_AITER_FA

        if on_gfx9() and find_spec("flash_attn") is not None:
            return AttentionBackendEnum.FLASH_ATTN

        return AttentionBackendEnum.TORCH_SDPA

    @classmethod
    def get_attn_backend_cls(
        cls,
        selected_backend,
        head_size,
        dtype,
        kv_cache_dtype,
        block_size,
        use_mla,
        has_sink,
        use_sparse,
        use_mm_prefix,
        attn_type: str | None = None,
    ) -> str:
        from vllm._aiter_ops import rocm_aiter_ops

        if use_sparse:
            if kv_cache_dtype.startswith("fp8"):
                raise ValueError(
                    "ROCMAiterMLASparseBackend doesn't support fp8 kv_cache_dtype."
                )
            assert block_size == 1, (
                "Sparse MLA backend on ROCm only supports block size 1 for now."
            )
            logger.info_once("Using Sparse MLA backend on V1 engine.")
            return AttentionBackendEnum.ROCM_AITER_MLA_SPARSE.get_path()

        if use_mla:
            if selected_backend is None:
                selected_backend = (
                    AttentionBackendEnum.ROCM_AITER_MLA
                    if rocm_aiter_ops.is_mla_enabled() or block_size == 1
                    else AttentionBackendEnum.TRITON_MLA
                )
            if selected_backend == AttentionBackendEnum.TRITON_MLA:
                if block_size != 1:
                    logger.info_once("Using Triton MLA backend.")
                    return AttentionBackendEnum.TRITON_MLA.get_path()
                raise ValueError(
                    f" The selected backend, {selected_backend.name},"
                    f"does not support block size {block_size}."
                )
            if selected_backend == AttentionBackendEnum.ROCM_AITER_MLA:
                logger.info("Using AITER MLA backend.")
                return AttentionBackendEnum.ROCM_AITER_MLA.get_path()
            if selected_backend == AttentionBackendEnum.ROCM_AITER_TRITON_MLA:
                logger.info("Using AITER TRITON MLA backend.")
                return AttentionBackendEnum.ROCM_AITER_TRITON_MLA.get_path()

            raise ValueError(
                f" The selected backend, {selected_backend.name},"
                f"is not MLA type while requested for MLA backend."
            )

        if selected_backend == AttentionBackendEnum.FLEX_ATTENTION:
            logger.info("Using FlexAttention backend.")
            return AttentionBackendEnum.FLEX_ATTENTION.get_path()

        if selected_backend == AttentionBackendEnum.TRITON_ATTN:
            logger.info("Using Triton Attention backend on V1 engine.")
            return AttentionBackendEnum.TRITON_ATTN.get_path()

        if selected_backend == AttentionBackendEnum.ROCM_ATTN:
            logger.info("Using Rocm Attention backend on V1 engine.")
            return AttentionBackendEnum.ROCM_ATTN.get_path()

        if selected_backend == AttentionBackendEnum.ROCM_AITER_FA:
            if on_gfx9():
                logger.info("Using Aiter Flash Attention backend on V1 engine.")
                return AttentionBackendEnum.ROCM_AITER_FA.get_path()
            else:
                raise ValueError(
                    f"The selected backend, {selected_backend.name}, "
                    "is only supported on gfx9 architectures."
                )

        if selected_backend == AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN:
            logger.info("Using Aiter Unified Attention backend on V1 engine.")
            return AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN.get_path()

        # Handle automatic backend selection based on environment variables
        if selected_backend is None:
            # Priority 1: Check for AITER Unified Attention (must check before MHA)
            if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION:
                logger.info("Using Aiter Unified Attention backend on V1 engine.")
                return AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN.get_path()

            # Priority 2: Check for AITER MHA (Flash Attention)
            # Only use if explicitly enabled (not just VLLM_ROCM_USE_AITER=1)
            if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA and on_gfx9():
                logger.info("Using Aiter Flash Attention backend on V1 engine.")
                return AttentionBackendEnum.ROCM_AITER_FA.get_path()

            # Priority 3: Check for ROCM_ATTN (prefill-decode split)
            if envs.VLLM_V1_USE_PREFILL_DECODE_ATTENTION:
                logger.info("Using Rocm Attention backend on V1 engine.")
                return AttentionBackendEnum.ROCM_ATTN.get_path()

            # Priority 4: Check for AITER enabled without specific flags
            # This defaults to AITER FA only if MHA is not explicitly disabled
            if (
                envs.VLLM_ROCM_USE_AITER
                and on_gfx9()
                and envs.VLLM_ROCM_USE_AITER_MHA is not False
            ):
                logger.info("Using Aiter Flash Attention backend on V1 engine.")
                return AttentionBackendEnum.ROCM_AITER_FA.get_path()

            # Default: Triton Unified Attention
            logger.info("Using Triton Attention backend on V1 engine.")
            return AttentionBackendEnum.TRITON_ATTN.get_path()

        raise RuntimeError(
            f"Attention backend {selected_backend.name} is not supported on "
            "ROCm. Note that V0 attention backends have been removed."
        )

    @classmethod
    def set_device(cls, device: torch.device) -> None:
        """
        Set the device for the current platform.
        """
        torch.cuda.set_device(device)

    @classmethod
    @lru_cache(maxsize=8)
    def get_device_capability(cls, device_id: int = 0) -> DeviceCapability | None:
        major, minor = torch.cuda.get_device_capability(device_id)
        return DeviceCapability(major=major, minor=minor)

    @classmethod
    @with_amdsmi_context
    def is_fully_connected(cls, physical_device_ids: list[int]) -> bool:
        """
        Query if the set of gpus are fully connected by xgmi (1 hop)
        """
        handles = [amdsmi_get_processor_handles()[i] for i in physical_device_ids]
        for i, handle in enumerate(handles):
            for j, peer_handle in enumerate(handles):
                if i < j:
                    try:
                        link_type = amdsmi_topo_get_link_type(handle, peer_handle)
                        # type is 2 for XGMI
                        if link_type["hops"] != 1 or link_type["type"] != 2:
                            return False
                    except AmdSmiException as error:
                        logger.error("AMD 1 hop XGMI detection failed.", exc_info=error)
                        return False
        return True

    @classmethod
    @with_amdsmi_context
    @lru_cache(maxsize=8)
    def get_device_name(cls, device_id: int = 0) -> str:
        physical_device_id = cls.device_id_to_physical_device_id(device_id)
        handle = amdsmi_get_processor_handles()[physical_device_id]
        asic_info = amdsmi_get_gpu_asic_info(handle)
        device_name: str = asic_info["device_id"]
        if device_name in _ROCM_DEVICE_ID_NAME_MAP:
            return _ROCM_DEVICE_ID_NAME_MAP[device_name]
        return asic_info["market_name"]

    @classmethod
    def get_device_total_memory(cls, device_id: int = 0) -> int:
        device_props = torch.cuda.get_device_properties(device_id)
        return device_props.total_memory

    @classmethod
    def check_and_update_config(cls, vllm_config: "VllmConfig") -> None:
        from vllm._aiter_ops import rocm_aiter_ops
        from vllm.config.compilation import CUDAGraphMode

        cache_config = vllm_config.cache_config
        compilation_config = vllm_config.compilation_config
        parallel_config = vllm_config.parallel_config
        is_eager_execution = compilation_config == CUDAGraphMode.NONE
        use_aiter_rms_norm = rocm_aiter_ops.is_rmsnorm_enabled()
        use_aiter_fp8_linear = rocm_aiter_ops.is_linear_fp8_enaled()

        if compilation_config.cudagraph_mode.has_full_cudagraphs():
            # decode context parallel does not support full cudagraphs
            if parallel_config.decode_context_parallel_size > 1:
                logger.warning_once(
                    "Decode context parallel (DCP) is enabled, which is "
                    "incompatible with full CUDA graphs. "
                    "Overriding cudagraph_mode to PIECEWISE."
                )
                compilation_config.cudagraph_mode = CUDAGraphMode.PIECEWISE
            # prefill context parallel does not support full cudagraphs
            elif parallel_config.prefill_context_parallel_size > 1:
                logger.warning_once(
                    "Prefill context parallel (PCP) is enabled, which is "
                    "incompatible with full CUDA graphs. "
                    "Overriding cudagraph_mode to PIECEWISE."
                )
                compilation_config.cudagraph_mode = CUDAGraphMode.PIECEWISE

        if cache_config and cache_config.block_size is None:
            if (
                envs.VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION and envs.VLLM_ROCM_USE_AITER
                # NOTE: This block has been deprecated
                # or get_env_variable_attn_backend()
                # == AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN
                # TODO: monitor https://github.com/vllm-project/vllm/pull/30396
                # to see how we can transition to the new way of selecting
                # attention backends
            ):
                cache_config.block_size = 64
                logger.warning(
                    "[ROCM_AITER_UNIFIED_ATTN]: Setting kv cache block size to 64."
                )
            else:
                cache_config.block_size = 16

        if parallel_config.worker_cls == "auto":
            parallel_config.worker_cls = "vllm.v1.worker.gpu_worker.Worker"
        # AITER RMS norm performs best when CUDA graph capture is enabled.
        if (
            use_aiter_rms_norm
            and not is_eager_execution
            and "-rms_norm" not in compilation_config.custom_ops
        ):
            compilation_config.custom_ops.append("+rms_norm")

        if use_aiter_fp8_linear and "-quant_fp8" not in compilation_config.custom_ops:
            compilation_config.custom_ops.append("+quant_fp8")

    @classmethod
    def verify_model_arch(cls, model_arch: str) -> None:
        if model_arch in _ROCM_UNSUPPORTED_MODELS:
            raise ValueError(
                f"Model architecture '{model_arch}' is not supported by ROCm for now."
            )

        if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
            msg = _ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]
            logger.warning(
                "Model architecture '%s' is partially supported by ROCm: %s",
                model_arch,
                msg,
            )

    @classmethod
    def verify_quantization(cls, quant: str) -> None:
        super().verify_quantization(quant)
        if quant == "awq" and not envs.VLLM_USE_TRITON_AWQ:
            logger.warning(
                "Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ"
                " is not set, enabling VLLM_USE_TRITON_AWQ."
            )
        os.environ["VLLM_USE_TRITON_AWQ"] = "1"

    @classmethod
    def get_punica_wrapper(cls) -> str:
        return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU"

    @classmethod
    def get_current_memory_usage(
        cls, device: torch.types.Device | None = None
    ) -> float:
        torch.cuda.reset_peak_memory_stats(device)
        return torch.cuda.mem_get_info(device)[1] - torch.cuda.mem_get_info(device)[0]

    @classmethod
    def get_device_communicator_cls(cls) -> str:
        return (
            "vllm.distributed.device_communicators.cuda_communicator.CudaCommunicator"  # noqa
        )

    @classmethod
    def supports_mx(cls) -> bool:
        gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
        return any(gfx in gcn_arch for gfx in ["gfx95"])

    @classmethod
    def supports_fp8(cls) -> bool:
        gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
        return any(gfx in gcn_arch for gfx in ["gfx94", "gfx95", "gfx12"])

    @classmethod
    def is_fp8_fnuz(cls) -> bool:
        # Only device 0 is checked; this assumes MI300 platforms are homogeneous.
        return "gfx94" in torch.cuda.get_device_properties(0).gcnArchName

    @classmethod
    def fp8_dtype(cls) -> torch.dtype:
        if cls.is_fp8_fnuz():
            return torch.float8_e4m3fnuz
        else:
            return torch.float8_e4m3fn

    @classmethod
    def use_custom_allreduce(cls) -> bool:
        # We only enable custom allreduce for MI300 series
        gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
        supported_archs = ["gfx94", "gfx95"]
        return any(gfx in gcn_arch for gfx in supported_archs)

    @classmethod
    def opaque_attention_op(cls) -> bool:
        return True

    @classmethod
    def is_navi(cls) -> bool:
        return "gfx1" in torch.cuda.get_device_properties(0).gcnArchName

    @classmethod
    def get_static_graph_wrapper_cls(cls) -> str:
        return "vllm.compilation.cuda_graph.CUDAGraphWrapper"

    @classmethod
    def device_count(cls) -> int:
        return cuda_device_count_stateless()

    @classmethod
    def check_if_supports_dtype(cls, dtype: torch.dtype):
        if dtype == torch.bfloat16:  # noqa: SIM102
            if not cls.has_device_capability(80):
                capability = cls.get_device_capability()
                gpu_name = cls.get_device_name()

                if capability is None:
                    compute_str = "does not have a compute capability"
                else:
                    version_str = capability.as_version_str()
                    compute_str = f"has compute capability {version_str}"

                raise ValueError(
                    "Bfloat16 is only supported on GPUs "
                    "with compute capability of at least 8.0. "
                    f"Your {gpu_name} GPU {compute_str}. "
                    "You can use float16 instead by explicitly setting the "
                    "`dtype` flag in CLI, for example: --dtype=half."
                )

    @classmethod
    def support_hybrid_kv_cache(cls) -> bool:
        return True

    @classmethod
    def support_static_graph_mode(cls) -> bool:
        return True
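
On a ROCm build of vLLM, the active platform object resolves to this class, and most of the classmethods above are reached indirectly through it. A quick interactive check (requires a ROCm device; the shown outputs assume an MI300-class GPU):

from vllm.platforms import current_platform

print(current_platform.device_name)   # "rocm"
print(current_platform.dispatch_key)  # "CUDA" (ROCm reuses the CUDA dispatch key)
print(current_platform.fp8_dtype())   # torch.float8_e4m3fnuz on gfx94x, e4m3fn otherwise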

_enum class-attribute instance-attribute

_enum = ROCM

device_control_env_var class-attribute instance-attribute

device_control_env_var: str = 'CUDA_VISIBLE_DEVICES'

device_name class-attribute instance-attribute

device_name: str = 'rocm'

device_type class-attribute instance-attribute

device_type: str = 'cuda'

dispatch_key class-attribute instance-attribute

dispatch_key: str = 'CUDA'

dist_backend class-attribute instance-attribute

dist_backend: str = 'nccl'

ray_device_key class-attribute instance-attribute

ray_device_key: str = 'GPU'

supported_quantization class-attribute instance-attribute

supported_quantization: list[str] = [
    "awq",
    "gptq",
    "fp8",
    "compressed-tensors",
    "fbgemm_fp8",
    "gguf",
    "quark",
    "ptpc_fp8",
    "mxfp4",
    "petit_nvfp4",
    "torchao",
]
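
A quantization method is accepted only if it appears in this list. On a ROCm machine, a minimal pre-flight membership check against the attribute looks like this (the real validation path is RocmPlatform.verify_quantization, which also handles the AWQ-specific environment flag):

from vllm.platforms.rocm import RocmPlatform

print("fp8" in RocmPlatform.supported_quantization)  # True
print("nf4" in RocmPlatform.supported_quantization)  # False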

check_and_update_config classmethod

check_and_update_config(vllm_config: VllmConfig) -> None
Source code in vllm/platforms/rocm.py
@classmethod
def check_and_update_config(cls, vllm_config: "VllmConfig") -> None:
    from vllm._aiter_ops import rocm_aiter_ops
    from vllm.config.compilation import CUDAGraphMode

    cache_config = vllm_config.cache_config
    compilation_config = vllm_config.compilation_config
    parallel_config = vllm_config.parallel_config
    is_eager_execution = compilation_config == CUDAGraphMode.NONE
    use_aiter_rms_norm = rocm_aiter_ops.is_rmsnorm_enabled()
    use_aiter_fp8_linear = rocm_aiter_ops.is_linear_fp8_enaled()

    if compilation_config.cudagraph_mode.has_full_cudagraphs():
        # decode context parallel does not support full cudagraphs
        if parallel_config.decode_context_parallel_size > 1:
            logger.warning_once(
                "Decode context parallel (DCP) is enabled, which is "
                "incompatible with full CUDA graphs. "
                "Overriding cudagraph_mode to PIECEWISE."
            )
            compilation_config.cudagraph_mode = CUDAGraphMode.PIECEWISE
        # prefill context parallel does not support full cudagraphs
        elif parallel_config.prefill_context_parallel_size > 1:
            logger.warning_once(
                "Prefill context parallel (PCP) is enabled, which is "
                "incompatible with full CUDA graphs. "
                "Overriding cudagraph_mode to PIECEWISE."
            )
            compilation_config.cudagraph_mode = CUDAGraphMode.PIECEWISE

    if cache_config and cache_config.block_size is None:
        if (
            envs.VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION and envs.VLLM_ROCM_USE_AITER
            # NOTE: This block has been deprecated
            # or get_env_variable_attn_backend()
            # == AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN
            # TODO: monitor https://github.com/vllm-project/vllm/pull/30396
            # to see how we can transition to the new way of selecting
            # attention backends
        ):
            cache_config.block_size = 64
            logger.warning(
                "[ROCM_AITER_UNIFIED_ATTN]: Setting kv cache block size to 64."
            )
        else:
            cache_config.block_size = 16

    if parallel_config.worker_cls == "auto":
        parallel_config.worker_cls = "vllm.v1.worker.gpu_worker.Worker"
    # AITER RMS norm performs best when CUDA graph capture is enabled.
    if (
        use_aiter_rms_norm
        and not is_eager_execution
        and "-rms_norm" not in compilation_config.custom_ops
    ):
        compilation_config.custom_ops.append("+rms_norm")

    if use_aiter_fp8_linear and "-quant_fp8" not in compilation_config.custom_ops:
        compilation_config.custom_ops.append("+quant_fp8")
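
The block-size defaulting above can be read as a small standalone rule; the sketch below mirrors it (the helper is hypothetical, but the environment variable names are the real vLLM settings consulted via envs):

import os

def default_kv_block_size() -> int:
    use_aiter = os.environ.get("VLLM_ROCM_USE_AITER", "0") == "1"
    use_unified = os.environ.get("VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION", "0") == "1"
    # AITER unified attention wants 64-token KV-cache blocks; everything else defaults to 16.
    return 64 if (use_aiter and use_unified) else 16

print(default_kv_block_size())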

check_if_supports_dtype classmethod

check_if_supports_dtype(dtype: dtype)
Source code in vllm/platforms/rocm.py
@classmethod
def check_if_supports_dtype(cls, dtype: torch.dtype):
    if dtype == torch.bfloat16:  # noqa: SIM102
        if not cls.has_device_capability(80):
            capability = cls.get_device_capability()
            gpu_name = cls.get_device_name()

            if capability is None:
                compute_str = "does not have a compute capability"
            else:
                version_str = capability.as_version_str()
                compute_str = f"has compute capability {version_str}"

            raise ValueError(
                "Bfloat16 is only supported on GPUs "
                "with compute capability of at least 8.0. "
                f"Your {gpu_name} GPU {compute_str}. "
                "You can use float16 instead by explicitly setting the "
                "`dtype` flag in CLI, for example: --dtype=half."
            )
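
A typical use is a pre-flight bfloat16 check before loading a model; on an unsupported GPU this raises ValueError with the suggestion to fall back to --dtype=half (requires a ROCm device):

import torch
from vllm.platforms.rocm import RocmPlatform

RocmPlatform.check_if_supports_dtype(torch.bfloat16)  # raises ValueError if capability < 8.0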

device_count classmethod

device_count() -> int
Source code in vllm/platforms/rocm.py
@classmethod
def device_count(cls) -> int:
    return cuda_device_count_stateless()

fp8_dtype classmethod

fp8_dtype() -> dtype
Source code in vllm/platforms/rocm.py
@classmethod
def fp8_dtype(cls) -> torch.dtype:
    if cls.is_fp8_fnuz():
        return torch.float8_e4m3fnuz
    else:
        return torch.float8_e4m3fn
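
The choice of FP8 dtype follows directly from the architecture check in is_fp8_fnuz; a hardware-free sketch of that decision (the helper is hypothetical):

import torch

def fp8_dtype_for_arch(gcn_arch_name: str) -> torch.dtype:
    # gfx94x (MI300 family) uses the FNUZ variant; other supported archs use e4m3fn.
    return torch.float8_e4m3fnuz if "gfx94" in gcn_arch_name else torch.float8_e4m3fn

print(fp8_dtype_for_arch("gfx942"))  # torch.float8_e4m3fnuz
print(fp8_dtype_for_arch("gfx950"))  # torch.float8_e4m3fn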

get_attn_backend_cls classmethod

get_attn_backend_cls(
    selected_backend,
    head_size,
    dtype,
    kv_cache_dtype,
    block_size,
    use_mla,
    has_sink,
    use_sparse,
    use_mm_prefix,
    attn_type: str | None = None,
) -> str
Source code in vllm/platforms/rocm.py
@classmethod
def get_attn_backend_cls(
    cls,
    selected_backend,
    head_size,
    dtype,
    kv_cache_dtype,
    block_size,
    use_mla,
    has_sink,
    use_sparse,
    use_mm_prefix,
    attn_type: str | None = None,
) -> str:
    from vllm._aiter_ops import rocm_aiter_ops

    if use_sparse:
        if kv_cache_dtype.startswith("fp8"):
            raise ValueError(
                "ROCMAiterMLASparseBackend doesn't support fp8 kv_cache_dtype."
            )
        assert block_size == 1, (
            "Sparse MLA backend on ROCm only supports block size 1 for now."
        )
        logger.info_once("Using Sparse MLA backend on V1 engine.")
        return AttentionBackendEnum.ROCM_AITER_MLA_SPARSE.get_path()

    if use_mla:
        if selected_backend is None:
            selected_backend = (
                AttentionBackendEnum.ROCM_AITER_MLA
                if rocm_aiter_ops.is_mla_enabled() or block_size == 1
                else AttentionBackendEnum.TRITON_MLA
            )
        if selected_backend == AttentionBackendEnum.TRITON_MLA:
            if block_size != 1:
                logger.info_once("Using Triton MLA backend.")
                return AttentionBackendEnum.TRITON_MLA.get_path()
            raise ValueError(
                f" The selected backend, {selected_backend.name},"
                f"does not support block size {block_size}."
            )
        if selected_backend == AttentionBackendEnum.ROCM_AITER_MLA:
            logger.info("Using AITER MLA backend.")
            return AttentionBackendEnum.ROCM_AITER_MLA.get_path()
        if selected_backend == AttentionBackendEnum.ROCM_AITER_TRITON_MLA:
            logger.info("Using AITER TRITON MLA backend.")
            return AttentionBackendEnum.ROCM_AITER_TRITON_MLA.get_path()

        raise ValueError(
            f" The selected backend, {selected_backend.name},"
            f"is not MLA type while requested for MLA backend."
        )

    if selected_backend == AttentionBackendEnum.FLEX_ATTENTION:
        logger.info("Using FlexAttention backend.")
        return AttentionBackendEnum.FLEX_ATTENTION.get_path()

    if selected_backend == AttentionBackendEnum.TRITON_ATTN:
        logger.info("Using Triton Attention backend on V1 engine.")
        return AttentionBackendEnum.TRITON_ATTN.get_path()

    if selected_backend == AttentionBackendEnum.ROCM_ATTN:
        logger.info("Using Rocm Attention backend on V1 engine.")
        return AttentionBackendEnum.ROCM_ATTN.get_path()

    if selected_backend == AttentionBackendEnum.ROCM_AITER_FA:
        if on_gfx9():
            logger.info("Using Aiter Flash Attention backend on V1 engine.")
            return AttentionBackendEnum.ROCM_AITER_FA.get_path()
        else:
            raise ValueError(
                f"The selected backend, {selected_backend.name}, "
                "is only supported on gfx9 architectures."
            )

    if selected_backend == AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN:
        logger.info("Using Aiter Unified Attention backend on V1 engine.")
        return AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN.get_path()

    # Handle automatic backend selection based on environment variables
    if selected_backend is None:
        # Priority 1: Check for AITER Unified Attention (must check before MHA)
        if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_UNIFIED_ATTENTION:
            logger.info("Using Aiter Unified Attention backend on V1 engine.")
            return AttentionBackendEnum.ROCM_AITER_UNIFIED_ATTN.get_path()

        # Priority 2: Check for AITER MHA (Flash Attention)
        # Only use if explicitly enabled (not just VLLM_ROCM_USE_AITER=1)
        if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA and on_gfx9():
            logger.info("Using Aiter Flash Attention backend on V1 engine.")
            return AttentionBackendEnum.ROCM_AITER_FA.get_path()

        # Priority 3: Check for ROCM_ATTN (prefill-decode split)
        if envs.VLLM_V1_USE_PREFILL_DECODE_ATTENTION:
            logger.info("Using Rocm Attention backend on V1 engine.")
            return AttentionBackendEnum.ROCM_ATTN.get_path()

        # Priority 4: Check for AITER enabled without specific flags
        # This defaults to AITER FA only if MHA is not explicitly disabled
        if (
            envs.VLLM_ROCM_USE_AITER
            and on_gfx9()
            and envs.VLLM_ROCM_USE_AITER_MHA is not False
        ):
            logger.info("Using Aiter Flash Attention backend on V1 engine.")
            return AttentionBackendEnum.ROCM_AITER_FA.get_path()

        # Default: Triton Unified Attention
        logger.info("Using Triton Attention backend on V1 engine.")
        return AttentionBackendEnum.TRITON_ATTN.get_path()

    raise RuntimeError(
        f"Attention backend {selected_backend.name} is not supported on "
        "ROCm. Note that V0 attention backends have been removed."
    )
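
When no backend is selected explicitly, the priority order above is controlled entirely by environment variables. One way to opt into the AITER flash-attention path is to set the flags before vLLM is imported (the model name is a placeholder; MI300-class hardware with AITER installed is assumed):

import os
os.environ["VLLM_ROCM_USE_AITER"] = "1"
os.environ["VLLM_ROCM_USE_AITER_MHA"] = "1"

from vllm import LLM  # noqa: E402

llm = LLM(model="<your-model>")  # backend selection happens in get_attn_backend_cls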

get_current_memory_usage classmethod

get_current_memory_usage(
    device: Device | None = None,
) -> float
Source code in vllm/platforms/rocm.py
@classmethod
def get_current_memory_usage(
    cls, device: torch.types.Device | None = None
) -> float:
    torch.cuda.reset_peak_memory_stats(device)
    return torch.cuda.mem_get_info(device)[1] - torch.cuda.mem_get_info(device)[0]

get_device_capability cached classmethod

get_device_capability(
    device_id: int = 0,
) -> DeviceCapability | None
Source code in vllm/platforms/rocm.py
@classmethod
@lru_cache(maxsize=8)
def get_device_capability(cls, device_id: int = 0) -> DeviceCapability | None:
    major, minor = torch.cuda.get_device_capability(device_id)
    return DeviceCapability(major=major, minor=minor)

get_device_communicator_cls classmethod

get_device_communicator_cls() -> str
Source code in vllm/platforms/rocm.py
@classmethod
def get_device_communicator_cls(cls) -> str:
    return (
        "vllm.distributed.device_communicators.cuda_communicator.CudaCommunicator"  # noqa
    )

get_device_name cached classmethod

get_device_name(device_id: int = 0) -> str
Source code in vllm/platforms/rocm.py
@classmethod
@with_amdsmi_context
@lru_cache(maxsize=8)
def get_device_name(cls, device_id: int = 0) -> str:
    physical_device_id = cls.device_id_to_physical_device_id(device_id)
    handle = amdsmi_get_processor_handles()[physical_device_id]
    asic_info = amdsmi_get_gpu_asic_info(handle)
    device_name: str = asic_info["device_id"]
    if device_name in _ROCM_DEVICE_ID_NAME_MAP:
        return _ROCM_DEVICE_ID_NAME_MAP[device_name]
    return asic_info["market_name"]
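
On a ROCm machine with the amdsmi bindings installed, the resolved name can be queried directly; device IDs listed in _ROCM_DEVICE_ID_NAME_MAP return the curated name, others fall back to the market name:

from vllm.platforms.rocm import RocmPlatform

print(RocmPlatform.get_device_name(0))  # e.g. "AMD_Instinct_MI300X"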

get_device_total_memory classmethod

get_device_total_memory(device_id: int = 0) -> int
Source code in vllm/platforms/rocm.py
@classmethod
def get_device_total_memory(cls, device_id: int = 0) -> int:
    device_props = torch.cuda.get_device_properties(device_id)
    return device_props.total_memory

get_punica_wrapper classmethod

get_punica_wrapper() -> str
Source code in vllm/platforms/rocm.py
@classmethod
def get_punica_wrapper(cls) -> str:
    return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU"

get_static_graph_wrapper_cls classmethod

get_static_graph_wrapper_cls() -> str
Source code in vllm/platforms/rocm.py
@classmethod
def get_static_graph_wrapper_cls(cls) -> str:
    return "vllm.compilation.cuda_graph.CUDAGraphWrapper"

get_vit_attn_backend classmethod

get_vit_attn_backend(
    head_size: int, dtype: dtype
) -> AttentionBackendEnum
Source code in vllm/platforms/rocm.py
@classmethod
def get_vit_attn_backend(
    cls, head_size: int, dtype: torch.dtype
) -> AttentionBackendEnum:
    from importlib.util import find_spec

    from vllm._aiter_ops import rocm_aiter_ops

    if rocm_aiter_ops.is_mha_enabled():
        # Note: AITER FA is only supported for Qwen-VL models.
        # TODO: Add support for other VL models in their model class.
        return AttentionBackendEnum.ROCM_AITER_FA

    if on_gfx9() and find_spec("flash_attn") is not None:
        return AttentionBackendEnum.FLASH_ATTN

    return AttentionBackendEnum.TORCH_SDPA

is_fp8_fnuz classmethod

is_fp8_fnuz() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def is_fp8_fnuz(cls) -> bool:
    # Only device 0 is checked; this assumes MI300 platforms are homogeneous.
    return "gfx94" in torch.cuda.get_device_properties(0).gcnArchName

is_fully_connected classmethod

is_fully_connected(physical_device_ids: list[int]) -> bool

Query if the set of gpus are fully connected by xgmi (1 hop)

Source code in vllm/platforms/rocm.py
@classmethod
@with_amdsmi_context
def is_fully_connected(cls, physical_device_ids: list[int]) -> bool:
    """
    Query if the set of gpus are fully connected by xgmi (1 hop)
    """
    handles = [amdsmi_get_processor_handles()[i] for i in physical_device_ids]
    for i, handle in enumerate(handles):
        for j, peer_handle in enumerate(handles):
            if i < j:
                try:
                    link_type = amdsmi_topo_get_link_type(handle, peer_handle)
                    # type is 2 for XGMI
                    if link_type["hops"] != 1 or link_type["type"] != 2:
                        return False
                except AmdSmiException as error:
                    logger.error("AMD 1 hop XGMI detection failed.", exc_info=error)
                    return False
    return True
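
This check is used to decide whether collectives can assume single-hop XGMI links between all devices. A direct probe over the first four physical GPUs (requires ROCm and amdsmi):

from vllm.platforms.rocm import RocmPlatform

print(RocmPlatform.is_fully_connected([0, 1, 2, 3]))  # True only for a 1-hop XGMI clique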

is_navi classmethod

is_navi() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def is_navi(cls) -> bool:
    return "gfx1" in torch.cuda.get_device_properties(0).gcnArchName

opaque_attention_op classmethod

opaque_attention_op() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def opaque_attention_op(cls) -> bool:
    return True

set_device classmethod

set_device(device: device) -> None

Set the device for the current platform.

Source code in vllm/platforms/rocm.py
@classmethod
def set_device(cls, device: torch.device) -> None:
    """
    Set the device for the current platform.
    """
    torch.cuda.set_device(device)

support_hybrid_kv_cache classmethod

support_hybrid_kv_cache() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def support_hybrid_kv_cache(cls) -> bool:
    return True

support_static_graph_mode classmethod

support_static_graph_mode() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def support_static_graph_mode(cls) -> bool:
    return True

supports_fp8 classmethod

supports_fp8() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def supports_fp8(cls) -> bool:
    gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
    return any(gfx in gcn_arch for gfx in ["gfx94", "gfx95", "gfx12"])

supports_mx classmethod

supports_mx() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def supports_mx(cls) -> bool:
    gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
    return any(gfx in gcn_arch for gfx in ["gfx95"])

use_custom_allreduce classmethod

use_custom_allreduce() -> bool
Source code in vllm/platforms/rocm.py
@classmethod
def use_custom_allreduce(cls) -> bool:
    # We only enable custom allreduce for MI300 series
    gcn_arch = torch.cuda.get_device_properties(0).gcnArchName
    supported_archs = ["gfx94", "gfx95"]
    return any(gfx in gcn_arch for gfx in supported_archs)

verify_model_arch classmethod

verify_model_arch(model_arch: str) -> None
Source code in vllm/platforms/rocm.py
@classmethod
def verify_model_arch(cls, model_arch: str) -> None:
    if model_arch in _ROCM_UNSUPPORTED_MODELS:
        raise ValueError(
            f"Model architecture '{model_arch}' is not supported by ROCm for now."
        )

    if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
        msg = _ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]
        logger.warning(
            "Model architecture '%s' is partially supported by ROCm: %s",
            model_arch,
            msg,
        )

verify_quantization classmethod

verify_quantization(quant: str) -> None
Source code in vllm/platforms/rocm.py
@classmethod
def verify_quantization(cls, quant: str) -> None:
    super().verify_quantization(quant)
    if quant == "awq" and not envs.VLLM_USE_TRITON_AWQ:
        logger.warning(
            "Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ"
            " is not set, enabling VLLM_USE_TRITON_AWQ."
        )
    os.environ["VLLM_USE_TRITON_AWQ"] = "1"

on_gfx1x cached

on_gfx1x() -> bool
Source code in vllm/platforms/rocm.py
@cache
def on_gfx1x() -> bool:
    GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
    return any(arch in GPU_ARCH for arch in ["gfx11", "gfx12"])

on_gfx9 cached

on_gfx9() -> bool
Source code in vllm/platforms/rocm.py
@cache
def on_gfx9() -> bool:
    GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
    return any(arch in GPU_ARCH for arch in ["gfx90a", "gfx942", "gfx950"])
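
These cached architecture probes gate several backend and kernel choices above; on a ROCm machine they can be called directly for a quick capability check:

from vllm.platforms.rocm import on_gfx1x, on_gfx9

if on_gfx9():
    print("gfx90a/gfx942/gfx950 (Instinct-class) GPU detected")
elif on_gfx1x():
    print("gfx11/gfx12 (Radeon-class) GPU detected")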

on_gfx950 cached

on_gfx950() -> bool
Source code in vllm/platforms/rocm.py
@cache
def on_gfx950() -> bool:
    GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
    return any(arch in GPU_ARCH for arch in ["gfx950"])

on_mi3xx cached

on_mi3xx() -> bool
Source code in vllm/platforms/rocm.py
@cache
def on_mi3xx() -> bool:
    GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
    return any(arch in GPU_ARCH for arch in ["gfx942", "gfx950"])

use_rocm_custom_paged_attention cached

use_rocm_custom_paged_attention(
    qtype: dtype,
    head_size: int,
    block_size: int,
    gqa_ratio: int,
    max_seq_len: int,
    sliding_window: int,
    kv_cache_dtype: str,
    alibi_slopes: Tensor | None = None,
    sinks: Tensor | None = None,
) -> bool
Source code in vllm/platforms/rocm.py
@cache
def use_rocm_custom_paged_attention(
    qtype: torch.dtype,
    head_size: int,
    block_size: int,
    gqa_ratio: int,
    max_seq_len: int,
    sliding_window: int,
    kv_cache_dtype: str,
    alibi_slopes: torch.Tensor | None = None,
    sinks: torch.Tensor | None = None,
) -> bool:
    from vllm._aiter_ops import rocm_aiter_ops

    GPU_ARCH = torch.cuda.get_device_properties("cuda").gcnArchName
    ON_GFX9 = any(arch in GPU_ARCH for arch in ["gfx90a", "gfx942", "gfx950"])
    ON_GFX11_GFX12 = any(arch in GPU_ARCH for arch in ["gfx11", "gfx12"])

    # Custom paged attention was always supported on V0; on V1 it requires
    # sliding window to be disabled due to an observed numerical discrepancy.
    if ON_GFX9:
        return (
            (sliding_window == 0 or sliding_window == (-1, -1))
            and (qtype == torch.half or qtype == torch.bfloat16)
            and (head_size == 64 or head_size == 128)
            and (block_size == 16 or block_size == 32)
            and (gqa_ratio >= 1 and gqa_ratio <= 16)
            and max_seq_len <= 128 * 1024
            and (envs.VLLM_ROCM_CUSTOM_PAGED_ATTN)
            and not (rocm_aiter_ops.is_pa_attn_enabled())
            and sinks is None
        )

    else:
        return (
            ON_GFX11_GFX12
            and (sliding_window == 0 or sliding_window == (-1, -1))
            and (qtype == torch.half or qtype == torch.bfloat16)
            and head_size == 128
            and block_size == 16
            and (gqa_ratio >= 3 and gqa_ratio <= 16)
            and max_seq_len <= 128 * 1024
            and alibi_slopes is None
            and kv_cache_dtype == "auto"
            and envs.VLLM_ROCM_CUSTOM_PAGED_ATTN
            and sinks is None
        )
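
A direct probe for a typical decode configuration (requires a ROCm device; the argument values below are illustrative):

import torch
from vllm.platforms.rocm import use_rocm_custom_paged_attention

ok = use_rocm_custom_paged_attention(
    qtype=torch.bfloat16,
    head_size=128,
    block_size=16,
    gqa_ratio=8,
    max_seq_len=4096,
    sliding_window=0,
    kv_cache_dtype="auto",
)
print("custom paged attention eligible:", ok)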

with_amdsmi_context

with_amdsmi_context(fn)
Source code in vllm/platforms/rocm.py
def with_amdsmi_context(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        amdsmi_init()
        try:
            return fn(*args, **kwargs)
        finally:
            amdsmi_shut_down()

    return wrapper
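
The decorator simply brackets the wrapped call with amdsmi_init() and amdsmi_shut_down(). A hypothetical standalone use (ROCm with the amdsmi package required):

from amdsmi import amdsmi_get_processor_handles
from vllm.platforms.rocm import with_amdsmi_context

@with_amdsmi_context
def count_gpus() -> int:
    # amdsmi is initialized for the duration of this call and shut down afterwards.
    return len(amdsmi_get_processor_handles())

print(count_gpus())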