vllm.model_executor.models.minimax_text_01

Inference-only MiniMaxText01 model.
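
For orientation, here is a minimal offline-inference sketch using vLLM's public LLM API. The checkpoint id and tensor-parallel degree below are illustrative assumptions, not values defined in this module.

from vllm import LLM, SamplingParams

# Hypothetical checkpoint id and TP degree; adjust for your deployment.
llm = LLM(model="MiniMaxAI/MiniMax-Text-01",
          trust_remote_code=True,
          tensor_parallel_size=8)
outputs = llm.generate(["Hello, my name is"],
                       SamplingParams(temperature=0.7, max_tokens=64))
print(outputs[0].outputs[0].text)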

MiniMaxText01Attention

Bases: Module

Source code in vllm/model_executor/models/minimax_text_01.py
class MiniMaxText01Attention(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        head_dim: int,
        num_kv_heads: int,
        rotary_dim: int,
        max_position: int = 4096 * 32,
        rope_theta: float = 10000,
        sliding_window: Optional[int] = None,
        quant_config: Optional[QuantizationConfig] = None,
        layer_idx: int = None,
        cache_config: Optional[CacheConfig] = None,
        prefix: str = "mha",
    ) -> None:
        super().__init__()
        self.layer_idx = layer_idx

        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            assert self.total_num_kv_heads % tp_size == 0
        else:
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = head_dim

        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.sliding_window = sliding_window
        self.prefix = prefix

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.o_proj",
        )
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )
        self.rotary_emb = get_rope(
            head_size=self.head_dim,
            rotary_dim=rotary_dim,
            max_position=max_position,
            base=int(rope_theta),
            is_neox_style=True,
            dtype=torch.float32,
        )
        return

    def forward(self, hidden_states: torch.Tensor, output: torch.Tensor,
                positions: torch.Tensor, **kwargs) -> None:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v)
        output[:], _ = self.o_proj(attn_output)

attn instance-attribute

attn = Attention(
    num_heads,
    head_dim,
    scaling,
    num_kv_heads=num_kv_heads,
    cache_config=cache_config,
    quant_config=quant_config,
    prefix=f"{prefix}.attn",
)

head_dim instance-attribute

head_dim = head_dim

hidden_size instance-attribute

hidden_size = hidden_size

kv_size instance-attribute

kv_size = num_kv_heads * head_dim

layer_idx instance-attribute

layer_idx = layer_idx

num_heads instance-attribute

num_heads = total_num_heads // tp_size

num_kv_heads instance-attribute

num_kv_heads = max(1, total_num_kv_heads // tp_size)

o_proj instance-attribute

o_proj = RowParallelLinear(
    total_num_heads * head_dim,
    hidden_size,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.o_proj",
)

prefix instance-attribute

prefix = prefix

q_size instance-attribute

q_size = num_heads * head_dim

qkv_proj instance-attribute

qkv_proj = QKVParallelLinear(
    hidden_size,
    head_dim,
    total_num_heads,
    total_num_kv_heads,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.qkv_proj",
)

rope_theta instance-attribute

rope_theta = rope_theta

rotary_emb instance-attribute

rotary_emb = get_rope(
    head_size=head_dim,
    rotary_dim=rotary_dim,
    max_position=max_position,
    base=int(rope_theta),
    is_neox_style=True,
    dtype=float32,
)

scaling instance-attribute

scaling = head_dim ** -0.5

sliding_window instance-attribute

sliding_window = sliding_window

total_num_heads instance-attribute

total_num_heads = num_heads

total_num_kv_heads instance-attribute

total_num_kv_heads = num_kv_heads

__init__

__init__(
    hidden_size: int,
    num_heads: int,
    head_dim: int,
    num_kv_heads: int,
    rotary_dim: int,
    max_position: int = 4096 * 32,
    rope_theta: float = 10000,
    sliding_window: Optional[int] = None,
    quant_config: Optional[QuantizationConfig] = None,
    layer_idx: int = None,
    cache_config: Optional[CacheConfig] = None,
    prefix: str = "mha",
) -> None
Source code in vllm/model_executor/models/minimax_text_01.py
def __init__(
    self,
    hidden_size: int,
    num_heads: int,
    head_dim: int,
    num_kv_heads: int,
    rotary_dim: int,
    max_position: int = 4096 * 32,
    rope_theta: float = 10000,
    sliding_window: Optional[int] = None,
    quant_config: Optional[QuantizationConfig] = None,
    layer_idx: int = None,
    cache_config: Optional[CacheConfig] = None,
    prefix: str = "mha",
) -> None:
    super().__init__()
    self.layer_idx = layer_idx

    self.hidden_size = hidden_size
    tp_size = get_tensor_model_parallel_world_size()
    self.total_num_heads = num_heads
    assert self.total_num_heads % tp_size == 0
    self.num_heads = self.total_num_heads // tp_size
    self.total_num_kv_heads = num_kv_heads
    if self.total_num_kv_heads >= tp_size:
        assert self.total_num_kv_heads % tp_size == 0
    else:
        assert tp_size % self.total_num_kv_heads == 0
    self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
    self.head_dim = head_dim

    self.q_size = self.num_heads * self.head_dim
    self.kv_size = self.num_kv_heads * self.head_dim
    self.scaling = self.head_dim**-0.5
    self.rope_theta = rope_theta
    self.sliding_window = sliding_window
    self.prefix = prefix

    self.qkv_proj = QKVParallelLinear(
        hidden_size,
        self.head_dim,
        self.total_num_heads,
        self.total_num_kv_heads,
        bias=False,
        quant_config=quant_config,
        prefix=f"{prefix}.qkv_proj",
    )
    self.o_proj = RowParallelLinear(
        self.total_num_heads * self.head_dim,
        hidden_size,
        bias=False,
        quant_config=quant_config,
        prefix=f"{prefix}.o_proj",
    )
    self.attn = Attention(
        self.num_heads,
        self.head_dim,
        self.scaling,
        num_kv_heads=self.num_kv_heads,
        cache_config=cache_config,
        quant_config=quant_config,
        prefix=f"{prefix}.attn",
    )
    self.rotary_emb = get_rope(
        head_size=self.head_dim,
        rotary_dim=rotary_dim,
        max_position=max_position,
        base=int(rope_theta),
        is_neox_style=True,
        dtype=torch.float32,
    )
    return

forward

forward(
    hidden_states: Tensor,
    output: Tensor,
    positions: Tensor,
    **kwargs,
) -> None
Source code in vllm/model_executor/models/minimax_text_01.py
def forward(self, hidden_states: torch.Tensor, output: torch.Tensor,
            positions: torch.Tensor, **kwargs) -> None:
    qkv, _ = self.qkv_proj(hidden_states)
    q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
    q, k = self.rotary_emb(positions, q, k)
    attn_output = self.attn(q, k, v)
    output[:], _ = self.o_proj(attn_output)
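
As a concrete illustration of the per-rank sizing that forward relies on, assume 64 query heads, 8 KV heads, head_dim 128 and a tensor-parallel size of 8 (illustrative numbers, not taken from any particular checkpoint):

# Illustrative per-rank sizing for the fused QKV projection (assumed values).
total_num_heads, total_num_kv_heads, head_dim, tp_size = 64, 8, 128, 8

num_heads = total_num_heads // tp_size                 # 8 query heads per rank
num_kv_heads = max(1, total_num_kv_heads // tp_size)   # 1 KV head per rank
q_size = num_heads * head_dim                          # 1024
kv_size = num_kv_heads * head_dim                      # 128

# qkv_proj emits q_size + 2 * kv_size features per token on this rank, which
# forward() splits with qkv.split([q_size, kv_size, kv_size], dim=-1).
print(q_size, kv_size, q_size + 2 * kv_size)           # 1024 128 1280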

MiniMaxText01DecoderLayer

Bases: Module

Source code in vllm/model_executor/models/minimax_text_01.py
class MiniMaxText01DecoderLayer(nn.Module):

    def __init__(
        self,
        config: MiniMaxConfig,
        model_config: Optional[ModelConfig] = None,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        expert_num: int = 1,
        layer_id: int = None,
        linear_layer_id: Optional[int] = None,
        prefix: str = "decoder",
    ) -> None:
        self._ilayer = layer_id
        self._irank = get_tensor_model_parallel_rank()
        self.prefix = prefix
        super().__init__()

        self.hidden_size = config.hidden_size
        self.expert_num = expert_num

        rope_theta = getattr(config, "rope_theta", 10000)

        head_dim = getattr(config, "head_dim", None)
        if head_dim is None:
            head_dim = config.hidden_size // config.num_attention_heads
        if hasattr(config, "max_model_len") and isinstance(
                config.max_model_len, int):
            max_position_embeddings = min(config.max_position_embeddings,
                                          config.max_model_len)
        if config.attention_type == 0:
            use_headxdim = True
            hidden_inner = (head_dim * config.num_attention_heads
                            if use_headxdim else config.hidden_size)
            self.self_attn = MiniMaxText01LinearAttention(
                hidden_size=self.hidden_size,
                hidden_inner_size=hidden_inner,
                num_heads=config.num_attention_heads,
                head_dim=head_dim,
                max_position=max_position_embeddings,
                block_size=config.block if hasattr(config, "block") else 256,
                num_hidden_layer=config.num_hidden_layers,
                model_config=model_config,
                cache_config=cache_config,
                quant_config=quant_config,
                layer_idx=self._ilayer,
                linear_layer_idx=linear_layer_id,
                prefix=prefix)
        elif config.attention_type == 1:
            self.self_attn = MiniMaxText01Attention(
                hidden_size=self.hidden_size,
                num_heads=config.num_attention_heads,
                head_dim=head_dim,
                rotary_dim=config.rotary_dim
                if hasattr(config, "rotary_dim") else head_dim,
                num_kv_heads=config.num_key_value_heads,
                max_position=max_position_embeddings,
                rope_theta=rope_theta,
                sliding_window=config.sliding_window,
                quant_config=quant_config,
                layer_idx=self._ilayer,
                cache_config=cache_config,
                prefix=prefix)
        else:
            raise ValueError(
                f"Unsupported attention type: {self.config.attention_type}")

        if expert_num == 1:
            self.mlp = MiniMaxText01MLP(
                hidden_size=self.hidden_size,
                intermediate_size=config.intermediate_size,
                quant_config=quant_config,
                layer_idx=self._ilayer,
                prefix=prefix)
        else:
            self.block_sparse_moe = MiniMaxText01MoE(
                num_experts=expert_num,
                top_k=config.num_experts_per_tok,
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                layer_idx=self._ilayer,
                quant_config=quant_config,
                prefix=prefix)

        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)
        if config.attention_type == 0:
            self.layernorm_attention_alpha = getattr(
                config, 'layernorm_linear_attention_alpha',
                getattr(config, 'linear_attn_alpha_factor', 1))
            self.layernorm_attention_beta = getattr(
                config, 'layernorm_linear_attention_beta',
                getattr(config, 'linear_attn_beta_factor', 1))
        else:
            self.layernorm_attention_alpha = getattr(
                config, 'layernorm_full_attention_alpha',
                getattr(config, 'full_attn_alpha_factor', 1))
            self.layernorm_attention_beta = getattr(
                config, 'layernorm_full_attention_beta',
                getattr(config, 'full_attn_beta_factor', 1))
        self.layernorm_mlp_alpha = getattr(
            config, 'layernorm_mlp_alpha',
            getattr(config, 'mlp_alpha_factor', 1))
        self.layernorm_mlp_beta = getattr(
            config, 'layernorm_mlp_beta', getattr(config, 'mlp_beta_factor',
                                                  1))
        self.postnorm = getattr(config, 'postnorm', False)
        self.shared_moe = False

        shared_intermediate = getattr(config, 'shared_intermediate_size', 0)
        if isinstance(shared_intermediate, list):
            shared_intermediate = shared_intermediate[
                layer_id] if layer_id < len(shared_intermediate) else 0
        if shared_intermediate > 0:
            self.shared_moe = True
            self.shared_mlp = MiniMaxText01MLP(
                hidden_size=self.hidden_size,
                intermediate_size=shared_intermediate,
                quant_config=quant_config,
                layer_idx=self._ilayer,
                prefix=prefix)
            self.coefficient = ReplicatedLinear(
                self.hidden_size,
                1,
                bias=False,
                quant_config=quant_config,
                params_dtype=torch.float32,
            )
            self.coefficient.weight.weight_loader = (
                self.shared_moe_coefficient_loader)
            self.shared_moe_mode = getattr(config, 'shared_moe_mode',
                                           'softmax')
        return

    def forward(self,
                hidden_states: torch.Tensor,
                positions: torch.Tensor,
                kv_caches: Union[list[dict], Optional[torch.Tensor]],
                attn_metadata: AttentionMetadata,
                residual: Optional[torch.Tensor],
                is_warmup: bool = False,
                **kwargs) -> tuple[torch.Tensor, torch.Tensor]:

        layernorm_input = hidden_states
        layernorm_output = self.input_layernorm(layernorm_input)
        residual = layernorm_output if self.postnorm else layernorm_input
        self_attention_output = torch.empty_like(layernorm_output)
        self.self_attn(
            hidden_states=layernorm_output,
            output=self_attention_output,
            positions=positions,
            kv_caches=kv_caches,
        )

        residual = residual * self.layernorm_attention_alpha
        self_attention_output = (self_attention_output *
                                 self.layernorm_attention_beta)

        layernorm_input = residual + self_attention_output
        layernorm_output = self.post_attention_layernorm(layernorm_input)
        residual = layernorm_output if self.postnorm else layernorm_input

        if self.expert_num == 1:
            hidden_states = self.mlp(layernorm_output)
        else:
            moe_layernorm_output = layernorm_output.clone()
            moe_hidden_states = self.block_sparse_moe(moe_layernorm_output)
            if self.shared_moe:
                before_moe_dtype = layernorm_output.dtype
                moe_hidden_fp32 = moe_hidden_states.to(torch.float32)
                output_mlp = self.shared_mlp(layernorm_output).to(
                    torch.float32)

                coef, _ = self.coefficient(layernorm_output.to(torch.float32))

                if self.shared_moe_mode == 'softmax':
                    coef = torch.nn.functional.softmax(coef, dim=-1)
                    hidden_states = moe_hidden_fp32 * (
                        1 - coef) + output_mlp * coef
                elif self.shared_moe_mode == 'sigmoid':
                    coef = torch.nn.functional.sigmoid(coef)
                    hidden_states = moe_hidden_fp32 * (
                        1 - coef) + output_mlp * coef

                hidden_states = hidden_states.to(before_moe_dtype)
            else:
                hidden_states = moe_hidden_states

        residual = residual * self.layernorm_mlp_alpha
        hidden_states = hidden_states * self.layernorm_mlp_beta

        hidden_states = residual + hidden_states

        return hidden_states, None

    @staticmethod
    def shared_moe_coefficient_loader(param: torch.Tensor,
                                      loaded_weight: torch.Tensor) -> None:
        assert param.size() == loaded_weight.size()

        param.data.copy_(loaded_weight.to(torch.float32))
        return

_ilayer instance-attribute

_ilayer = layer_id

_irank instance-attribute

block_sparse_moe instance-attribute

block_sparse_moe = MiniMaxText01MoE(
    num_experts=expert_num,
    top_k=num_experts_per_tok,
    hidden_size=hidden_size,
    intermediate_size=intermediate_size,
    layer_idx=_ilayer,
    quant_config=quant_config,
    prefix=prefix,
)

coefficient instance-attribute

coefficient = ReplicatedLinear(
    hidden_size,
    1,
    bias=False,
    quant_config=quant_config,
    params_dtype=float32,
)

expert_num instance-attribute

expert_num = expert_num

hidden_size instance-attribute

hidden_size = hidden_size

input_layernorm instance-attribute

input_layernorm = RMSNorm(hidden_size, eps=rms_norm_eps)

layernorm_attention_alpha instance-attribute

layernorm_attention_alpha = getattr(
    config,
    "layernorm_linear_attention_alpha",
    getattr(config, "linear_attn_alpha_factor", 1),
)

layernorm_attention_beta instance-attribute

layernorm_attention_beta = getattr(
    config,
    "layernorm_linear_attention_beta",
    getattr(config, "linear_attn_beta_factor", 1),
)

layernorm_mlp_alpha instance-attribute

layernorm_mlp_alpha = getattr(
    config,
    "layernorm_mlp_alpha",
    getattr(config, "mlp_alpha_factor", 1),
)

layernorm_mlp_beta instance-attribute

layernorm_mlp_beta = getattr(
    config,
    "layernorm_mlp_beta",
    getattr(config, "mlp_beta_factor", 1),
)

mlp instance-attribute

mlp = MiniMaxText01MLP(
    hidden_size=hidden_size,
    intermediate_size=intermediate_size,
    quant_config=quant_config,
    layer_idx=_ilayer,
    prefix=prefix,
)

post_attention_layernorm instance-attribute

post_attention_layernorm = RMSNorm(
    hidden_size, eps=rms_norm_eps
)

postnorm instance-attribute

postnorm = getattr(config, 'postnorm', False)

prefix instance-attribute

prefix = prefix

self_attn instance-attribute

self_attn = MiniMaxText01LinearAttention(
    hidden_size=hidden_size,
    hidden_inner_size=hidden_inner,
    num_heads=num_attention_heads,
    head_dim=head_dim,
    max_position=max_position_embeddings,
    block_size=block if hasattr(config, "block") else 256,
    num_hidden_layer=num_hidden_layers,
    model_config=model_config,
    cache_config=cache_config,
    quant_config=quant_config,
    layer_idx=_ilayer,
    linear_layer_idx=linear_layer_id,
    prefix=prefix,
)

shared_mlp instance-attribute

shared_mlp = MiniMaxText01MLP(
    hidden_size=hidden_size,
    intermediate_size=shared_intermediate,
    quant_config=quant_config,
    layer_idx=_ilayer,
    prefix=prefix,
)

shared_moe instance-attribute

shared_moe = False

shared_moe_mode instance-attribute

shared_moe_mode = getattr(
    config, "shared_moe_mode", "softmax"
)

__init__

__init__(
    config: MiniMaxConfig,
    model_config: Optional[ModelConfig] = None,
    cache_config: Optional[CacheConfig] = None,
    quant_config: Optional[QuantizationConfig] = None,
    expert_num: int = 1,
    layer_id: int = None,
    linear_layer_id: Optional[int] = None,
    prefix: str = "decoder",
) -> None
Source code in vllm/model_executor/models/minimax_text_01.py
def __init__(
    self,
    config: MiniMaxConfig,
    model_config: Optional[ModelConfig] = None,
    cache_config: Optional[CacheConfig] = None,
    quant_config: Optional[QuantizationConfig] = None,
    expert_num: int = 1,
    layer_id: int = None,
    linear_layer_id: Optional[int] = None,
    prefix: str = "decoder",
) -> None:
    self._ilayer = layer_id
    self._irank = get_tensor_model_parallel_rank()
    self.prefix = prefix
    super().__init__()

    self.hidden_size = config.hidden_size
    self.expert_num = expert_num

    rope_theta = getattr(config, "rope_theta", 10000)

    head_dim = getattr(config, "head_dim", None)
    if head_dim is None:
        head_dim = config.hidden_size // config.num_attention_heads
    if hasattr(config, "max_model_len") and isinstance(
            config.max_model_len, int):
        max_position_embeddings = min(config.max_position_embeddings,
                                      config.max_model_len)
    if config.attention_type == 0:
        use_headxdim = True
        hidden_inner = (head_dim * config.num_attention_heads
                        if use_headxdim else config.hidden_size)
        self.self_attn = MiniMaxText01LinearAttention(
            hidden_size=self.hidden_size,
            hidden_inner_size=hidden_inner,
            num_heads=config.num_attention_heads,
            head_dim=head_dim,
            max_position=max_position_embeddings,
            block_size=config.block if hasattr(config, "block") else 256,
            num_hidden_layer=config.num_hidden_layers,
            model_config=model_config,
            cache_config=cache_config,
            quant_config=quant_config,
            layer_idx=self._ilayer,
            linear_layer_idx=linear_layer_id,
            prefix=prefix)
    elif config.attention_type == 1:
        self.self_attn = MiniMaxText01Attention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            head_dim=head_dim,
            rotary_dim=config.rotary_dim
            if hasattr(config, "rotary_dim") else head_dim,
            num_kv_heads=config.num_key_value_heads,
            max_position=max_position_embeddings,
            rope_theta=rope_theta,
            sliding_window=config.sliding_window,
            quant_config=quant_config,
            layer_idx=self._ilayer,
            cache_config=cache_config,
            prefix=prefix)
    else:
        raise ValueError(
            f"Unsupported attention type: {self.config.attention_type}")

    if expert_num == 1:
        self.mlp = MiniMaxText01MLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            quant_config=quant_config,
            layer_idx=self._ilayer,
            prefix=prefix)
    else:
        self.block_sparse_moe = MiniMaxText01MoE(
            num_experts=expert_num,
            top_k=config.num_experts_per_tok,
            hidden_size=config.hidden_size,
            intermediate_size=config.intermediate_size,
            layer_idx=self._ilayer,
            quant_config=quant_config,
            prefix=prefix)

    self.input_layernorm = RMSNorm(config.hidden_size,
                                   eps=config.rms_norm_eps)
    self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                            eps=config.rms_norm_eps)
    if config.attention_type == 0:
        self.layernorm_attention_alpha = getattr(
            config, 'layernorm_linear_attention_alpha',
            getattr(config, 'linear_attn_alpha_factor', 1))
        self.layernorm_attention_beta = getattr(
            config, 'layernorm_linear_attention_beta',
            getattr(config, 'linear_attn_beta_factor', 1))
    else:
        self.layernorm_attention_alpha = getattr(
            config, 'layernorm_full_attention_alpha',
            getattr(config, 'full_attn_alpha_factor', 1))
        self.layernorm_attention_beta = getattr(
            config, 'layernorm_full_attention_beta',
            getattr(config, 'full_attn_beta_factor', 1))
    self.layernorm_mlp_alpha = getattr(
        config, 'layernorm_mlp_alpha',
        getattr(config, 'mlp_alpha_factor', 1))
    self.layernorm_mlp_beta = getattr(
        config, 'layernorm_mlp_beta', getattr(config, 'mlp_beta_factor',
                                              1))
    self.postnorm = getattr(config, 'postnorm', False)
    self.shared_moe = False

    shared_intermediate = getattr(config, 'shared_intermediate_size', 0)
    if isinstance(shared_intermediate, list):
        shared_intermediate = shared_intermediate[
            layer_id] if layer_id < len(shared_intermediate) else 0
    if shared_intermediate > 0:
        self.shared_moe = True
        self.shared_mlp = MiniMaxText01MLP(
            hidden_size=self.hidden_size,
            intermediate_size=shared_intermediate,
            quant_config=quant_config,
            layer_idx=self._ilayer,
            prefix=prefix)
        self.coefficient = ReplicatedLinear(
            self.hidden_size,
            1,
            bias=False,
            quant_config=quant_config,
            params_dtype=torch.float32,
        )
        self.coefficient.weight.weight_loader = (
            self.shared_moe_coefficient_loader)
        self.shared_moe_mode = getattr(config, 'shared_moe_mode',
                                       'softmax')
    return

forward

forward(
    hidden_states: Tensor,
    positions: Tensor,
    kv_caches: Union[list[dict], Optional[Tensor]],
    attn_metadata: AttentionMetadata,
    residual: Optional[Tensor],
    is_warmup: bool = False,
    **kwargs,
) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/minimax_text_01.py
def forward(self,
            hidden_states: torch.Tensor,
            positions: torch.Tensor,
            kv_caches: Union[list[dict], Optional[torch.Tensor]],
            attn_metadata: AttentionMetadata,
            residual: Optional[torch.Tensor],
            is_warmup: bool = False,
            **kwargs) -> tuple[torch.Tensor, torch.Tensor]:

    layernorm_input = hidden_states
    layernorm_output = self.input_layernorm(layernorm_input)
    residual = layernorm_output if self.postnorm else layernorm_input
    self_attention_output = torch.empty_like(layernorm_output)
    self.self_attn(
        hidden_states=layernorm_output,
        output=self_attention_output,
        positions=positions,
        kv_caches=kv_caches,
    )

    residual = residual * self.layernorm_attention_alpha
    self_attention_output = (self_attention_output *
                             self.layernorm_attention_beta)

    layernorm_input = residual + self_attention_output
    layernorm_output = self.post_attention_layernorm(layernorm_input)
    residual = layernorm_output if self.postnorm else layernorm_input

    if self.expert_num == 1:
        hidden_states = self.mlp(layernorm_output)
    else:
        moe_layernorm_output = layernorm_output.clone()
        moe_hidden_states = self.block_sparse_moe(moe_layernorm_output)
        if self.shared_moe:
            before_moe_dtype = layernorm_output.dtype
            moe_hidden_fp32 = moe_hidden_states.to(torch.float32)
            output_mlp = self.shared_mlp(layernorm_output).to(
                torch.float32)

            coef, _ = self.coefficient(layernorm_output.to(torch.float32))

            if self.shared_moe_mode == 'softmax':
                coef = torch.nn.functional.softmax(coef, dim=-1)
                hidden_states = moe_hidden_fp32 * (
                    1 - coef) + output_mlp * coef
            elif self.shared_moe_mode == 'sigmoid':
                coef = torch.nn.functional.sigmoid(coef)
                hidden_states = moe_hidden_fp32 * (
                    1 - coef) + output_mlp * coef

            hidden_states = hidden_states.to(before_moe_dtype)
        else:
            hidden_states = moe_hidden_states

    residual = residual * self.layernorm_mlp_alpha
    hidden_states = hidden_states * self.layernorm_mlp_beta

    hidden_states = residual + hidden_states

    return hidden_states, None
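
The shared-expert branch above blends the routed MoE output with a shared MLP through a per-token scalar gate (softmax or sigmoid applied to the coefficient logit). A standalone sketch of the 'sigmoid' mixing rule, with random tensors standing in for the real sublayer outputs (names and shapes are illustrative):

import torch

# Toy stand-ins for the routed-MoE and shared-MLP outputs (assumed shapes).
moe_out = torch.randn(4, 16, dtype=torch.float32)     # [num_tokens, hidden]
shared_out = torch.randn(4, 16, dtype=torch.float32)
coef_logit = torch.randn(4, 1, dtype=torch.float32)   # per-token scalar gate

# 'sigmoid' mode: a gate in (0, 1) interpolates between the two branches.
coef = torch.sigmoid(coef_logit)
mixed = moe_out * (1 - coef) + shared_out * coef
print(mixed.shape)  # torch.Size([4, 16])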

shared_moe_coefficient_loader staticmethod

shared_moe_coefficient_loader(
    param: Tensor, loaded_weight: Tensor
) -> None
Source code in vllm/model_executor/models/minimax_text_01.py
@staticmethod
def shared_moe_coefficient_loader(param: torch.Tensor,
                                  loaded_weight: torch.Tensor) -> None:
    assert param.size() == loaded_weight.size()

    param.data.copy_(loaded_weight.to(torch.float32))
    return

MiniMaxText01ForCausalLM

Bases: Module, HasInnerState, IsHybrid

Source code in vllm/model_executor/models/minimax_text_01.py
class MiniMaxText01ForCausalLM(nn.Module, HasInnerState, IsHybrid):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:

        super().__init__()
        config = vllm_config.model_config.hf_config
        lora_config = vllm_config.lora_config
        self.config = config
        self.lora_config = lora_config

        if not hasattr(config, "sliding_window"):
            config.sliding_window = None

        self.CONCAT_FFN = True

        self.unpadded_vocab_size = self.config.vocab_size
        if hasattr(vllm_config.model_config, "max_model_len"):
            self.config.max_model_len = vllm_config.model_config.max_model_len
        self.model = MiniMaxText01Model(vllm_config=vllm_config,
                                        prefix=maybe_prefix(prefix, "model"))
        if get_pp_group().is_last_rank:
            self.lm_head = ParallelLMHead(
                self.unpadded_vocab_size,
                self.config.hidden_size,
                org_num_embeddings=self.config.vocab_size,
                padding_size=DEFAULT_VOCAB_PADDING_SIZE,
            )

            self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                    self.config.vocab_size)

        else:
            self.lm_head = PPMissingLayer()
        self.lm_head.float()
        flash_layer_count = sum(
            1 for attn_type in self.model.decoder_attention_types
            if attn_type == 1)
        self.kv_cache = [torch.tensor([]) for _ in range(flash_layer_count)]
        return

    def copy_inputs_before_cuda_graphs(self, input_buffers, **kwargs):
        return self.model.minimax_cache.copy_inputs_before_cuda_graphs(
            input_buffers, **kwargs)

    def get_seqlen_agnostic_capture_inputs(self, batch_size: int):
        return self.model.minimax_cache.get_seqlen_agnostic_capture_inputs(
            batch_size)

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
    ) -> torch.Tensor:
        return self.model.get_input_embeddings(input_ids)

    def forward(self,
                input_ids: torch.Tensor,
                positions: torch.Tensor,
                intermediate_tensors: Optional[IntermediateTensors] = None,
                inputs_embeds: Optional[torch.Tensor] = None,
                **kwargs) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, intermediate_tensors,
                                   inputs_embeds, **kwargs)

        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        logits = self.logits_processor(self.lm_head, hidden_states.float(),
                                       sampling_metadata)

        return logits

    def make_empty_intermediate_tensors(
            self, batch_size: int, dtype: torch.dtype,
            device: torch.device) -> IntermediateTensors:
        return IntermediateTensors({
            "hidden_states":
            torch.zeros((batch_size, self.config.hidden_size),
                        dtype=dtype,
                        device=device),
            "residual":
            torch.zeros((batch_size, self.config.hidden_size),
                        dtype=dtype,
                        device=device),
        })

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()

        def which_layer(name: str) -> int:
            if "layers" in name:
                after_layer = name.split("layers")[-1]
                return int(after_layer.split(".")[1])
            return None

        def is_linear_attn_layer(layer_idx: int) -> bool:
            if layer_idx is None or layer_idx >= len(
                    self.model.decoder_attention_types):
                return False
            return self.model.decoder_attention_types[layer_idx] == 0

        def is_moe_weight(name: str) -> bool:
            return "block_sparse_moe" in name and not name.endswith(".bias")

        def get_expert_id(param_name):
            pattern = r'model\.layers\.\d+\.block_sparse_moe\.experts\.(\d+)\.'
            match = re.search(pattern, param_name)
            if match:
                return match.group(1)
            return None

        def load_sparse_moe_weight(name: str, loaded_weight: torch.Tensor,
                                   self) -> None:
            if isinstance(self.config.num_local_experts, list):
                expert_params_mapping = [
                    ("w13_weight"
                     if weight_name in ["w1", "w3"] else "w2_weight",
                     f"experts.{expert_id}.{weight_name}.weight", expert_id)
                    for expert_id in range(max(self.config.num_local_experts))
                    for weight_name in ["w1", "w2", "w3"]
                ]
            else:
                expert_params_mapping = [
                    ("w13_scale" if weight_name in ["w1", "w3"] else
                     "w2_scale", f"{expert_id}.{weight_name}.weight_scale",
                     expert_id, weight_name)
                    for expert_id in range(self.config.num_local_experts)
                    for weight_name in ["w1", "w2", "w3"]
                ] + [("w13_weight" if weight_name in ["w1", "w3"] else
                      "w2_weight", f"{expert_id}.{weight_name}.weight",
                      expert_id, weight_name)
                     for expert_id in range(self.config.num_local_experts)
                     for weight_name in ["w1", "w2", "w3"]]
            for (param_name, weight_name, expert_id,
                 shard_id) in expert_params_mapping:
                name_expert_id = get_expert_id(name)
                if name_expert_id is not None and int(name_expert_id) != int(
                        expert_id):
                    continue
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                if is_pp_missing_parameter(name, self):
                    return
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader = weight_loader_with_alias(name)(weight_loader)
                weight_loader(param,
                              loaded_weight,
                              weight_name,
                              expert_id=expert_id,
                              shard_id=shard_id)
                loaded_params.add(name)
                break
            else:
                if is_pp_missing_parameter(name, self):
                    return
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader = weight_loader_with_alias(name)(weight_loader)
                weight_loader(param, loaded_weight)
                loaded_params.add(name)
            return

        def is_shared_mlp_weight(name: str) -> bool:
            return "shared_mlp" in name and not name.endswith(".bias")

        def load_shared_mlp_weight(name: str, loaded_weight: torch.Tensor,
                                   self) -> None:
            if not self.CONCAT_FFN:
                if "gate_proj" in name:
                    name = name.replace("gate_proj", "w1", 1)
                elif "up_proj" in name:
                    name = name.replace("up_proj", "w3", 1)
                elif "down_proj" in name:
                    name = name.replace("down_proj", "w2", 1)
            else:
                if "gate_proj" in name:
                    name = name.replace("gate_proj", "gate_up_proj", 1)
                    loaded_shard_id = 0
                elif "up_proj" in name:
                    name = name.replace("up_proj", "gate_up_proj", 1)
                    loaded_shard_id = 1
            if is_pp_missing_parameter(name, self):
                return
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader = weight_loader_with_alias(name)(weight_loader)
            if not self.CONCAT_FFN:
                weight_loader(param, loaded_weight)
            else:
                if "gate_up_proj" in name:
                    weight_loader(param, loaded_weight, loaded_shard_id)
                elif "down_proj" in name:
                    weight_loader(param, loaded_weight)
                else:
                    raise AssertionError(
                        "MLP weight not in [gate_up_proj, down_proj]")
            loaded_params.add(name)
            return

        def is_mha_weight(name: str) -> bool:
            return "self_attn" in name and not name.endswith(".bias")

        def load_linear_attn_weight(name: str, loaded_weight: torch.Tensor,
                                    self) -> None:
            if is_pp_missing_parameter(name, self):
                return
            param = params_dict[name]

            weight_loader = getattr(
                param, "weight_loader",
                MiniMaxText01LinearAttention.weight_direct_load)
            weight_loader = weight_loader_with_alias(name)(weight_loader)
            weight_loader(param, loaded_weight)
            loaded_params.add(name)
            return

        def load_flash_attn_weight(name: str, loaded_weight: torch.Tensor,
                                   self) -> None:

            flash_mha_params_mapping = [
                ("qkv_proj", "q_proj", "q"),
                ("qkv_proj", "k_proj", "k"),
                ("qkv_proj", "v_proj", "v"),
                ("gate_up_proj", "gate_proj", 0),
                ("gate_up_proj", "up_proj", 1),
            ]
            for (param_name, weight_name,
                 shard_id) in flash_mha_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                if is_pp_missing_parameter(name, self):
                    return
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader = weight_loader_with_alias(name)(weight_loader)
                weight_loader(param, loaded_weight, shard_id)
                loaded_params.add(name)
                break
            else:
                if is_pp_missing_parameter(name, self):
                    return
                param = params_dict[name]

                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader = weight_loader_with_alias(name)(weight_loader)
                weight_loader(param, loaded_weight)
                loaded_params.add(name)
            return

        def is_layer_norm_weight(name: str) -> bool:
            return "norm" in name and not name.endswith(
                ".bias") and name in params_dict

        def load_layer_norm_weight(name: str, loaded_weight: torch.Tensor,
                                   self) -> None:
            if is_pp_missing_parameter(name, self):
                return
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader = weight_loader_with_alias(name)(weight_loader)
            weight_loader(param, loaded_weight)
            loaded_params.add(name)
            return

        def load_basic_weight(name: str, loaded_weight: torch.Tensor,
                              self) -> None:
            if is_pp_missing_parameter(name, self):
                return
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader = weight_loader_with_alias(name)(weight_loader)
            weight_loader(param, loaded_weight)
            loaded_params.add(name)
            return

        for name, loaded_weight in weights:
            weight_at_layer = which_layer(name)
            if weight_at_layer and weight_at_layer >= len(
                    self.model.decoder_attention_types):
                continue

            if is_layer_norm_weight(name):
                load_layer_norm_weight(name, loaded_weight, self)
                continue
            if is_mha_weight(name):
                if is_linear_attn_layer(weight_at_layer):
                    load_linear_attn_weight(name, loaded_weight, self)
                else:
                    load_flash_attn_weight(name, loaded_weight, self)
                continue
            if is_moe_weight(name):
                load_sparse_moe_weight(name, loaded_weight, self)
                continue
            if is_shared_mlp_weight(name):
                load_shared_mlp_weight(name, loaded_weight, self)
                continue

            if "rotary_emb.inv_freq" in name:
                continue

            load_basic_weight(name, loaded_weight, self)
        return loaded_params

    @classmethod
    def get_mamba_state_dtype_from_config(
        cls,
        vllm_config: "VllmConfig",
    ) -> tuple[torch.dtype, torch.dtype]:

        return MambaStateDtypeCalculator.linear_attention_state_dtype(
            vllm_config.model_config.dtype,
            vllm_config.cache_config.mamba_cache_dtype,
        )

    @classmethod
    def get_mamba_state_shape_from_config(
        cls,
        vllm_config: "VllmConfig",
        use_v1: bool = True,
    ) -> tuple[tuple[int, ...], ...]:
        """Calculate shape for MiniMaxText01LinearAttention cache.

        Args:
            vllm_config: vLLM config
            use_v1: Get shapes for V1 (or V0)

        Returns:
            Tuple containing:
            - state_shape: Shape of the cache
        """
        parallel_config = vllm_config.parallel_config
        hf_config = vllm_config.model_config.hf_config

        return MambaStateShapeCalculator.linear_attention_state_shape(
            num_heads=hf_config.num_attention_heads,
            tp_size=parallel_config.tensor_parallel_size,
            head_dim=hf_config.head_dim,
        )

CONCAT_FFN instance-attribute

CONCAT_FFN = True

config instance-attribute

config = config

kv_cache instance-attribute

kv_cache = [tensor([]) for _ in range(flash_layer_count)]

lm_head instance-attribute

lm_head = ParallelLMHead(
    unpadded_vocab_size,
    hidden_size,
    org_num_embeddings=vocab_size,
    padding_size=DEFAULT_VOCAB_PADDING_SIZE,
)

logits_processor instance-attribute

logits_processor = LogitsProcessor(
    unpadded_vocab_size, vocab_size
)

lora_config instance-attribute

lora_config = lora_config

model instance-attribute

model = MiniMaxText01Model(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, "model"),
)

unpadded_vocab_size instance-attribute

unpadded_vocab_size = vocab_size

__init__

__init__(
    *, vllm_config: VllmConfig, prefix: str = ""
) -> None
Source code in vllm/model_executor/models/minimax_text_01.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:

    super().__init__()
    config = vllm_config.model_config.hf_config
    lora_config = vllm_config.lora_config
    self.config = config
    self.lora_config = lora_config

    if not hasattr(config, "sliding_window"):
        config.sliding_window = None

    self.CONCAT_FFN = True

    self.unpadded_vocab_size = self.config.vocab_size
    if hasattr(vllm_config.model_config, "max_model_len"):
        self.config.max_model_len = vllm_config.model_config.max_model_len
    self.model = MiniMaxText01Model(vllm_config=vllm_config,
                                    prefix=maybe_prefix(prefix, "model"))
    if get_pp_group().is_last_rank:
        self.lm_head = ParallelLMHead(
            self.unpadded_vocab_size,
            self.config.hidden_size,
            org_num_embeddings=self.config.vocab_size,
            padding_size=DEFAULT_VOCAB_PADDING_SIZE,
        )

        self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                self.config.vocab_size)

    else:
        self.lm_head = PPMissingLayer()
    self.lm_head.float()
    flash_layer_count = sum(
        1 for attn_type in self.model.decoder_attention_types
        if attn_type == 1)
    self.kv_cache = [torch.tensor([]) for _ in range(flash_layer_count)]
    return

compute_logits

compute_logits(
    hidden_states: Tensor,
    sampling_metadata: SamplingMetadata,
) -> Tensor
Source code in vllm/model_executor/models/minimax_text_01.py
def compute_logits(self, hidden_states: torch.Tensor,
                   sampling_metadata: SamplingMetadata) -> torch.Tensor:
    logits = self.logits_processor(self.lm_head, hidden_states.float(),
                                   sampling_metadata)

    return logits

copy_inputs_before_cuda_graphs

copy_inputs_before_cuda_graphs(input_buffers, **kwargs)
Source code in vllm/model_executor/models/minimax_text_01.py
def copy_inputs_before_cuda_graphs(self, input_buffers, **kwargs):
    return self.model.minimax_cache.copy_inputs_before_cuda_graphs(
        input_buffers, **kwargs)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    **kwargs,
) -> Tensor
Source code in vllm/model_executor/models/minimax_text_01.py
def forward(self,
            input_ids: torch.Tensor,
            positions: torch.Tensor,
            intermediate_tensors: Optional[IntermediateTensors] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            **kwargs) -> torch.Tensor:
    hidden_states = self.model(input_ids, positions, intermediate_tensors,
                               inputs_embeds, **kwargs)

    return hidden_states

get_input_embeddings

get_input_embeddings(input_ids: Tensor) -> Tensor
Source code in vllm/model_executor/models/minimax_text_01.py
def get_input_embeddings(
    self,
    input_ids: torch.Tensor,
) -> torch.Tensor:
    return self.model.get_input_embeddings(input_ids)

get_mamba_state_dtype_from_config classmethod

get_mamba_state_dtype_from_config(
    vllm_config: VllmConfig,
) -> tuple[dtype, dtype]
Source code in vllm/model_executor/models/minimax_text_01.py
@classmethod
def get_mamba_state_dtype_from_config(
    cls,
    vllm_config: "VllmConfig",
) -> tuple[torch.dtype, torch.dtype]:

    return MambaStateDtypeCalculator.linear_attention_state_dtype(
        vllm_config.model_config.dtype,
        vllm_config.cache_config.mamba_cache_dtype,
    )

get_mamba_state_shape_from_config classmethod

get_mamba_state_shape_from_config(
    vllm_config: VllmConfig, use_v1: bool = True
) -> tuple[tuple[int, ...], ...]

Calculate shape for MiniMaxText01LinearAttention cache.

Parameters:

Name         Type        Description                Default
vllm_config  VllmConfig  vLLM config                required
use_v1       bool        Get shapes for V1 (or V0)  True

Returns:

Type                         Description
tuple[tuple[int, ...], ...]  Tuple containing:
                             - state_shape: Shape of the cache
Source code in vllm/model_executor/models/minimax_text_01.py
@classmethod
def get_mamba_state_shape_from_config(
    cls,
    vllm_config: "VllmConfig",
    use_v1: bool = True,
) -> tuple[tuple[int, ...], ...]:
    """Calculate shape for MiniMaxText01LinearAttention cache.

    Args:
        vllm_config: vLLM config
        use_v1: Get shapes for V1 (or V0)

    Returns:
        Tuple containing:
        - state_shape: Shape of the cache
    """
    parallel_config = vllm_config.parallel_config
    hf_config = vllm_config.model_config.hf_config

    return MambaStateShapeCalculator.linear_attention_state_shape(
        num_heads=hf_config.num_attention_heads,
        tp_size=parallel_config.tensor_parallel_size,
        head_dim=hf_config.head_dim,
    )
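
Assuming the linear-attention cache keeps one head_dim x head_dim recurrent state per local head (an assumption about MambaStateShapeCalculator.linear_attention_state_shape, not something spelled out in this file), the per-rank state shape works out as follows for illustrative sizes:

# Illustrative values (assumed): 64 heads, head_dim 128, tensor-parallel size 8.
num_heads, head_dim, tp_size = 64, 128, 8
state_shape = (num_heads // tp_size, head_dim, head_dim)
print(state_shape)  # (8, 128, 128)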

get_seqlen_agnostic_capture_inputs

get_seqlen_agnostic_capture_inputs(batch_size: int)
Source code in vllm/model_executor/models/minimax_text_01.py
def get_seqlen_agnostic_capture_inputs(self, batch_size: int):
    return self.model.minimax_cache.get_seqlen_agnostic_capture_inputs(
        batch_size)

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/minimax_text_01.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    params_dict = dict(self.named_parameters())
    loaded_params: set[str] = set()

    def which_layer(name: str) -> int:
        if "layers" in name:
            after_layer = name.split("layers")[-1]
            return int(after_layer.split(".")[1])
        return None

    def is_linear_attn_layer(layer_idx: int) -> bool:
        if layer_idx is None or layer_idx >= len(
                self.model.decoder_attention_types):
            return False
        return self.model.decoder_attention_types[layer_idx] == 0

    def is_moe_weight(name: str) -> bool:
        return "block_sparse_moe" in name and not name.endswith(".bias")

    def get_expert_id(param_name):
        pattern = r'model\.layers\.\d+\.block_sparse_moe\.experts\.(\d+)\.'
        match = re.search(pattern, param_name)
        if match:
            return match.group(1)
        return None

    def load_sparse_moe_weight(name: str, loaded_weight: torch.Tensor,
                               self) -> None:
        if isinstance(self.config.num_local_experts, list):
            expert_params_mapping = [
                ("w13_weight"
                 if weight_name in ["w1", "w3"] else "w2_weight",
                 f"experts.{expert_id}.{weight_name}.weight", expert_id)
                for expert_id in range(max(self.config.num_local_experts))
                for weight_name in ["w1", "w2", "w3"]
            ]
        else:
            expert_params_mapping = [
                ("w13_scale" if weight_name in ["w1", "w3"] else
                 "w2_scale", f"{expert_id}.{weight_name}.weight_scale",
                 expert_id, weight_name)
                for expert_id in range(self.config.num_local_experts)
                for weight_name in ["w1", "w2", "w3"]
            ] + [("w13_weight" if weight_name in ["w1", "w3"] else
                  "w2_weight", f"{expert_id}.{weight_name}.weight",
                  expert_id, weight_name)
                 for expert_id in range(self.config.num_local_experts)
                 for weight_name in ["w1", "w2", "w3"]]
        for (param_name, weight_name, expert_id,
             shard_id) in expert_params_mapping:
            name_expert_id = get_expert_id(name)
            if name_expert_id is not None and int(name_expert_id) != int(
                    expert_id):
                continue
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)
            if is_pp_missing_parameter(name, self):
                return
            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader = weight_loader_with_alias(name)(weight_loader)
            weight_loader(param,
                          loaded_weight,
                          weight_name,
                          expert_id=expert_id,
                          shard_id=shard_id)
            loaded_params.add(name)
            break
        else:
            if is_pp_missing_parameter(name, self):
                return
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader = weight_loader_with_alias(name)(weight_loader)
            weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return

    def is_shared_mlp_weight(name: str) -> bool:
        return "shared_mlp" in name and not name.endswith(".bias")

    def load_shared_mlp_weight(name: str, loaded_weight: torch.Tensor,
                               self) -> None:
        if not self.CONCAT_FFN:
            if "gate_proj" in name:
                name = name.replace("gate_proj", "w1", 1)
            elif "up_proj" in name:
                name = name.replace("up_proj", "w3", 1)
            elif "down_proj" in name:
                name = name.replace("down_proj", "w2", 1)
        else:
            if "gate_proj" in name:
                name = name.replace("gate_proj", "gate_up_proj", 1)
                loaded_shard_id = 0
            elif "up_proj" in name:
                name = name.replace("up_proj", "gate_up_proj", 1)
                loaded_shard_id = 1
        if is_pp_missing_parameter(name, self):
            return
        param = params_dict[name]
        weight_loader = getattr(param, "weight_loader",
                                default_weight_loader)
        weight_loader = weight_loader_with_alias(name)(weight_loader)
        if not self.CONCAT_FFN:
            weight_loader(param, loaded_weight)
        else:
            if "gate_up_proj" in name:
                weight_loader(param, loaded_weight, loaded_shard_id)
            elif "down_proj" in name:
                weight_loader(param, loaded_weight)
            else:
                raise AssertionError(
                    "MLP weight not in [gate_up_proj, down_proj]")
        loaded_params.add(name)
        return

    def is_mha_weight(name: str) -> bool:
        return "self_attn" in name and not name.endswith(".bias")

    def load_linear_attn_weight(name: str, loaded_weight: torch.Tensor,
                                self) -> None:
        if is_pp_missing_parameter(name, self):
            return
        param = params_dict[name]

        weight_loader = getattr(
            param, "weight_loader",
            MiniMaxText01LinearAttention.weight_direct_load)
        weight_loader = weight_loader_with_alias(name)(weight_loader)
        weight_loader(param, loaded_weight)
        loaded_params.add(name)
        return

    def load_flash_attn_weight(name: str, loaded_weight: torch.Tensor,
                               self) -> None:

        flash_mha_params_mapping = [
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        for (param_name, weight_name,
             shard_id) in flash_mha_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)
            if is_pp_missing_parameter(name, self):
                return
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader = weight_loader_with_alias(name)(weight_loader)
            weight_loader(param, loaded_weight, shard_id)
            loaded_params.add(name)
            break
        else:
            if is_pp_missing_parameter(name, self):
                return
            param = params_dict[name]

            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader = weight_loader_with_alias(name)(weight_loader)
            weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return

    def is_layer_norm_weight(name: str) -> bool:
        return "norm" in name and not name.endswith(
            ".bias") and name in params_dict

    def load_layer_norm_weight(name: str, loaded_weight: torch.Tensor,
                               self) -> None:
        if is_pp_missing_parameter(name, self):
            return
        param = params_dict[name]
        weight_loader = getattr(param, "weight_loader",
                                default_weight_loader)
        weight_loader = weight_loader_with_alias(name)(weight_loader)
        weight_loader(param, loaded_weight)
        loaded_params.add(name)
        return

    def load_basic_weight(name: str, loaded_weight: torch.Tensor,
                          self) -> None:
        if is_pp_missing_parameter(name, self):
            return
        param = params_dict[name]
        weight_loader = getattr(param, "weight_loader",
                                default_weight_loader)
        weight_loader = weight_loader_with_alias(name)(weight_loader)
        weight_loader(param, loaded_weight)
        loaded_params.add(name)
        return

    for name, loaded_weight in weights:
        weight_at_layer = which_layer(name)
        if weight_at_layer and weight_at_layer >= len(
                self.model.decoder_attention_types):
            continue

        if is_layer_norm_weight(name):
            load_layer_norm_weight(name, loaded_weight, self)
            continue
        if is_mha_weight(name):
            if is_linear_attn_layer(weight_at_layer):
                load_linear_attn_weight(name, loaded_weight, self)
            else:
                load_flash_attn_weight(name, loaded_weight, self)
            continue
        if is_moe_weight(name):
            load_sparse_moe_weight(name, loaded_weight, self)
            continue
        if is_shared_mlp_weight(name):
            load_shared_mlp_weight(name, loaded_weight, self)
            continue

        if "rotary_emb.inv_freq" in name:
            continue

        load_basic_weight(name, loaded_weight, self)
    return loaded_params
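
For reference, a minimal standalone sketch (not part of the vLLM source) of the fused-projection renaming the loaders above rely on: separate gate_proj/up_proj and q/k/v_proj checkpoint names are rewritten to the merged parameter name plus a shard id, so the merged parameter's weight_loader can copy each piece into its slice. The example names are hypothetical.

# Mirrors flash_mha_params_mapping above; the checkpoint names are illustrative.
fused_mapping = [
    ("qkv_proj", "q_proj", "q"),
    ("qkv_proj", "k_proj", "k"),
    ("qkv_proj", "v_proj", "v"),
    ("gate_up_proj", "gate_proj", 0),
    ("gate_up_proj", "up_proj", 1),
]

def rename(name: str):
    for param_name, weight_name, shard_id in fused_mapping:
        if weight_name in name:
            # Rewrite to the merged parameter and remember which shard this is.
            return name.replace(weight_name, param_name), shard_id
    return name, None  # unfused weights load as-is

print(rename("model.layers.0.self_attn.k_proj.weight"))
# ('model.layers.0.self_attn.qkv_proj.weight', 'k')
print(rename("model.layers.0.mlp.up_proj.weight"))
# ('model.layers.0.mlp.gate_up_proj.weight', 1)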

make_empty_intermediate_tensors

make_empty_intermediate_tensors(
    batch_size: int, dtype: dtype, device: device
) -> IntermediateTensors
Source code in vllm/model_executor/models/minimax_text_01.py
def make_empty_intermediate_tensors(
        self, batch_size: int, dtype: torch.dtype,
        device: torch.device) -> IntermediateTensors:
    return IntermediateTensors({
        "hidden_states":
        torch.zeros((batch_size, self.config.hidden_size),
                    dtype=dtype,
                    device=device),
        "residual":
        torch.zeros((batch_size, self.config.hidden_size),
                    dtype=dtype,
                    device=device),
    })
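
A minimal usage sketch: on a non-first pipeline rank these placeholders stand in for hidden states until the real activations arrive. The batch size, hidden size, and dtype below are hypothetical.

import torch

batch_size, hidden_size = 8, 4096           # hypothetical values
placeholders = {
    "hidden_states": torch.zeros((batch_size, hidden_size), dtype=torch.bfloat16),
    "residual": torch.zeros((batch_size, hidden_size), dtype=torch.bfloat16),
}
print(placeholders["hidden_states"].shape)  # torch.Size([8, 4096])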

MiniMaxText01MLP

Bases: Module

Source code in vllm/model_executor/models/minimax_text_01.py
class MiniMaxText01MLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        quant_config: Optional[QuantizationConfig] = None,
        layer_idx: int = None,
        prefix: str = "mlp",
    ) -> None:
        super().__init__()
        self.layer_idx = layer_idx

        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size,
            [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj",
        )
        self.down_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.down_proj",
        )
        self.act_fn = SiluAndMul()
        return

    def forward(self, x: torch.Tensor) -> torch.Tensor:

        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x

act_fn instance-attribute

act_fn = SiluAndMul()

down_proj instance-attribute

down_proj = RowParallelLinear(
    intermediate_size,
    hidden_size,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.down_proj",
)

gate_up_proj instance-attribute

gate_up_proj = MergedColumnParallelLinear(
    hidden_size,
    [intermediate_size] * 2,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.gate_up_proj",
)

layer_idx instance-attribute

layer_idx = layer_idx

__init__

__init__(
    hidden_size: int,
    intermediate_size: int,
    quant_config: Optional[QuantizationConfig] = None,
    layer_idx: int = None,
    prefix: str = "mlp",
) -> None
Source code in vllm/model_executor/models/minimax_text_01.py
def __init__(
    self,
    hidden_size: int,
    intermediate_size: int,
    quant_config: Optional[QuantizationConfig] = None,
    layer_idx: int = None,
    prefix: str = "mlp",
) -> None:
    super().__init__()
    self.layer_idx = layer_idx

    self.gate_up_proj = MergedColumnParallelLinear(
        hidden_size,
        [intermediate_size] * 2,
        bias=False,
        quant_config=quant_config,
        prefix=f"{prefix}.gate_up_proj",
    )
    self.down_proj = RowParallelLinear(
        intermediate_size,
        hidden_size,
        bias=False,
        quant_config=quant_config,
        prefix=f"{prefix}.down_proj",
    )
    self.act_fn = SiluAndMul()
    return

forward

forward(x: Tensor) -> Tensor
Source code in vllm/model_executor/models/minimax_text_01.py
def forward(self, x: torch.Tensor) -> torch.Tensor:

    gate_up, _ = self.gate_up_proj(x)
    x = self.act_fn(gate_up)
    x, _ = self.down_proj(x)
    return x
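
For intuition, a single-device sketch of the same SwiGLU computation with plain torch.nn.Linear layers (sizes are hypothetical). The parallel linear layers above shard these matrices across tensor-parallel ranks but compute the same function.

import torch
import torch.nn.functional as F

hidden_size, intermediate_size = 1024, 2816   # hypothetical sizes
gate_up = torch.nn.Linear(hidden_size, 2 * intermediate_size, bias=False)
down = torch.nn.Linear(intermediate_size, hidden_size, bias=False)

def swiglu_mlp(x: torch.Tensor) -> torch.Tensor:
    gate, up = gate_up(x).chunk(2, dim=-1)    # SiluAndMul splits the fused output
    return down(F.silu(gate) * up)

print(swiglu_mlp(torch.randn(4, hidden_size)).shape)  # torch.Size([4, 1024])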

MiniMaxText01MoE

Bases: Module

Source code in vllm/model_executor/models/minimax_text_01.py
class MiniMaxText01MoE(nn.Module):

    def __init__(
        self,
        num_experts: int,
        top_k: int,
        hidden_size: int,
        intermediate_size: int,
        params_dtype: Optional[torch.dtype] = None,
        layer_idx: int = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "moe",
    ) -> None:
        super().__init__()

        self.layer_idx = layer_idx
        self.tp_size = get_tensor_model_parallel_world_size()
        self.num_total_experts = num_experts
        self.top_k = top_k
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size // self.tp_size
        self.quant_config = quant_config

        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        self.params_dtype = params_dtype

        self.gate = ReplicatedLinear(
            self.hidden_size,
            self.num_total_experts,
            bias=False,
            params_dtype=torch.float32,
            quant_config=None,
            prefix=f"{prefix}.gate",
        )
        self.gate.weight.weight_loader = MiniMaxText01MoE.gate_weight_loader

        self.experts = FusedMoE(
            num_experts=self.num_total_experts,
            top_k=self.top_k,
            hidden_size=self.hidden_size,
            intermediate_size=self.intermediate_size * self.tp_size,
            params_dtype=self.params_dtype,
            reduce_results=True,
            renormalize=True,
            quant_config=self.quant_config,
            tp_size=self.tp_size,
            prefix=f"{prefix}.experts",
        )
        return

    @staticmethod
    def gate_weight_loader(param: nn.Parameter,
                           loaded_weight: torch.Tensor) -> None:
        assert param.size() == loaded_weight.size()
        param.data.copy_(loaded_weight.to(torch.float32))
        return

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        num_tokens, hidden_size = hidden_states.shape
        hidden_states = hidden_states.view(-1, self.hidden_size)
        router_logits_fp32, _ = self.gate(hidden_states.to(torch.float32))
        final_hidden_states = self.experts(
            hidden_states, router_logits_fp32.to(hidden_states.dtype))
        final_hidden = final_hidden_states.view(num_tokens, hidden_size)
        return final_hidden

experts instance-attribute

experts = FusedMoE(
    num_experts=num_total_experts,
    top_k=top_k,
    hidden_size=hidden_size,
    intermediate_size=intermediate_size * tp_size,
    params_dtype=params_dtype,
    reduce_results=True,
    renormalize=True,
    quant_config=quant_config,
    tp_size=tp_size,
    prefix=f"{prefix}.experts",
)

gate instance-attribute

gate = ReplicatedLinear(
    hidden_size,
    num_total_experts,
    bias=False,
    params_dtype=float32,
    quant_config=None,
    prefix=f"{prefix}.gate",
)

hidden_size instance-attribute

hidden_size = hidden_size

intermediate_size instance-attribute

intermediate_size = intermediate_size // tp_size

layer_idx instance-attribute

layer_idx = layer_idx

num_total_experts instance-attribute

num_total_experts = num_experts

params_dtype instance-attribute

params_dtype = params_dtype

quant_config instance-attribute

quant_config = quant_config

top_k instance-attribute

top_k = top_k

tp_size instance-attribute

tp_size = get_tensor_model_parallel_world_size()

__init__

__init__(
    num_experts: int,
    top_k: int,
    hidden_size: int,
    intermediate_size: int,
    params_dtype: Optional[dtype] = None,
    layer_idx: int = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "moe",
) -> None
Source code in vllm/model_executor/models/minimax_text_01.py
def __init__(
    self,
    num_experts: int,
    top_k: int,
    hidden_size: int,
    intermediate_size: int,
    params_dtype: Optional[torch.dtype] = None,
    layer_idx: int = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "moe",
) -> None:
    super().__init__()

    self.layer_idx = layer_idx
    self.tp_size = get_tensor_model_parallel_world_size()
    self.num_total_experts = num_experts
    self.top_k = top_k
    self.hidden_size = hidden_size
    self.intermediate_size = intermediate_size // self.tp_size
    self.quant_config = quant_config

    if params_dtype is None:
        params_dtype = torch.get_default_dtype()
    self.params_dtype = params_dtype

    self.gate = ReplicatedLinear(
        self.hidden_size,
        self.num_total_experts,
        bias=False,
        params_dtype=torch.float32,
        quant_config=None,
        prefix=f"{prefix}.gate",
    )
    self.gate.weight.weight_loader = MiniMaxText01MoE.gate_weight_loader

    self.experts = FusedMoE(
        num_experts=self.num_total_experts,
        top_k=self.top_k,
        hidden_size=self.hidden_size,
        intermediate_size=self.intermediate_size * self.tp_size,
        params_dtype=self.params_dtype,
        reduce_results=True,
        renormalize=True,
        quant_config=self.quant_config,
        tp_size=self.tp_size,
        prefix=f"{prefix}.experts",
    )
    return

forward

forward(hidden_states: Tensor) -> Tensor
Source code in vllm/model_executor/models/minimax_text_01.py
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
    num_tokens, hidden_size = hidden_states.shape
    hidden_states = hidden_states.view(-1, self.hidden_size)
    router_logits_fp32, _ = self.gate(hidden_states.to(torch.float32))
    final_hidden_states = self.experts(
        hidden_states, router_logits_fp32.to(hidden_states.dtype))
    final_hidden = final_hidden_states.view(num_tokens, hidden_size)
    return final_hidden

gate_weight_loader staticmethod

gate_weight_loader(
    param: Parameter, loaded_weight: Tensor
) -> None
Source code in vllm/model_executor/models/minimax_text_01.py
@staticmethod
def gate_weight_loader(param: nn.Parameter,
                       loaded_weight: torch.Tensor) -> None:
    assert param.size() == loaded_weight.size()
    param.data.copy_(loaded_weight.to(torch.float32))
    return
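
As a rough illustration of the routing done by the float32 gate and FusedMoE above, a standalone top-k softmax sketch (sizes hypothetical; FusedMoE fuses the expert MLPs and handles dispatch and tensor parallelism internally, so this is not a drop-in replacement).

import torch

num_experts, top_k, hidden_size = 8, 2, 1024  # hypothetical
gate = torch.nn.Linear(hidden_size, num_experts, bias=False, dtype=torch.float32)

x = torch.randn(4, hidden_size)
logits = gate(x.float())                            # router runs in float32
weights = torch.softmax(logits, dim=-1)
topk_w, topk_idx = weights.topk(top_k, dim=-1)
topk_w = topk_w / topk_w.sum(dim=-1, keepdim=True)  # renormalize=True
# Each token is routed to its top_k experts and their outputs are combined
# with these normalized weights.
print(topk_idx.shape, topk_w.shape)                 # torch.Size([4, 2]) torch.Size([4, 2])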

MiniMaxText01Model

Bases: Module

Source code in vllm/model_executor/models/minimax_text_01.py
@support_torch_compile
class MiniMaxText01Model(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config: MiniMaxConfig = vllm_config.model_config.hf_config
        model_config = vllm_config.model_config
        quant_config = vllm_config.quant_config
        cache_config = vllm_config.cache_config
        scheduler_config = vllm_config.scheduler_config

        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.decoder_attention_types = getattr(
            config, "attn_type_list", False) or getattr(
                config, "decoder_attention_types", False)
        # The HF format uses "layer_types" instead of "attn_type_list"
        # where "linear_attention" is 0 and "full_attention" is 1
        if not self.decoder_attention_types and hasattr(config, "layer_types"):
            self.decoder_attention_types = []
            for layer_type in config.layer_types:
                if layer_type == "linear_attention":
                    self.decoder_attention_types.append(0)
                elif layer_type == "full_attention":
                    self.decoder_attention_types.append(1)
                else:
                    raise ValueError(f"Unsupported layer type: {layer_type}")
        # Default to full attention
        if not self.decoder_attention_types:
            self.decoder_attention_types = [1] * config.num_hidden_layers
        self.num_layers = config.num_hidden_layers

        self._layer_barrier = False
        if get_pp_group().is_first_rank:
            self.embed_tokens = VocabParallelEmbedding(
                self.vocab_size,
                config.hidden_size,
                org_num_embeddings=self.vocab_size,
            )
        else:
            self.embed_tokens = PPMissingLayer()

        def layer_fn(prefix):
            layer_idx = int(prefix.split('.')[-1])
            layer_config = config
            layer_config.attention_type = self.decoder_attention_types[
                layer_idx]
            layer_config.layer_idx = layer_idx

            decoder_kwargs = {
                "quant_config": quant_config,
                "layer_id": layer_idx,
                "model_config": model_config,
                "cache_config": cache_config
            }

            if layer_config.attention_type == 0:
                decoder_kwargs["linear_layer_id"] = sum(
                    1 for i in range(layer_idx)
                    if self.decoder_attention_types[i] == 0)
            else:
                decoder_kwargs["linear_layer_id"] = None

            if hasattr(config, "num_local_experts") and isinstance(
                    config.num_local_experts, list):
                decoder_kwargs["expert_num"] = config.num_local_experts[
                    layer_idx]
            elif hasattr(config, "num_local_experts") and isinstance(
                    config.num_local_experts, int):
                decoder_kwargs["expert_num"] = config.num_local_experts
            else:
                decoder_kwargs["expert_num"] = 1

            return MiniMaxText01DecoderLayer(layer_config,
                                             **decoder_kwargs,
                                             prefix=prefix)

        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers, layer_fn, prefix=f"{prefix}.layers")

        linear_layer_nums = sum(1 for i in range(config.num_hidden_layers)
                                if self.decoder_attention_types[i] == 0)
        max_slots_number = scheduler_config.max_num_seqs
        self.cache_shape = (linear_layer_nums, max_slots_number,
                            config.num_attention_heads //
                            get_tensor_model_parallel_world_size(),
                            config.head_dim, config.head_dim)
        _dummy = torch.zeros(1)
        self._dtype = _dummy.dtype
        del _dummy

        if not envs.VLLM_USE_V1:
            self.minimax_cache = MinimaxCacheManager(
                dtype=torch.float32, cache_shape=self.cache_shape)

        norm_kwargs = {}
        if hasattr(config, "rms_norm_eps"):
            norm_kwargs["eps"] = config.rms_norm_eps
        if get_pp_group().is_last_rank:
            self.norm = RMSNorm(config.hidden_size, **norm_kwargs)
        else:
            self.norm = PPMissingLayer()
        self.embed_scale = 1.0
        return

    def _clear_prefill_cache(self, attn_metadata,
                             minimax_cache_tensors: torch.Tensor, **kwargs):
        seq_to_slot_maps = {}
        seq_id_map = sum(list(kwargs["request_ids_to_seq_ids"].values()), [])
        for _, seq_to_slot_map in (
                self.minimax_cache.cache_indices_mapping.items()):
            seq_to_slot_maps.update(seq_to_slot_map)

        slots_to_clear = []
        for _prefill_id in range(getattr(attn_metadata, "num_prefills", 0)):
            if _prefill_id >= len(seq_id_map):
                break
            seq_id = seq_id_map[_prefill_id]
            if attn_metadata.context_lens_tensor[
                    _prefill_id] == 0 and seq_id in seq_to_slot_maps:
                slots_to_clear.append(seq_to_slot_maps[seq_id])

        if slots_to_clear:
            slots_tensor = torch.tensor(slots_to_clear,
                                        device=minimax_cache_tensors.device,
                                        dtype=torch.long)
            minimax_cache_tensors[:, slots_tensor, ...] = 0

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
    ) -> torch.Tensor:
        return self.embed_tokens(input_ids)

    def forward(self,
                input_ids: Optional[torch.Tensor],
                positions: torch.Tensor,
                intermediate_tensors: Optional[IntermediateTensors] = None,
                inputs_embeds: Optional[torch.Tensor] = None,
                **kwargs) -> Union[torch.Tensor, IntermediateTensors]:
        forward_context = get_forward_context()
        attn_metadata = forward_context.attn_metadata
        if not envs.VLLM_USE_V1 and attn_metadata is None:
            return None
        if not envs.VLLM_USE_V1:
            if "request_ids_to_seq_ids" not in kwargs:
                kwargs["request_ids_to_seq_ids"] = {}
            if "finished_requests_ids" not in kwargs:
                kwargs["finished_requests_ids"] = []
            (
                minimax_cache_tensors,
                state_indices_tensor,
            ) = self.minimax_cache.current_run_tensors(**kwargs)
            if getattr(attn_metadata, "num_prefills", 0) > 0:
                self._clear_prefill_cache(attn_metadata, minimax_cache_tensors,
                                          **kwargs)

            minimax_cache_params = MinimaxCacheParams(minimax_cache_tensors,
                                                      state_indices_tensor)
        else:
            minimax_cache_params = None

        if get_pp_group().is_first_rank:
            if inputs_embeds is None:
                hidden_states = self.embed_scale * self.embed_tokens(input_ids)
            else:
                hidden_states = inputs_embeds
            residual = None
        else:
            assert intermediate_tensors is not None
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]

        minimax_cache_index = 0

        for layer in islice(self.layers, self.start_layer, self.end_layer):
            _caches = None
            if not envs.VLLM_USE_V1 and isinstance(
                    layer.self_attn, MiniMaxText01LinearAttention):
                current_state_layer = minimax_cache_index
                _caches = minimax_cache_params.at_layer_idx(
                    current_state_layer)
                minimax_cache_index += 1
            hidden_states, residual = layer(
                hidden_states=hidden_states,
                positions=positions,
                kv_caches=_caches,
                attn_metadata=attn_metadata,
                residual=residual,
            )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors({
                "hidden_states": hidden_states,
                "residual": residual
            })
        if residual is not None:
            hidden_states, _ = self.norm(hidden_states, residual)
        else:
            hidden_states = self.norm(hidden_states)

        return hidden_states
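
To make the hybrid-layer bookkeeping concrete, a small standalone sketch (the layer_types list is hypothetical) of how HF layer_types map to the 0/1 decoder_attention_types list and how linear_layer_id is derived for the linear-attention layers.

layer_types = ["linear_attention"] * 3 + ["full_attention"]   # hypothetical config
decoder_attention_types = [0 if t == "linear_attention" else 1 for t in layer_types]
# linear_layer_id counts the linear-attention layers that precede each layer
linear_layer_ids = [
    sum(1 for t in decoder_attention_types[:i] if t == 0) if t == 0 else None
    for i, t in enumerate(decoder_attention_types)
]
print(decoder_attention_types)  # [0, 0, 0, 1]
print(linear_layer_ids)         # [0, 1, 2, None]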

_dtype instance-attribute

_dtype = dtype

_layer_barrier instance-attribute

_layer_barrier = False

cache_shape instance-attribute

cache_shape = (
    linear_layer_nums,
    max_slots_number,
    num_attention_heads
    // get_tensor_model_parallel_world_size(),
    head_dim,
    head_dim,
)
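
A worked example of the resulting linear-attention cache shape, with every number below assumed for illustration only.

linear_layer_nums = 3        # layers with decoder_attention_types == 0
max_num_seqs = 256           # scheduler_config.max_num_seqs
num_attention_heads = 64
tp_size = 8
head_dim = 128

cache_shape = (linear_layer_nums, max_num_seqs,
               num_attention_heads // tp_size, head_dim, head_dim)
print(cache_shape)           # (3, 256, 8, 128, 128)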

decoder_attention_types instance-attribute

decoder_attention_types = getattr(
    config, "attn_type_list", False
) or getattr(config, "decoder_attention_types", False)

embed_scale instance-attribute

embed_scale = 1.0

embed_tokens instance-attribute

embed_tokens = VocabParallelEmbedding(
    vocab_size, hidden_size, org_num_embeddings=vocab_size
)

minimax_cache instance-attribute

minimax_cache = MinimaxCacheManager(
    dtype=float32, cache_shape=cache_shape
)

norm instance-attribute

norm = RMSNorm(hidden_size, **norm_kwargs)

num_layers instance-attribute

num_layers = num_hidden_layers

padding_idx instance-attribute

padding_idx = pad_token_id

vocab_size instance-attribute

vocab_size = vocab_size

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/minimax_text_01.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config: MiniMaxConfig = vllm_config.model_config.hf_config
    model_config = vllm_config.model_config
    quant_config = vllm_config.quant_config
    cache_config = vllm_config.cache_config
    scheduler_config = vllm_config.scheduler_config

    self.padding_idx = config.pad_token_id
    self.vocab_size = config.vocab_size

    self.decoder_attention_types = getattr(
        config, "attn_type_list", False) or getattr(
            config, "decoder_attention_types", False)
    # The HF format uses "layer_types" instead of "attn_type_list"
    # where "linear_attention" is 0 and "full_attention" is 1
    if not self.decoder_attention_types and hasattr(config, "layer_types"):
        self.decoder_attention_types = []
        for layer_type in config.layer_types:
            if layer_type == "linear_attention":
                self.decoder_attention_types.append(0)
            elif layer_type == "full_attention":
                self.decoder_attention_types.append(1)
            else:
                raise ValueError(f"Unsupported layer type: {layer_type}")
    # Default to full attention
    if not self.decoder_attention_types:
        self.decoder_attention_types = [1] * config.num_hidden_layers
    self.num_layers = config.num_hidden_layers

    self._layer_barrier = False
    if get_pp_group().is_first_rank:
        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
            org_num_embeddings=self.vocab_size,
        )
    else:
        self.embed_tokens = PPMissingLayer()

    def layer_fn(prefix):
        layer_idx = int(prefix.split('.')[-1])
        layer_config = config
        layer_config.attention_type = self.decoder_attention_types[
            layer_idx]
        layer_config.layer_idx = layer_idx

        decoder_kwargs = {
            "quant_config": quant_config,
            "layer_id": layer_idx,
            "model_config": model_config,
            "cache_config": cache_config
        }

        if layer_config.attention_type == 0:
            decoder_kwargs["linear_layer_id"] = sum(
                1 for i in range(layer_idx)
                if self.decoder_attention_types[i] == 0)
        else:
            decoder_kwargs["linear_layer_id"] = None

        if hasattr(config, "num_local_experts") and isinstance(
                config.num_local_experts, list):
            decoder_kwargs["expert_num"] = config.num_local_experts[
                layer_idx]
        elif hasattr(config, "num_local_experts") and isinstance(
                config.num_local_experts, int):
            decoder_kwargs["expert_num"] = config.num_local_experts
        else:
            decoder_kwargs["expert_num"] = 1

        return MiniMaxText01DecoderLayer(layer_config,
                                         **decoder_kwargs,
                                         prefix=prefix)

    self.start_layer, self.end_layer, self.layers = make_layers(
        config.num_hidden_layers, layer_fn, prefix=f"{prefix}.layers")

    linear_layer_nums = sum(1 for i in range(config.num_hidden_layers)
                            if self.decoder_attention_types[i] == 0)
    max_slots_number = scheduler_config.max_num_seqs
    self.cache_shape = (linear_layer_nums, max_slots_number,
                        config.num_attention_heads //
                        get_tensor_model_parallel_world_size(),
                        config.head_dim, config.head_dim)
    _dummy = torch.zeros(1)
    self._dtype = _dummy.dtype
    del _dummy

    if not envs.VLLM_USE_V1:
        self.minimax_cache = MinimaxCacheManager(
            dtype=torch.float32, cache_shape=self.cache_shape)

    norm_kwargs = {}
    if hasattr(config, "rms_norm_eps"):
        norm_kwargs["eps"] = config.rms_norm_eps
    if get_pp_group().is_last_rank:
        self.norm = RMSNorm(config.hidden_size, **norm_kwargs)
    else:
        self.norm = PPMissingLayer()
    self.embed_scale = 1.0
    return

_clear_prefill_cache

_clear_prefill_cache(
    attn_metadata, minimax_cache_tensors: Tensor, **kwargs
)
Source code in vllm/model_executor/models/minimax_text_01.py
def _clear_prefill_cache(self, attn_metadata,
                         minimax_cache_tensors: torch.Tensor, **kwargs):
    seq_to_slot_maps = {}
    seq_id_map = sum(list(kwargs["request_ids_to_seq_ids"].values()), [])
    for _, seq_to_slot_map in (
            self.minimax_cache.cache_indices_mapping.items()):
        seq_to_slot_maps.update(seq_to_slot_map)

    slots_to_clear = []
    for _prefill_id in range(getattr(attn_metadata, "num_prefills", 0)):
        if _prefill_id >= len(seq_id_map):
            break
        seq_id = seq_id_map[_prefill_id]
        if attn_metadata.context_lens_tensor[
                _prefill_id] == 0 and seq_id in seq_to_slot_maps:
            slots_to_clear.append(seq_to_slot_maps[seq_id])

    if slots_to_clear:
        slots_tensor = torch.tensor(slots_to_clear,
                                    device=minimax_cache_tensors.device,
                                    dtype=torch.long)
        minimax_cache_tensors[:, slots_tensor, ...] = 0
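
A toy sketch of the slot clearing above (shapes hypothetical): slots whose sequences start a fresh prefill (context length 0) are zeroed across all linear-attention layers in a single indexed assignment.

import torch

# cache layout: (linear_layers, slots, heads, head_dim, head_dim) -- toy sizes
cache = torch.ones(2, 4, 1, 3, 3)
slots_to_clear = [1, 3]      # slots mapped to sequences beginning a new prefill
slots = torch.tensor(slots_to_clear, dtype=torch.long)
cache[:, slots, ...] = 0
print(cache[:, 1].sum().item(), cache[:, 0].sum().item())  # 0.0 18.0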

forward

forward(
    input_ids: Optional[Tensor],
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    **kwargs,
) -> Union[Tensor, IntermediateTensors]
Source code in vllm/model_executor/models/minimax_text_01.py
def forward(self,
            input_ids: Optional[torch.Tensor],
            positions: torch.Tensor,
            intermediate_tensors: Optional[IntermediateTensors] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            **kwargs) -> Union[torch.Tensor, IntermediateTensors]:
    forward_context = get_forward_context()
    attn_metadata = forward_context.attn_metadata
    if not envs.VLLM_USE_V1 and attn_metadata is None:
        return None
    if not envs.VLLM_USE_V1:
        if "request_ids_to_seq_ids" not in kwargs:
            kwargs["request_ids_to_seq_ids"] = {}
        if "finished_requests_ids" not in kwargs:
            kwargs["finished_requests_ids"] = []
        (
            minimax_cache_tensors,
            state_indices_tensor,
        ) = self.minimax_cache.current_run_tensors(**kwargs)
        if getattr(attn_metadata, "num_prefills", 0) > 0:
            self._clear_prefill_cache(attn_metadata, minimax_cache_tensors,
                                      **kwargs)

        minimax_cache_params = MinimaxCacheParams(minimax_cache_tensors,
                                                  state_indices_tensor)
    else:
        minimax_cache_params = None

    if get_pp_group().is_first_rank:
        if inputs_embeds is None:
            hidden_states = self.embed_scale * self.embed_tokens(input_ids)
        else:
            hidden_states = inputs_embeds
        residual = None
    else:
        assert intermediate_tensors is not None
        hidden_states = intermediate_tensors["hidden_states"]
        residual = intermediate_tensors["residual"]

    minimax_cache_index = 0

    for layer in islice(self.layers, self.start_layer, self.end_layer):
        _caches = None
        if not envs.VLLM_USE_V1 and isinstance(
                layer.self_attn, MiniMaxText01LinearAttention):
            current_state_layer = minimax_cache_index
            _caches = minimax_cache_params.at_layer_idx(
                current_state_layer)
            minimax_cache_index += 1
        hidden_states, residual = layer(
            hidden_states=hidden_states,
            positions=positions,
            kv_caches=_caches,
            attn_metadata=attn_metadata,
            residual=residual,
        )
    if not get_pp_group().is_last_rank:
        return IntermediateTensors({
            "hidden_states": hidden_states,
            "residual": residual
        })
    if residual is not None:
        hidden_states, _ = self.norm(hidden_states, residual)
    else:
        hidden_states = self.norm(hidden_states)

    return hidden_states

get_input_embeddings

get_input_embeddings(input_ids: Tensor) -> Tensor
Source code in vllm/model_executor/models/minimax_text_01.py
def get_input_embeddings(
    self,
    input_ids: torch.Tensor,
) -> torch.Tensor:
    return self.embed_tokens(input_ids)

replace_weight_name

replace_weight_name(
    name: str,
    key: str = None,
    to: str = None,
    count: int = None,
    prefix: str = None,
) -> str
Source code in vllm/model_executor/models/minimax_text_01.py
def replace_weight_name(name: str,
                        key: str = None,
                        to: str = None,
                        count: int = None,
                        prefix: str = None) -> str:
    name = name.replace(key, to) if count is None else \
        name.replace(key, to, count)
    return name
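
Usage is a thin wrapper over str.replace; a quick example with a hypothetical checkpoint name:

name = "model.layers.0.mlp.gate_proj.weight"
print(replace_weight_name(name, key="gate_proj", to="gate_up_proj", count=1))
# model.layers.0.mlp.gate_up_proj.weight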

weight_loader_with_alias

weight_loader_with_alias(alias: str)
Source code in vllm/model_executor/models/minimax_text_01.py
def weight_loader_with_alias(alias: str):

    def wrapper(func: callable):

        def inner_func(param: torch.Tensor,
                       loaded_weight: torch.Tensor,
                       *args,
                       prefix: str = None,
                       **kwargs):
            value = func(param, loaded_weight, *args, **kwargs)
            return value

        return inner_func

    return wrapper
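
A minimal usage sketch, assuming weight_loader_with_alias from this module is in scope; copy_loader below is a hypothetical stand-in for default_weight_loader. In its current form the decorator simply forwards the call, so the alias only affects call-site readability.

import torch

def copy_loader(param: torch.nn.Parameter, loaded_weight: torch.Tensor) -> None:
    param.data.copy_(loaded_weight)

param = torch.nn.Parameter(torch.empty(4, 4))
loaded = torch.randn(4, 4)
aliased = weight_loader_with_alias("model.layers.0.mlp.down_proj.weight")(copy_loader)
aliased(param, loaded)                  # behaves exactly like the wrapped loader
assert torch.equal(param.data, loaded)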