
Hauk Zero/from-mha-to-mla

This repository has not declared an open-source license file (LICENSE); before using it, check the project description and the upstream dependencies of its code.
mla_cc.py 4.21 KB
Hauk Zero committed 2 months ago · add all
import torch
from torch import nn
from typing import Optional


def apply_rope(x: torch.Tensor, *args, **kwargs):
    # Placeholder for rotary position embedding; a real implementation
    # would rotate x according to the token positions.
    return x


def update_compressed_kv_cache(compressed_kv: torch.Tensor):
    # Placeholder for the KV-cache update; repeating along the sequence
    # dimension simulates a cache that already holds earlier tokens.
    return compressed_kv.repeat(1, 5, 1)


def repeat_kv(hidden_states: torch.Tensor, n_rep: Optional[int] = 1):
    # Expand (batch, num_key_value_heads, slen, head_dim) to
    # (batch, num_key_value_heads * n_rep, slen, head_dim).
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(
        batch, num_key_value_heads, n_rep, slen, head_dim
    )
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
class MultiHeadLatentAttention(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        q_lora_rank: int,
        qk_rope_head_dim: int,
        qk_nope_head_dim: int,
        kv_lora_rank: int,
        v_head_dim: int,
        use_cache: Optional[bool] = False,
    ):
        super().__init__()
        qk_head_dim = qk_nope_head_dim + qk_rope_head_dim
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.qk_nope_head_dim = qk_nope_head_dim
        self.kv_lora_rank = kv_lora_rank
        self.qk_head_dim = qk_head_dim
        self.v_head_dim = v_head_dim
        self.scale = self.qk_head_dim**-0.5
        self.use_cache = use_cache
        # Low-rank query path: hidden_size -> q_lora_rank -> num_heads * qk_head_dim
        self.q_a_proj = nn.Linear(hidden_size, q_lora_rank)
        self.q_b_proj = nn.Linear(q_lora_rank, num_heads * qk_head_dim)
        # Down-projection to the shared RoPE key part plus the compressed KV latent
        self.kv_a_proj = nn.Linear(hidden_size, qk_rope_head_dim + kv_lora_rank)
        # Up-projection from the latent to per-head no-RoPE keys and values
        self.kv_b_proj = nn.Linear(
            kv_lora_rank, num_heads * (qk_nope_head_dim + v_head_dim)
        )
        self.o_proj = nn.Linear(num_heads * v_head_dim, hidden_size)
    def forward(
        self,
        hidden_states: torch.Tensor,
        position_ids: Optional[torch.LongTensor] = None,
    ):
        bsz, q_len, _ = hidden_states.size()
        # query_states: (bsz, num_heads, q_len, qk_head_dim)
        query_states = self.q_b_proj(self.q_a_proj(hidden_states))
        query_states = query_states.view(
            bsz, q_len, self.num_heads, self.qk_head_dim
        ).transpose(1, 2)
        q_nope, q_rope = query_states.split(
            [self.qk_nope_head_dim, self.qk_rope_head_dim], dim=-1
        )
        q_rope = apply_rope(q_rope)
        query_states = torch.cat([q_nope, q_rope], dim=-1)
        # compressed_kv: (bsz, q_len, qk_rope_head_dim + kv_lora_rank)
        compressed_kv = self.kv_a_proj(hidden_states)
        k_rope, kv_nope = compressed_kv.split(
            [self.qk_rope_head_dim, self.kv_lora_rank], dim=-1
        )
        # k_rope: (bsz, num_heads, q_len, qk_rope_head_dim)
        k_rope = repeat_kv(k_rope.unsqueeze(1), self.num_heads)
        k_rope = apply_rope(k_rope)
        # Store the compressed kv (RoPE key part + latent) as a whole
        if self.use_cache:
            compressed_kv = torch.cat([k_rope[:, 0, :, :], kv_nope], dim=-1)
            compressed_kv = update_compressed_kv_cache(compressed_kv)
            k_rope, kv_nope = compressed_kv.split(
                [self.qk_rope_head_dim, self.kv_lora_rank], dim=-1
            )
            k_rope = repeat_kv(k_rope.unsqueeze(1), self.num_heads)
        kv_len = compressed_kv.shape[1]
        # kv: (bsz, num_heads, kv_len, qk_nope_head_dim + v_head_dim)
        kv = (
            self.kv_b_proj(kv_nope)
            .view(bsz, kv_len, self.num_heads, self.qk_nope_head_dim + self.v_head_dim)
            .transpose(1, 2)
        )
        k_nope, value_states = kv.split(
            [self.qk_nope_head_dim, self.v_head_dim], dim=-1
        )
        # Keep the same [nope, rope] ordering as query_states
        key_states = torch.cat([k_nope, k_rope], dim=-1)
        # Softmax(Q @ K^T / sqrt(d_k))
        # attn_weights: (bsz, num_heads, q_len, kv_len)
        attn_weights = (
            torch.einsum("bhld, bhnd -> bhln", query_states, key_states) * self.scale
        )
        attn_weights = attn_weights.softmax(dim=-1)
        # A @ V
        # attn_output: (bsz, num_heads, q_len, v_head_dim)
        attn_output = torch.einsum("bhln, bhnd -> bhld", attn_weights, value_states)
        # output: (bsz, q_len, hidden_size)
        attn_output = self.o_proj(
            attn_output.transpose(1, 2)
            .contiguous()
            .view(bsz, q_len, self.num_heads * self.v_head_dim)
        )
        return attn_output
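
A minimal smoke test of the module might look like the sketch below; it is not part of mla_cc.py, and the dimension values are arbitrary illustrations rather than settings taken from the repository.

if __name__ == "__main__":
    torch.manual_seed(0)
    mla = MultiHeadLatentAttention(
        hidden_size=1024,
        num_heads=8,
        q_lora_rank=256,
        qk_rope_head_dim=32,
        qk_nope_head_dim=64,
        kv_lora_rank=128,
        v_head_dim=64,
        use_cache=False,
    )
    x = torch.randn(2, 16, 1024)  # (bsz, q_len, hidden_size)
    out = mla(x)
    print(out.shape)  # torch.Size([2, 16, 1024])

With use_cache=True, the stubbed update_compressed_kv_cache lengthens the cached sequence, so kv_len becomes larger than q_len while the output shape stays (bsz, q_len, hidden_size).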