Python: implementing multi-head self-attention with PyTorch; a Multi-Head Self-Attention code example
import torch
import torch.nn as nn
class MultiheadSelfAttention(nn.Module):
    def __init__(self, emb_dim, heads):
        super(MultiheadSelfAttention, self).__init__()
        assert emb_dim % heads == 0, "emb_dim must be divisible by heads"
        self.emb_dim = emb_dim
        self.heads = heads
        self.head_dim = emb_dim // heads
        self.dropout = nn.Dropout(0.1)
        self.key_projection = nn.Linear(emb_dim, emb_dim)
        self.query_projection = nn.Linear(emb_dim, emb_dim)
        self.value_projection = nn.Linear(emb_dim, emb_dim)
        self.output_projection = nn.Linear(emb_dim, emb_dim)
    def forward(self, query, key, value, mask=None):
        batch_size = query.shape[0]
        query_len = query.shape[1]
        key_len = key.shape[1]
        # Linear projections
        query = self.query_projection(query)
        key = self.key_projection(key)
        value = self.value_projection(value)
        # Split into heads: (batch, len, emb_dim) -> (batch, heads, len, head_dim)
        query = query.view(batch_size, query_len, self.heads, self.head_dim).transpose(1, 2)
        key = key.view(batch_size, key_len, self.heads, self.head_dim).transpose(1, 2)
        value = value.view(batch_size, key_len, self.heads, self.head_dim).transpose(1, 2)
        # Scaled dot-product scores: (batch, heads, query_len, key_len)
        score = torch.matmul(query, key.transpose(-2, -1))
        score = score / (self.head_dim ** 0.5)
        # Masking: positions where mask == 0 are blocked
        if mask is not None:
            mask = mask.unsqueeze(1)  # (batch, 1, query_len, key_len), broadcasts over heads
            score = score.masked_fill(mask == 0, -1e10)
        # Attention weights
        attn_weights = torch.softmax(score, dim=-1)
        attn_weights = self.dropout(attn_weights)
        # Weighted sum of values, then merge heads back to emb_dim
        output = torch.matmul(attn_weights, value)
        output = output.transpose(1, 2).contiguous()
        output = output.view(batch_size, query_len, self.emb_dim)
        output = self.output_projection(output)
        return output
# Example usage
emb_dim = 512
heads = 8
attention = MultiheadSelfAttention(emb_dim, heads)
query = torch.randn(10, 8, emb_dim)  # assume a batch size of 10 and a sequence length of 8
key = value = query  # here we use the same input as key and value (self-attention)
output = attention(query, key, value)
print(output.shape)  # output: torch.Size([10, 8, 512])
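The forward method also accepts an optional mask. Below is a minimal sketch of masked usage, assuming a 0/1 mask of shape (batch, query_len, key_len) in which 0 marks positions to block; the per-sequence lengths are purely illustrative and not from the original post:

# Masked usage: block attention to padded key positions (hypothetical lengths)
seq_len = 8
lengths = torch.tensor([8, 8, 6, 6, 5, 8, 7, 8, 8, 4])  # illustrative valid length of each sequence
key_padding = (torch.arange(seq_len).unsqueeze(0) < lengths.unsqueeze(1)).long()  # (10, 8)
mask = key_padding.unsqueeze(1).repeat(1, seq_len, 1)  # (10, 8, 8): zero out padded key positions
masked_output = attention(query, key, value, mask=mask)
print(masked_output.shape)  # torch.Size([10, 8, 512])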
This code defines a class named MultiheadSelfAttention that implements the multi-head self-attention mechanism. The class contains the linear projection layers and the multi-head attention computation. In the forward
method, the inputs pass through the linear projections, are split into heads and transposed, the scaled attention scores are computed, the mask is applied if one is provided, softmax and dropout produce the attention weights, the weighted values are transposed back and merged into a single vector per position, and a final linear projection produces the output. This class can be used for self-attention over text data.
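Because the query and key/value inputs are projected separately, the same module can also be used in a cross-attention setting, where the key/value sequence differs in length from the query sequence. A minimal sketch, with illustrative tensor names that are not part of the original example:

decoder_states = torch.randn(10, 8, emb_dim)   # query side, length 8
encoder_states = torch.randn(10, 20, emb_dim)  # key/value side, length 20
cross_output = attention(decoder_states, encoder_states, encoder_states)
print(cross_output.shape)  # torch.Size([10, 8, 512])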