In PyTorch, self-attention can be implemented with the built-in torch.nn.MultiheadAttention module, or written from scratch to make the mechanics explicit. The steps for a from-scratch multi-head implementation are as follows (a short nn.MultiheadAttention sketch is included at the end):
import torch
import torch.nn as nn

class SelfAttention(nn.Module):
    def __init__(self, embed_size, heads):
        super(SelfAttention, self).__init__()
        self.embed_size = embed_size
        self.heads = heads
        self.head_dim = embed_size // heads
        assert self.head_dim * heads == embed_size, "Embed size needs to be divisible by heads"

        # Per-head projections for values, keys and queries, plus the output projection
        self.values = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.fc_out = nn.Linear(heads * self.head_dim, embed_size)

    def forward(self, value, key, query, mask=None):
        N = query.shape[0]
        value_len, key_len, query_len = value.shape[1], key.shape[1], query.shape[1]

        # Split the embedding into self.heads pieces: (N, seq_len, heads, head_dim)
        values = value.reshape(N, value_len, self.heads, self.head_dim)
        keys = key.reshape(N, key_len, self.heads, self.head_dim)
        queries = query.reshape(N, query_len, self.heads, self.head_dim)

        values = self.values(values)
        keys = self.keys(keys)
        queries = self.queries(queries)

        # Attention scores for every head: (N, heads, query_len, key_len)
        energy = torch.einsum("nqhd,nkhd->nhqk", queries, keys)

        if mask is not None:
            energy = energy.masked_fill(mask == 0, float("-1e20"))

        # Scaled dot-product attention; scale by sqrt(head_dim), i.e. sqrt(d_k) in the Transformer paper
        attention = torch.softmax(energy / (self.head_dim ** 0.5), dim=3)

        # Weighted sum over values, then concatenate the heads back to embed_size
        out = torch.einsum("nhql,nlhd->nqhd", attention, values).reshape(
            N, query_len, self.heads * self.head_dim
        )

        out = self.fc_out(out)
        return out
# Define input tensor
value = torch.rand(3, 10, 512) # (N, value_len, embed_size)
key = torch.rand(3, 10, 512) # (N, key_len, embed_size)
query = torch.rand(3, 10, 512) # (N, query_len, embed_size)
# Create self attention layer
self_attn = SelfAttention(512, 8)
# Perform self attention
output = self_attn(value, key, query)
print(output.shape)
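Running this script should print torch.Size([3, 10, 512]), i.e. the output keeps the input's (N, query_len, embed_size) shape. The forward method also accepts an optional mask; any tensor that broadcasts against the (N, heads, query_len, key_len) score tensor works. A hypothetical causal-mask call, consistent with the masked_fill logic above, might look like this:

# Hypothetical causal mask: lower-triangular, broadcast over batch and heads
causal_mask = torch.tril(torch.ones(10, 10)).unsqueeze(0).unsqueeze(0)  # (1, 1, query_len, key_len)
masked_output = self_attn(value, key, query, causal_mask)
print(masked_output.shape)  # torch.Size([3, 10, 512])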
With the steps above, the self-attention mechanism is implemented in PyTorch.
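As mentioned at the start, the same result can also be obtained with the built-in torch.nn.MultiheadAttention module. Below is a minimal sketch; it assumes a PyTorch version that supports the batch_first argument (1.9+), which lets the layer accept the same (N, seq_len, embed_size) layout used above:

# Built-in multi-head attention layer: 8 heads over a 512-dim embedding
mha = nn.MultiheadAttention(embed_dim=512, num_heads=8, batch_first=True)
# The built-in module takes (query, key, value) and also returns the attention weights
attn_output, attn_weights = mha(query, key, value)
print(attn_output.shape)  # torch.Size([3, 10, 512])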