I. EncoderLayer architecture as shown in the figure (the input shape is unchanged)

II. ConvLayer architecture as shown in the figure (the sequence-length dimension of the input is halved; see the short length check after the list below)

III. The Encoder as a whole
It consists of three parts:
1. Multiple EncoderLayers
2. Multiple ConvLayers
3. Layer normalization
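To make the "halves the length" note under section II concrete, here is a tiny sketch (not part of the original post) of the length arithmetic of the max-pooling step used in ConvLayer below, nn.MaxPool1d(kernel_size=3, stride=2, padding=1); pooled_length is a hypothetical helper name used only for this illustration:

def pooled_length(L, kernel_size=3, stride=2, padding=1):
    # Standard 1D pooling output-length formula.
    return (L + 2 * padding - kernel_size) // stride + 1

for L in (96, 48, 24):
    print(L, "->", pooled_length(L))  # 96 -> 48, 48 -> 24, 24 -> 12

So each ConvLayer roughly halves the temporal dimension while leaving the channel (d_model) dimension untouched.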
The code is as follows:
import torch
import torch.nn as nn
import torch.nn.functional as F


class AttentionLayer(nn.Module):
    def __init__(self, attention, d_model, n_heads, d_keys=None, d_values=None, mix=False):
        super(AttentionLayer, self).__init__()

        d_keys = d_keys or (d_model // n_heads)
        d_values = d_values or (d_model // n_heads)

        self.inner_attention = attention
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_model, d_keys * n_heads)
        self.value_projection = nn.Linear(d_model, d_values * n_heads)
        self.out_projection = nn.Linear(d_values * n_heads, d_model)
        self.n_heads = n_heads
        self.mix = mix

    def forward(self, queries, keys, values, attn_mask):
        B, L, _ = queries.shape
        _, S, _ = keys.shape
        H = self.n_heads

        queries = self.query_projection(queries).view(B, L, H, -1)
        keys = self.key_projection(keys).view(B, S, H, -1)
        values = self.value_projection(values).view(B, S, H, -1)

        out, attn = self.inner_attention(
            queries,
            keys,
            values,
            attn_mask
        )
        if self.mix:
            out = out.transpose(2, 1).contiguous()
        out = out.view(B, L, -1)

        return self.out_projection(out), attn


class ConvLayer(nn.Module):
    def __init__(self, c_in):
        super(ConvLayer, self).__init__()
        padding = 1 if torch.__version__ >= '1.5.0' else 2
        self.downConv = nn.Conv1d(in_channels=c_in,
                                  out_channels=c_in,
                                  kernel_size=3,
                                  padding=padding,
                                  padding_mode='circular')
        # Batch normalization normalizes each mini-batch during training so that its mean
        # is close to 0 and its variance close to 1, which speeds up training and improves
        # stability. It does not change the shape.
        self.norm = nn.BatchNorm1d(c_in)
        self.activation = nn.ELU()
        self.maxPool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        x = self.downConv(x.permute(0, 2, 1))
        x = self.norm(x)
        x = self.activation(x)
        x = self.maxPool(x)
        x = x.transpose(1, 2)
        return x


class EncoderLayer(nn.Module):
    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"):
        super(EncoderLayer, self).__init__()
        d_ff = d_ff or 4 * d_model
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, attn_mask=None):
        # x: [B, L, D]
        # x = x + self.dropout(self.attention(
        #     x, x, x,
        #     attn_mask=attn_mask
        # ))
        new_x, attn = self.attention(
            x, x, x,
            attn_mask=attn_mask
        )
        x = x + self.dropout(new_x)

        y = x = self.norm1(x)
        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
        y = self.dropout(self.conv2(y).transpose(-1, 1))

        return self.norm2(x + y), attn


class Encoder(nn.Module):
    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
        super(Encoder, self).__init__()
        self.attn_layers = nn.ModuleList(attn_layers)
        self.conv_layers = nn.ModuleList(conv_layers) if conv_layers is not None else None
        self.norm = norm_layer

    def forward(self, x, attn_mask=None):
        # x: [B, L, D]
        attns = []
        if self.conv_layers is not None:
            for attn_layer, conv_layer in zip(self.attn_layers, self.conv_layers):
                x, attn = attn_layer(x, attn_mask=attn_mask)
                x = conv_layer(x)
                attns.append(attn)
            x, attn = self.attn_layers[-1](x, attn_mask=attn_mask)
            attns.append(attn)
        else:
            for attn_layer in self.attn_layers:
                x, attn = attn_layer(x, attn_mask=attn_mask)
                attns.append(attn)

        if self.norm is not None:
            x = self.norm(x)

        return x, attns
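As a quick sanity check of the shape claims above (an EncoderLayer preserves the input shape, a ConvLayer halves the length), here is a minimal usage sketch. It is not part of the original post: SimpleAttention is a plain scaled dot-product attention standing in for whatever attention module is actually plugged into AttentionLayer, and the hyperparameters (d_model=512, n_heads=8, a batch of 2 sequences of length 96) are arbitrary.

import torch
import torch.nn as nn

class SimpleAttention(nn.Module):
    # Demo-only attention: receives queries/keys/values already split into heads,
    # shaped [B, L, H, E], exactly as AttentionLayer passes them in, and returns
    # an output of the same shape plus the attention weights.
    def forward(self, queries, keys, values, attn_mask):
        B, L, H, E = queries.shape
        scores = torch.einsum("blhe,bshe->bhls", queries, keys) / (E ** 0.5)
        weights = torch.softmax(scores, dim=-1)   # attn_mask is ignored in this demo
        out = torch.einsum("bhls,bshe->blhe", weights, values)
        return out, weights

d_model, n_heads = 512, 8
encoder = Encoder(
    attn_layers=[
        EncoderLayer(AttentionLayer(SimpleAttention(), d_model, n_heads), d_model)
        for _ in range(3)
    ],
    conv_layers=[ConvLayer(d_model) for _ in range(2)],  # one fewer than attn_layers
    norm_layer=nn.LayerNorm(d_model),
)

x = torch.randn(2, 96, d_model)   # [B, L, D]
out, attns = encoder(x)
print(out.shape)                  # torch.Size([2, 24, 512])

With 3 EncoderLayers and 2 ConvLayers the length goes 96 -> 48 -> 24 while d_model stays 512: the attention blocks keep the shape and each ConvLayer halves the length, matching the figures described at the top.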