
E3D-LSTM Code (PyTorch Version)

Table of contents

00 Preface
01 E3D-LSTM code (PyTorch version)
02 Paper download

00 Preface

The test code at the end of the listing is the most useful part: it lets you see roughly how the tensor dimensions change as data propagates through the network, which makes it much easier to adapt the model to your own dataset.

If you need the dataset from GitHub or a runnable copy of the code, feel free to message me.
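One lightweight way to watch those dimensions is to attach forward hooks that print each submodule's output shape. The sketch below is my own illustration, not part of the original post: the trace_shapes helper and the toy nn.Sequential are hypothetical, and you would pass the E3DLSTM_NET instance from the next section instead.

import torch
import torch.nn as nn

def trace_shapes(model):
    # Register a forward hook on every leaf submodule that prints the
    # module's name and the shape of the tensor it produces.
    handles = []
    for name, module in model.named_modules():
        if len(list(module.children())) == 0:  # leaf modules only
            def hook(mod, inp, out, name=name):
                if isinstance(out, torch.Tensor):
                    print(f"{name:24s} -> {tuple(out.shape)}")
            handles.append(module.register_forward_hook(hook))
    return handles  # keep these so the hooks can be removed later

# Toy demo (hypothetical); swap in the E3DLSTM_NET model defined below.
net = nn.Sequential(nn.Conv3d(4, 8, 3, padding=1), nn.ReLU(), nn.Conv3d(8, 1, 1))
handles = trace_shapes(net)
net(torch.ones(2, 4, 16, 16, 16))
for h in handles:
    h.remove()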
01 E3D-LSTM code (PyTorch version)

# Library imports
from functools import reduce
from src.utils import nice_print, mem_report, cpu_stats
import copy
import operator
import torch
import torch.nn as nn
import torch.nn.functional as F


# E3D-LSTM model
class E3DLSTM(nn.Module):
    def __init__(self, input_shape, hidden_size, num_layers, kernel_size, tau):
        super().__init__()

        self._tau = tau
        self._cells = []

        input_shape = list(input_shape)
        for i in range(num_layers):
            cell = E3DLSTMCell(input_shape, hidden_size, kernel_size)
            # NOTE hidden state becomes input to the next cell
            input_shape[0] = hidden_size
            self._cells.append(cell)
            # Hook to register submodule
            setattr(self, "cell{}".format(i), cell)

    def forward(self, input):
        # NOTE (seq_len, batch, input_shape)
        batch_size = input.size(1)
        c_history_states = []
        h_states = []
        outputs = []

        for step, x in enumerate(input):
            for cell_idx, cell in enumerate(self._cells):
                if step == 0:
                    c_history, m, h = self._cells[cell_idx].init_hidden(
                        batch_size, self._tau, input.device
                    )
                    c_history_states.append(c_history)
                    h_states.append(h)

                # NOTE c_history and h are coming from the previous time stamp,
                # but we iterate over cells
                c_history, m, h = cell(x, c_history_states[cell_idx], m, h_states[cell_idx])
                c_history_states[cell_idx] = c_history
                h_states[cell_idx] = h
                # NOTE hidden state of previous LSTM is passed as input to the next one
                x = h

            outputs.append(h)

        # NOTE concat along the channels
        return torch.cat(outputs, dim=1)


class E3DLSTMCell(nn.Module):
    def __init__(self, input_shape, hidden_size, kernel_size):
        super().__init__()

        in_channels = input_shape[0]
        self._input_shape = input_shape
        self._hidden_size = hidden_size

        # memory gates: input, cell (input modulation), forget
        self.weight_xi = ConvDeconv3d(in_channels, hidden_size, kernel_size)
        self.weight_hi = ConvDeconv3d(hidden_size, hidden_size, kernel_size, bias=False)

        self.weight_xg = copy.deepcopy(self.weight_xi)
        self.weight_hg = copy.deepcopy(self.weight_hi)

        self.weight_xr = copy.deepcopy(self.weight_xi)
        self.weight_hr = copy.deepcopy(self.weight_hi)

        memory_shape = list(input_shape)
        memory_shape[0] = hidden_size
        # self.layer_norm = nn.LayerNorm(memory_shape)
        self.group_norm = nn.GroupNorm(1, hidden_size)  # wzj

        # for spatiotemporal memory
        self.weight_xi_prime = copy.deepcopy(self.weight_xi)
        self.weight_mi_prime = copy.deepcopy(self.weight_hi)

        self.weight_xg_prime = copy.deepcopy(self.weight_xi)
        self.weight_mg_prime = copy.deepcopy(self.weight_hi)

        self.weight_xf_prime = copy.deepcopy(self.weight_xi)
        self.weight_mf_prime = copy.deepcopy(self.weight_hi)

        self.weight_xo = copy.deepcopy(self.weight_xi)
        self.weight_ho = copy.deepcopy(self.weight_hi)
        self.weight_co = copy.deepcopy(self.weight_hi)
        self.weight_mo = copy.deepcopy(self.weight_hi)

        self.weight_111 = nn.Conv3d(hidden_size + hidden_size, hidden_size, 1)

    def self_attention(self, r, c_history):
        batch_size = r.size(0)
        channels = r.size(1)
        r_flatten = r.view(batch_size, -1, channels)
        # B x tauTHW x C
        c_history_flatten = c_history.view(batch_size, -1, channels)

        # Attention mechanism
        # (B x THW x C) x (B x tauTHW x C) = B x THW x tauTHW
        scores = torch.einsum("bxc,byc->bxy", r_flatten, c_history_flatten)
        attention = F.softmax(scores, dim=2)

        return torch.einsum("bxy,byc->bxc", attention, c_history_flatten).view(*r.shape)

    def self_attention_fast(self, r, c_history):
        # Scaled dot-product, but for tensors: instead of a dot product
        # we contract over the t, w, h dimensions
        scaling_factor = 1 / (reduce(operator.mul, r.shape[-3:], 1) ** 0.5)
        scores = torch.einsum("bctwh,lbctwh->bl", r, c_history) * scaling_factor

        # NOTE softmax over dim 0 normalizes across the batch; if normalizing
        # over the tau history axis is intended, this should be dim=1
        attention = F.softmax(scores, dim=0)
        return torch.einsum("bl,lbctwh->bctwh", attention, c_history)

    def forward(self, x, c_history, m, h):
        # Normalized shape (T x H x W) for the commented-out LayerNorm variant
        normalized_shape = list(h.shape[-3:])

        def LR(input):
            # return F.layer_norm(input, normalized_shape)
            # NOTE GroupNorm takes only the input tensor
            return self.group_norm(input)  # wzj

        # r is B x C x T x H x W
        r = torch.sigmoid(LR(self.weight_xr(x) + self.weight_hr(h)))
        i = torch.sigmoid(LR(self.weight_xi(x) + self.weight_hi(h)))
        g = torch.tanh(LR(self.weight_xg(x) + self.weight_hg(h)))

        recall = self.self_attention_fast(r, c_history)
        # nice_print(**locals())
        # mem_report()
        # cpu_stats()

        c = i * g + self.group_norm(c_history[-1] + recall)  # wzj

        i_prime = torch.sigmoid(LR(self.weight_xi_prime(x) + self.weight_mi_prime(m)))
        g_prime = torch.tanh(LR(self.weight_xg_prime(x) + self.weight_mg_prime(m)))
        f_prime = torch.sigmoid(LR(self.weight_xf_prime(x) + self.weight_mf_prime(m)))

        m = i_prime * g_prime + f_prime * m
        o = torch.sigmoid(
            LR(self.weight_xo(x) + self.weight_ho(h) + self.weight_co(c) + self.weight_mo(m))
        )
        h = o * torch.tanh(self.weight_111(torch.cat([c, m], dim=1)))

        # TODO is it correct FIFO?
        c_history = torch.cat([c_history[1:], c[None, :]], dim=0)
        # nice_print(**locals())

        return (c_history, m, h)

    def init_hidden(self, batch_size, tau, device=None):
        memory_shape = list(self._input_shape)
        memory_shape[0] = self._hidden_size
        c_history = torch.zeros(tau, batch_size, *memory_shape, device=device)
        m = torch.zeros(batch_size, *memory_shape, device=device)
        h = torch.zeros(batch_size, *memory_shape, device=device)
        return (c_history, m, h)


class ConvDeconv3d(nn.Module):
    def __init__(self, in_channels, out_channels, *vargs, **kwargs):
        super().__init__()
        self.conv3d = nn.Conv3d(in_channels, out_channels, *vargs, **kwargs)
        # self.conv_transpose3d = nn.ConvTranspose3d(out_channels, out_channels, *vargs, **kwargs)

    def forward(self, input):
        # print(self.conv3d(input).shape, input.shape)
        # return self.conv_transpose3d(self.conv3d(input))
        return F.interpolate(self.conv3d(input), size=input.shape[-3:], mode="nearest")


class Out(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        return self.conv(x)


class E3DLSTM_NET(nn.Module):
    def __init__(self, input_shape, hidden_size, num_layers, kernel_size, tau, time_steps, output_shape):
        super().__init__()

        self.input_shape = input_shape
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.kernel_size = kernel_size
        self.tau = tau
        self.time_steps = time_steps
        self.output_shape = output_shape
        self.dtype = torch.float32

        self.encoder = E3DLSTM(input_shape, hidden_size, num_layers, kernel_size, tau).type(self.dtype)
        self.decoder = nn.Conv3d(
            hidden_size * time_steps, output_shape[0], kernel_size, padding=(0, 2, 2)
        ).type(self.dtype)
        # NOTE the decoder emits output_shape[0] channels, so the final conv must
        # take that many in_channels (a hard-coded Out(4, 1) would not match)
        self.out = Out(output_shape[0], 1)

    def forward(self, input_seq):
        return self.out(self.decoder(self.encoder(input_seq)))


# Test code
if __name__ == "__main__":
    input_shape = (16, 4, 16, 16)
    output_shape = (16, 1, 16, 16)
    tau = 2
    hidden_size = 64
    kernel = (3, 5, 5)
    lstm_layers = 4
    time_steps = 29

    # (seq_len, batch, C, T, H, W)
    x = torch.ones([29, 2, 16, 4, 16, 16])
    model = E3DLSTM_NET(input_shape, hidden_size, lstm_layers, kernel, tau, time_steps, output_shape)
    print("finished!")

    f = model(x)
    print(f)
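To make the dimension bookkeeping concrete, here is the shape trace I expect for the test configuration above. This is my own derivation from reading the code (it assumes the Out in_channels fix noted in the listing), so treat it as a sketch to verify against an actual run rather than recorded output:

import torch

# Configuration copied from the test block; assumes the classes above are in scope.
model = E3DLSTM_NET(
    input_shape=(16, 4, 16, 16), hidden_size=64, num_layers=4,
    kernel_size=(3, 5, 5), tau=2, time_steps=29, output_shape=(16, 1, 16, 16),
)
x = torch.ones([29, 2, 16, 4, 16, 16])  # (seq_len, batch, C, T, H, W)

enc = model.encoder(x)
assert enc.shape == (2, 1856, 4, 16, 16)  # 29 steps x 64 channels concatenated on dim 1
dec = model.decoder(enc)
assert dec.shape == (2, 16, 2, 16, 16)    # T: 4 - 3 + 1 = 2; H, W kept by padding (0, 2, 2)
y = model.out(dec)
assert y.shape == (2, 1, 2, 16, 16)       # final 3x3x3 conv maps 16 channels down to 1

Walking these three steps is usually enough to see which layer to change when plugging in a dataset with a different channel count, sequence length, or frame size.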
02 Paper download

Eidetic 3D LSTM: A Model for Video Prediction and Beyond
GitHub link: e3d_lstm
