
E3D-LSTM: PyTorch Implementation and Test Code

Table of Contents

00 Foreword
01 E3D-LSTM code, PyTorch version
02 Paper download

00 Foreword

The test code at the bottom is quite important: it gives you a rough view of how the tensor dimensions change as data propagates through the network, which makes it much easier to adapt the model to your own dataset (a shape-tracing sketch is included at the end of this post).

If you need the dataset from GitHub or a runnable copy of the code, feel free to message me privately.

01 E3D-LSTM code, PyTorch version

```python
# Library imports
from functools import reduce
import copy
import operator

import torch
import torch.nn as nn
import torch.nn.functional as F

# Debug helpers from the original repo; only needed by the commented-out calls below.
# from src.utils import nice_print, mem_report, cpu_stats


# E3D-LSTM model
class E3DLSTM(nn.Module):
    def __init__(self, input_shape, hidden_size, num_layers, kernel_size, tau):
        super().__init__()

        self._tau = tau
        self._cells = []

        input_shape = list(input_shape)
        for i in range(num_layers):
            cell = E3DLSTMCell(input_shape, hidden_size, kernel_size)
            # NOTE hidden state becomes input to the next cell
            input_shape[0] = hidden_size
            self._cells.append(cell)
            # Hook to register submodule
            setattr(self, "cell{}".format(i), cell)

    def forward(self, input):
        # NOTE (seq_len, batch, input_shape)
        batch_size = input.size(1)
        c_history_states = []
        h_states = []
        outputs = []

        for step, x in enumerate(input):
            for cell_idx, cell in enumerate(self._cells):
                if step == 0:
                    c_history, m, h = self._cells[cell_idx].init_hidden(
                        batch_size, self._tau, input.device
                    )
                    c_history_states.append(c_history)
                    h_states.append(h)

                # NOTE c_history and h come from the previous time stamp,
                # but we iterate over cells
                c_history, m, h = cell(x, c_history_states[cell_idx], m, h_states[cell_idx])
                c_history_states[cell_idx] = c_history
                h_states[cell_idx] = h
                # NOTE hidden state of the previous LSTM is passed as input to the next one
                x = h

            outputs.append(h)

        # NOTE concat along the channels
        return torch.cat(outputs, dim=1)


class E3DLSTMCell(nn.Module):
    def __init__(self, input_shape, hidden_size, kernel_size):
        super().__init__()

        in_channels = input_shape[0]
        self._input_shape = input_shape
        self._hidden_size = hidden_size

        # memory gates: input, cell (input modulation), forget
        self.weight_xi = ConvDeconv3d(in_channels, hidden_size, kernel_size)
        self.weight_hi = ConvDeconv3d(hidden_size, hidden_size, kernel_size, bias=False)

        self.weight_xg = copy.deepcopy(self.weight_xi)
        self.weight_hg = copy.deepcopy(self.weight_hi)

        self.weight_xr = copy.deepcopy(self.weight_xi)
        self.weight_hr = copy.deepcopy(self.weight_hi)

        memory_shape = list(input_shape)
        memory_shape[0] = hidden_size

        # self.layer_norm = nn.LayerNorm(memory_shape)
        self.group_norm = nn.GroupNorm(1, hidden_size)  # wzj

        # for spatiotemporal memory
        self.weight_xi_prime = copy.deepcopy(self.weight_xi)
        self.weight_mi_prime = copy.deepcopy(self.weight_hi)

        self.weight_xg_prime = copy.deepcopy(self.weight_xi)
        self.weight_mg_prime = copy.deepcopy(self.weight_hi)

        self.weight_xf_prime = copy.deepcopy(self.weight_xi)
        self.weight_mf_prime = copy.deepcopy(self.weight_hi)

        self.weight_xo = copy.deepcopy(self.weight_xi)
        self.weight_ho = copy.deepcopy(self.weight_hi)
        self.weight_co = copy.deepcopy(self.weight_hi)
        self.weight_mo = copy.deepcopy(self.weight_hi)

        self.weight_111 = nn.Conv3d(hidden_size + hidden_size, hidden_size, 1)

    def self_attention(self, r, c_history):
        batch_size = r.size(0)
        channels = r.size(1)
        r_flatten = r.view(batch_size, -1, channels)
        # B x tauTHW x C
        c_history_flatten = c_history.view(batch_size, -1, channels)

        # Attention mechanism
        # (B x THW x C) x (B x tauTHW x C) = B x THW x tauTHW
        scores = torch.einsum("bxc,byc->bxy", r_flatten, c_history_flatten)
        attention = F.softmax(scores, dim=2)

        return torch.einsum("bxy,byc->bxc", attention, c_history_flatten).view(*r.shape)

    def self_attention_fast(self, r, c_history):
        # Scaled dot-product, but for tensors: instead of a dot product
        # we do a matrix contraction on the t, w, h dimensions
        scaling_factor = 1 / (reduce(operator.mul, r.shape[-3:], 1) ** 0.5)
        scores = torch.einsum("bctwh,lbctwh->bl", r, c_history) * scaling_factor

        attention = F.softmax(scores, dim=0)
        return torch.einsum("bl,lbctwh->bctwh", attention, c_history)

    def forward(self, x, c_history, m, h):
        # Normalized shape for LayerNorm is C x T x H x W
        normalized_shape = list(h.shape[-3:])

        def LR(input):
            # return F.layer_norm(input, normalized_shape)
            return self.group_norm(input)  # wzj; nn.GroupNorm takes only the input tensor

        # R is C x T x H x W
        r = torch.sigmoid(LR(self.weight_xr(x) + self.weight_hr(h)))
        i = torch.sigmoid(LR(self.weight_xi(x) + self.weight_hi(h)))
        g = torch.tanh(LR(self.weight_xg(x) + self.weight_hg(h)))

        recall = self.self_attention_fast(r, c_history)
        # nice_print(**locals())
        # mem_report()
        # cpu_stats()

        c = i * g + self.group_norm(c_history[-1] + recall)  # wzj

        i_prime = torch.sigmoid(LR(self.weight_xi_prime(x) + self.weight_mi_prime(m)))
        g_prime = torch.tanh(LR(self.weight_xg_prime(x) + self.weight_mg_prime(m)))
        f_prime = torch.sigmoid(LR(self.weight_xf_prime(x) + self.weight_mf_prime(m)))

        m = i_prime * g_prime + f_prime * m

        o = torch.sigmoid(
            LR(self.weight_xo(x) + self.weight_ho(h) + self.weight_co(c) + self.weight_mo(m))
        )
        h = o * torch.tanh(self.weight_111(torch.cat([c, m], dim=1)))

        # TODO is it correct FIFO?
        c_history = torch.cat([c_history[1:], c[None, :]], dim=0)
        # nice_print(**locals())

        return (c_history, m, h)

    def init_hidden(self, batch_size, tau, device=None):
        memory_shape = list(self._input_shape)
        memory_shape[0] = self._hidden_size

        c_history = torch.zeros(tau, batch_size, *memory_shape, device=device)
        m = torch.zeros(batch_size, *memory_shape, device=device)
        h = torch.zeros(batch_size, *memory_shape, device=device)

        return (c_history, m, h)


class ConvDeconv3d(nn.Module):
    def __init__(self, in_channels, out_channels, *vargs, **kwargs):
        super().__init__()

        self.conv3d = nn.Conv3d(in_channels, out_channels, *vargs, **kwargs)
        # self.conv_transpose3d = nn.ConvTranspose3d(out_channels, out_channels, *vargs, **kwargs)

    def forward(self, input):
        # print(self.conv3d(input).shape, input.shape)
        # return self.conv_transpose3d(self.conv3d(input))
        # Upsample back to the input resolution instead of a transposed convolution
        return F.interpolate(self.conv3d(input), size=input.shape[-3:], mode="nearest")


class Out(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        return self.conv(x)


class E3DLSTM_NET(nn.Module):
    def __init__(self, input_shape, hidden_size, num_layers, kernel_size, tau, time_steps, output_shape):
        super().__init__()
        self.input_shape = input_shape
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.kernel_size = kernel_size
        self.tau = tau
        self.time_steps = time_steps
        self.output_shape = output_shape
        self.dtype = torch.float32

        self.encoder = E3DLSTM(input_shape, hidden_size, num_layers, kernel_size, tau).type(self.dtype)
        self.decoder = nn.Conv3d(
            hidden_size * time_steps, output_shape[0], kernel_size, padding=(0, 2, 2)
        ).type(self.dtype)
        # The source post had Out(4, 1), which does not match the decoder's
        # output_shape[0] output channels in the test below; use output_shape[0] so it runs.
        self.out = Out(output_shape[0], 1)

    def forward(self, input_seq):
        return self.out(self.decoder(self.encoder(input_seq)))


# Test code
if __name__ == "__main__":
    input_shape = (16, 4, 16, 16)   # (C, T, H, W) of one input window
    output_shape = (16, 1, 16, 16)
    tau = 2
    hidden_size = 64
    kernel = (3, 5, 5)
    lstm_layers = 4
    time_steps = 29

    # (seq_len, batch, C, T, H, W)
    x = torch.ones([29, 2, 16, 4, 16, 16])
    model = E3DLSTM_NET(input_shape, hidden_size, lstm_layers, kernel, tau, time_steps, output_shape)
    print("finished!")

    f = model(x)
    print(f)
```

02 Paper download

Eidetic 3D LSTM: A Model for Video Prediction and Beyond
GitHub link: e3d_lstm
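As a concrete companion to the foreword's point about tracking tensor dimensions, here is a minimal shape-tracing sketch. It is not part of the e3d_lstm repo: `trace_shapes` is a hypothetical helper name, and the snippet assumes the classes from section 01 are in scope (same file or imported). It registers a forward hook on every submodule and prints input/output shapes during one forward pass.

```python
import torch
import torch.nn as nn


def trace_shapes(model: nn.Module, x: torch.Tensor) -> None:
    """Print the input/output shapes of every submodule during one forward pass."""
    hooks = []

    def make_hook(name):
        def hook(module, inputs, output):
            in_shapes = [tuple(t.shape) for t in inputs if isinstance(t, torch.Tensor)]
            if isinstance(output, torch.Tensor):
                out_shapes = [tuple(output.shape)]
            else:  # e.g. E3DLSTMCell returns a (c_history, m, h) tuple
                out_shapes = [tuple(t.shape) for t in output if isinstance(t, torch.Tensor)]
            print(f"{name:30s} {module.__class__.__name__:15s} in={in_shapes} out={out_shapes}")
        return hook

    for name, module in model.named_modules():
        if name:  # skip the root module itself
            hooks.append(module.register_forward_hook(make_hook(name)))
    try:
        with torch.no_grad():
            model(x)
    finally:
        for h in hooks:
            h.remove()


if __name__ == "__main__":
    model = E3DLSTM_NET(
        input_shape=(16, 4, 16, 16), hidden_size=64, num_layers=4,
        kernel_size=(3, 5, 5), tau=2, time_steps=29, output_shape=(16, 1, 16, 16),
    )
    trace_shapes(model, torch.ones([29, 2, 16, 4, 16, 16]))
```

Expect verbose output: each of the four cells fires once per time step, so every submodule prints a line per call. For a quick first look you can hook only the top-level children via `model.named_children()` instead of `model.named_modules()`.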
