当前位置: 首页 > news >正文

用分布式做的网站公司网页设计费记哪个科目

# Build a CIFAR-10-style binary (pickle) dataset and train on it with the
# PyTorch framework (using the cat-vs-dog dataset as the example).
# This data format is what Alibaba Cloud PAI Studio's visual model training
# consumes.
#
# NOTE(review): this page concatenates several independent scripts/modules
# from the original article; the section banners below mark the original
# file boundaries. The scrape stripped all '=', '==', quotes etc. — the code
# below is a reconstruction.

# =====================================================================
# 1. Make the CIFAR-10-like binary data (main script)
# =====================================================================
import os, cv2
from pickled import *
from load_data import *

data_path = './data_n/test'      # image folder
file_list = './data_n/test.txt'  # listing file: one "image_name label" per line
save_path = './bin'              # output folder for the pickled batches

if __name__ == '__main__':
    data, label, lst = read_data(file_list, data_path, shape=128)
    pickled(save_path, data, label, lst, bin_num=1)


# =====================================================================
# read_data module (load_data.py)
# =====================================================================
import cv2
import os
import numpy as np

DATA_LEN = 49152     # 128 * 128 * 3 bytes per image
CHANNEL_LEN = 16384  # 128 * 128 bytes per channel plane
SHAPE = 128          # target square image size


def imread(im_path, shape=None, color='RGB', mode=cv2.IMREAD_UNCHANGED):
    """Read one image; optionally convert BGR->RGB and resize to (shape, shape)."""
    im = cv2.imread(im_path, cv2.IMREAD_UNCHANGED)
    if color == 'RGB':
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        # im = np.transpose(im, [2, 1, 0])
    if shape is not None:
        assert isinstance(shape, int)
        im = cv2.resize(im, (shape, shape))
    return im


def read_data(filename, data_path, shape=None, color='RGB'):
    """
    filename (str): a text file; each line is "image_name label"
    data_path (str): image data folder
    return: (data, label, lst) — uint8 array of shape (n, DATA_LEN) with
            channel-planar R|G|B rows (CIFAR-10 layout), label list, and
            the image-name list.  Returns None if `filename` is a directory.
    """
    if os.path.isdir(filename):
        print("Can't find data file!")
    else:
        # `with` guarantees the listing file is closed (original leaked it)
        with open(filename) as f:
            lines = f.read().splitlines()
        count = len(lines)
        data = np.zeros((count, DATA_LEN), dtype=np.uint8)
        # label = np.zeros(count, dtype=np.uint8)
        lst = [ln.split(' ')[0] for ln in lines]
        label = [int(ln.split(' ')[1]) for ln in lines]
        idx = 0
        s, c = SHAPE, CHANNEL_LEN
        for ln in lines:
            fname, lab = ln.split(' ')
            # imread() already does read + BGR->RGB + resize; the original
            # called it and then overwrote the result with the same three
            # inline cv2 calls — the duplicate work is removed here.
            im = imread(os.path.join(data_path, fname), shape=s, color='RGB')
            # print(len(np.reshape(im[:, :, 0], c)))
            data[idx, :c] = np.reshape(im[:, :, 0], c)       # R plane
            data[idx, c:2 * c] = np.reshape(im[:, :, 1], c)  # G plane
            data[idx, 2 * c:] = np.reshape(im[:, :, 2], c)   # B plane
            label[idx] = int(lab)
            idx = idx + 1
        return data, label, lst


# =====================================================================
# pickled module (pickled.py)
# =====================================================================
import os
import pickle

BIN_COUNTS = 5  # default number of output batch files


def pickled(savepath, data, label, fnames, bin_num=BIN_COUNTS, mode='train'):
    """
    savepath (str): save path (must be an existing directory)
    data (array): image data, an n x 49152 uint8 array
    label (list): image labels, a list of length n
    fnames (str list): image names, a list of length n
    bin_num (int): split the data across this many files
    mode (str): {'train', 'test'} — only affects the 'batch_label' text
    """
    assert os.path.isdir(savepath)
    total_num = len(fnames)
    samples_per_bin = total_num / bin_num
    assert samples_per_bin > 0
    idx = 0
    for i in range(bin_num):
        start = int(i * samples_per_bin)
        end = int((i + 1) * samples_per_bin)
        print(start)
        print(end)
        # renamed from `dict` — don't shadow the builtin
        if end <= total_num:
            batch = {'data': data[start:end, :],
                     'labels': label[start:end],
                     'filenames': fnames[start:end]}
        else:  # last bin: take whatever remains
            batch = {'data': data[start:, :],
                     'labels': label[start:],
                     'filenames': fnames[start:]}
        if mode == 'train':
            batch['batch_label'] = 'training batch {} of {}'.format(idx, bin_num)
        else:
            batch['batch_label'] = 'testing batch {} of {}'.format(idx, bin_num)
        # with open(os.path.join(savepath, 'data_batch_' + str(idx)), 'wb') as fi:
        with open(os.path.join(savepath, 'batch_tt' + str(idx)), 'wb') as fi:
            pickle.dump(batch, fi)
        idx = idx + 1


def unpickled(filename):
    """Load one pickled batch file back into a dict."""
    # assert os.path.isdir(filename)
    assert os.path.isfile(filename)
    with open(filename, 'rb') as fo:
        batch = pickle.load(fo)
    return batch


# =====================================================================
# Sanity-check the generated binary data
# =====================================================================
import os
import pickle
import numpy as np
import cv2


def load_batch(fpath):
    """Unpickle one batch file; return (data, labels)."""
    with open(fpath, 'rb') as f:
        d = pickle.load(f)
    data = d['data']
    labels = d['labels']
    return data, labels


def load_data(dirname, one_hot=False):
    """Load the test batch from `dirname` and display one decoded image."""
    X_train = []
    Y_train = []
    # NOTE(review): range(0) never iterates — the article deliberately skips
    # the training batches here; raise the bound to load them too.
    for i in range(0):
        fpath = os.path.join(dirname, 'data_batch_' + str(i))
        print(fpath)
        data, labels = load_batch(fpath)
        if i == 0:
            X_train = data
            Y_train = labels
        else:
            X_train = np.concatenate([X_train, data], axis=0)
            Y_train = np.concatenate([Y_train, labels], axis=0)
    ftpath = os.path.join(dirname, 'batch_tt0')
    X_test, Y_test = load_batch(ftpath)
    # planar R|G|B rows -> (n, 128, 128, 3)
    X_test = np.dstack((X_test[:, :16384], X_test[:, 16384:32768],
                        X_test[:, 32768:]))
    X_test = np.reshape(X_test, [-1, 128, 128, 3])
    print(X_test.shape)
    xx_test = np.transpose(X_test, (0, 3, 1, 2))  # NCHW view, shape check only
    print(xx_test.shape)
    # print(X_test[2])
    imgs = X_test[2:4]
    img = imgs[1]
    print(img.shape)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)  # back to BGR for cv2.imshow
    cv2.imshow('img', img)
    cv2.waitKey(0)


if __name__ == '__main__':
    dirname = 'test'
    load_data(dirname)


# =====================================================================
# 2. Train with the prepared data
# =====================================================================
import torch
import os
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from PIL import Image
import pickle
import numpy as np

# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')


def load_batch(fpath):
    """Unpickle one batch file; return (data, labels)."""
    with open(fpath, 'rb') as f:
        d = pickle.load(f)
    data = d['data']
    labels = d['labels']
    return data, labels


def load_data(dirname, one_hot=False, train=False):
    """Load train or test batches as (n, 128, 128, 3) uint8 arrays."""
    print(dirname)
    if train:
        X_train = []
        Y_train = []
        for i in range(1):
            # NOTE(review): expects files named data_batch_<i>, but the
            # pickling step above writes batch_tt<i> — confirm/rename.
            fpath = os.path.join(dirname, 'data_batch_' + str(i))
            print(fpath)
            data, labels = load_batch(fpath)
            if i == 0:
                X_train = data
                Y_train = labels
            else:
                X_train = np.concatenate([X_train, data], axis=0)
                Y_train = np.concatenate([Y_train, labels], axis=0)
        X_train = np.dstack((X_train[:, :16384], X_train[:, 16384:32768],
                             X_train[:, 32768:]))
        X_train = np.reshape(X_train, [-1, 128, 128, 3])
        # X_train = np.transpose(X_train, (0, 3, 1, 2))
        return X_train, Y_train
    else:
        ftpath = os.path.join(dirname, 'test_batch_0')
        print(ftpath)
        X_test, Y_test = load_batch(ftpath)
        X_test = np.dstack((X_test[:, :16384], X_test[:, 16384:32768],
                            X_test[:, 32768:]))
        X_test = np.reshape(X_test, [-1, 128, 128, 3])
        # keep HWC here: transforms.ToTensor() performs the HWC->CHW move
        # X_test = np.transpose(X_test, (0, 3, 1, 2))
        return X_test, Y_test


class MyDataset(torch.utils.data.Dataset):
    """Dataset backed by the pickled CIFAR-10-style batch files."""

    def __init__(self, namedir, transform=None, train=False):
        super().__init__()
        self.namedir = namedir
        self.transform = transform
        self.train = train
        self.datas, self.labels = load_data(self.namedir, train=self.train)

    def __getitem__(self, index):
        imgs = self.datas[index]
        img_labes = int(self.labels[index])
        if self.transform is not None:
            imgs = self.transform(imgs)
        return imgs, img_labes

    def __len__(self):
        return len(self.labels)


class MyDataset_s(torch.utils.data.Dataset):
    """Alternative dataset reading raw image files via a "name label" list."""

    def __init__(self, datatxt, transform=None):
        super().__init__()
        imgs = []
        # `with` closes the listing file (original leaked the handle)
        with open(datatxt, 'r') as fh:
            for line in fh:
                line = line.rstrip()
                words = line.split()
                imgs.append((words[0], int(words[1])))
        self.imgs = imgs
        self.transform = transform

    def __getitem__(self, index):
        fn, label = self.imgs[index]
        img = Image.open(fn).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.imgs)


mean = [0.5071, 0.4867, 0.4408]
stdv = [0.2675, 0.2565, 0.2761]

transform = transforms.Compose([
    # transforms.Resize([224, 224]),
    # transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=stdv)
])

classes = ('cat', 'dog')


class Net(nn.Module):
    """Small 4-conv CNN for 128x128 RGB input, 2 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        # self.conv5 = nn.Conv2d(32, 16, kernel_size=3, stride=1, padding=1)
        self.fc1 = nn.Linear(32 * 8 * 8, 256)
        self.fc2 = nn.Linear(256, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):                     # (n, 3, 128, 128)
        x = self.pool(F.relu(self.conv1(x)))  # (n, 16, 64, 64)
        x = self.pool(F.relu(self.conv2(x)))  # (n, 32, 32, 32)
        x = self.pool(F.relu(self.conv3(x)))  # (n, 64, 16, 16)
        x = self.pool(F.relu(self.conv4(x)))  # (n, 32, 8, 8)
        # x = self.pool(F.relu(self.conv5(x)))
        x = x.view(-1, 32 * 8 * 8)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


class VGG16(nn.Module):
    """VGG-16-style net for 3x224x224 input, 2 classes (unused by the loop below)."""

    def __init__(self):
        super(VGG16, self).__init__()
        # input: 3 * 224 * 224
        self.conv1_1 = nn.Conv2d(3, 64, 3)                    # 64 * 222 * 222
        self.conv1_2 = nn.Conv2d(64, 64, 3, padding=(1, 1))   # 64 * 222 * 222
        self.maxpool1 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 64 * 112 * 112
        self.conv2_1 = nn.Conv2d(64, 128, 3)                  # 128 * 110 * 110
        self.conv2_2 = nn.Conv2d(128, 128, 3, padding=(1, 1))
        self.maxpool2 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 128 * 56 * 56
        self.conv3_1 = nn.Conv2d(128, 256, 3)                 # 256 * 54 * 54
        # NOTE(review): kernel size 2 matches the article as published but is
        # probably a typo for 3 — confirm before reusing this class.
        self.conv3_2 = nn.Conv2d(256, 256, 2, padding=(1, 1))
        self.conv3_3 = nn.Conv2d(256, 256, 3, padding=(1, 1))
        self.maxpool3 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 256 * 28 * 28
        self.conv4_1 = nn.Conv2d(256, 512, 3)                 # 512 * 26 * 26
        self.conv4_2 = nn.Conv2d(512, 512, 3, padding=(1, 1))
        self.conv4_3 = nn.Conv2d(512, 512, 3, padding=(1, 1))
        self.maxpool4 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 512 * 14 * 14
        self.conv5_1 = nn.Conv2d(512, 512, 3)                 # 512 * 12 * 12
        self.conv5_2 = nn.Conv2d(512, 512, 3, padding=(1, 1))
        self.conv5_3 = nn.Conv2d(512, 512, 3, padding=(1, 1))
        self.maxpool5 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 512 * 7 * 7
        # flatten, then classify
        self.fc1 = nn.Linear(512 * 7 * 7, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):
        in_size = x.size(0)              # batch size
        out = F.relu(self.conv1_1(x))    # 222
        out = F.relu(self.conv1_2(out))  # 222
        out = self.maxpool1(out)         # 112
        out = F.relu(self.conv2_1(out))  # 110
        out = F.relu(self.conv2_2(out))  # 110
        out = self.maxpool2(out)         # 56
        out = F.relu(self.conv3_1(out))  # 54
        out = F.relu(self.conv3_2(out))
        out = F.relu(self.conv3_3(out))
        out = self.maxpool3(out)         # 28
        out = F.relu(self.conv4_1(out))  # 26
        out = F.relu(self.conv4_2(out))
        out = F.relu(self.conv4_3(out))
        out = self.maxpool4(out)         # 14
        out = F.relu(self.conv5_1(out))  # 12
        out = F.relu(self.conv5_2(out))
        out = F.relu(self.conv5_3(out))
        out = self.maxpool5(out)         # 7
        out = out.view(in_size, -1)      # flatten
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        # out = F.log_softmax(out, dim=1)
        return out


class VGG8(nn.Module):
    """Shallow VGG-style net (one conv per stage), 3x224x224 input, 2 classes."""

    def __init__(self):
        super(VGG8, self).__init__()
        # input: 3 * 224 * 224
        self.conv1_1 = nn.Conv2d(3, 64, 3)                    # 64 * 222 * 222
        self.maxpool1 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 64 * 112 * 112
        self.conv2_1 = nn.Conv2d(64, 128, 3)                  # 128 * 110 * 110
        self.maxpool2 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 128 * 56 * 56
        self.conv3_1 = nn.Conv2d(128, 256, 3)                 # 256 * 54 * 54
        self.maxpool3 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 256 * 28 * 28
        self.conv4_1 = nn.Conv2d(256, 512, 3)                 # 512 * 26 * 26
        self.maxpool4 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 512 * 14 * 14
        self.conv5_1 = nn.Conv2d(512, 512, 3)                 # 512 * 12 * 12
        self.maxpool5 = nn.MaxPool2d((2, 2), padding=(1, 1))  # -> 512 * 7 * 7
        self.fc1 = nn.Linear(512 * 7 * 7, 512)
        self.fc2 = nn.Linear(512, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):
        in_size = x.size(0)                             # batch size
        out = self.maxpool1(F.relu(self.conv1_1(x)))    # 112
        out = self.maxpool2(F.relu(self.conv2_1(out)))  # 56
        out = self.maxpool3(F.relu(self.conv3_1(out)))  # 28
        out = self.maxpool4(F.relu(self.conv4_1(out)))  # 14
        out = self.maxpool5(F.relu(self.conv5_1(out)))  # 7
        out = out.view(in_size, -1)                     # flatten
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        # out = F.log_softmax(out, dim=1)
        return out


if __name__ == '__main__':
    # NOTE(review): dataset/loader/net construction moved under the main
    # guard so importing this module performs no file I/O or training.
    train_data = MyDataset(namedir='data\\train\\', transform=transform, train=True)
    trainloader = torch.utils.data.DataLoader(dataset=train_data, batch_size=4, shuffle=True)
    test_data = MyDataset(namedir='data\\val\\', transform=transform, train=False)
    testloader = torch.utils.data.DataLoader(dataset=test_data, batch_size=4, shuffle=True)

    net = Net()
    # net.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.005, momentum=0.9)

    for epoch in range(11):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data
            # inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % 100 == 99:  # report the mean loss every 100 mini-batches
                print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 100))
                running_loss = 0.0
        if epoch % 2 == 0:  # evaluate on the test set every other epoch
            correct = 0
            total = 0
            with torch.no_grad():
                for data in testloader:
                    images, labels = data
                    # images, labels = images.to(device), labels.to(device)
                    outputs = net(images)
                    _, predicted = torch.max(outputs.data, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
            print('Accuracy of the network on the 1000 test images: %d %%' % (100 * correct / total))
    print('finished !!!')
http://www.hkea.cn/news/14402250/

相关文章:

  • 萝岗公司网站建设知识付费网站制作
  • 网站建设与网页制作的实验目的建设小说网站违法吗
  • 简述网站建设及维护的全过程家居装修公司排名
  • 上海建设行政主管部门政务网站wordpress文章页获取标签代码
  • 手机制作表白网站江苏中益建设官方网站
  • 网站开发是固定资产吗成都打鱼网站建设
  • 快速网站收录wordpress外网访问不
  • 成都投资网站建设广告设计网址
  • 大连市公众平台网站网站开启伪静态
  • 天元建设集团有限公司承兑汇票兑付网站个别页面做seo
  • 网站后台设置应注意什么wordpress 全局置顶
  • 专业定制网站注册个网站要多少钱
  • 建设网站建站公司企业解决方案是什么
  • 网站图片大小多少合适网站免费建站众享星球
  • php网站如何做特效做网站也分内存大小的吗
  • 做虾皮网站赚钱吗搜狗站长工具平台
  • 百度站长工具怎么查排名网站一般用什么免费字体
  • 建设银行北海市分行网站百度网页打不开怎么办
  • 怎样用网站做淘宝客部署iis网站
  • 漯河网站优化2022年国内互联网公司排名
  • 新开传奇网站发布网单职业鹏鸿生态板官方网站开发区代理
  • 网站title优化一起看地图app下载手机版
  • 北京装饰网站建设济南网站建设山东聚搜网见效快
  • 网站开发职业认知小结一个人做网站要多久
  • 怎样制作网站建设方案wordpress修改样式
  • 统计网站怎么做最新型建筑模板有哪些
  • 受欢迎的惠州网站建设wordpress页眉显示购物车
  • 桥梁建设 网站邢台网站建设信息
  • 做创意ppt网站有哪些方面焦作网站设计公司
  • 网站诊断工具wordpress配置伪静态