A simple PyTorch framework
Building the network:
mynn.py:
from torch import nn


class mynn(nn.Module):
    def __init__(self):
        super(mynn, self).__init__()
        # Fully connected blocks: Linear -> BatchNorm1d -> ReLU
        self.layer1 = nn.Sequential(
            nn.Linear(3520, 4096), nn.BatchNorm1d(4096), nn.ReLU(True)
        )
        self.layer2 = nn.Sequential(
            nn.Linear(4096, 4096), nn.BatchNorm1d(4096), nn.ReLU(True)
        )
        self.layer3 = nn.Sequential(
            nn.Linear(4096, 4096), nn.BatchNorm1d(4096), nn.ReLU(True)
        )
        self.layer4 = nn.Sequential(
            nn.Linear(4096, 4096), nn.BatchNorm1d(4096), nn.ReLU(True)
        )
        self.layer5 = nn.Sequential(
            nn.Linear(4096, 3072), nn.BatchNorm1d(3072), nn.ReLU(True)
        )
        self.layer6 = nn.Sequential(
            nn.Linear(3072, 2048), nn.BatchNorm1d(2048), nn.ReLU(True)
        )
        self.layer7 = nn.Sequential(
            nn.Linear(2048, 1024), nn.BatchNorm1d(1024), nn.ReLU(True)
        )
        self.layer8 = nn.Sequential(
            nn.Linear(1024, 256), nn.BatchNorm1d(256), nn.ReLU(True)
        )
        self.layer9 = nn.Sequential(
            nn.Linear(256, 64), nn.BatchNorm1d(64), nn.ReLU(True)
        )
        self.layer10 = nn.Sequential(
            nn.Linear(64, 32), nn.BatchNorm1d(32), nn.ReLU(True)
        )
        self.layer11 = nn.Sequential(
            nn.Linear(32, 3)  # three output classes: STAR / QSO / GALAXY
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        x = self.layer8(x)
        x = self.layer9(x)
        x = self.layer10(x)
        x = self.layer11(x)
        return x
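
Before wiring the network into the training script, it is worth sanity-checking the input and output shapes. A minimal sketch (the batch of 8 random spectra is made up; eval() lets BatchNorm run even on a tiny batch):

import torch
from mynn import mynn

net = mynn()
net.eval()  # BatchNorm uses its running statistics instead of batch statistics
dummy = torch.randn(8, 3520)  # 8 fake spectra with 3520 flux values each
out = net(dummy)
print(out.shape)  # expected: torch.Size([8, 3])
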
Redefining the Dataset:
mydataset.py:
from torch.utils import data
import numpy as np


class mydataset(data.Dataset):
    def __init__(self, csv_file, root_dir=None, transform=None):
        # landmarks_frame is a NumPy matrix: one spectrum per row, label in the last column
        self.landmarks_frame = np.loadtxt(open(csv_file, "rb"), delimiter=",")
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        lfit = self.landmarks_frame[idx, :]
        label = lfit[len(lfit) - 1]        # the last column holds the class label
        datafit = lfit[0:(len(lfit) - 1)]  # the remaining columns are the spectrum
        return label, datafit
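
A quick check that the Dataset and DataLoader cooperate before starting a long training run. A minimal sketch, assuming train.csv from the conversion script below already exists (the small batch size is arbitrary):

import torch
from mydataset import mydataset

ds = mydataset(csv_file="G:\\DATA\\train.csv")
loader = torch.utils.data.DataLoader(ds, batch_size=4, shuffle=True)
label, datafit = next(iter(loader))
print(datafit.shape, label.shape)  # expected: torch.Size([4, 3520]) torch.Size([4])
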
Main program:
main.py:
import torch
from torch import nn, optim
import time
import mydataset  # the Dataset defined in mydataset.py above
import mynn       # the network defined in mynn.py above

if __name__ == '__main__':
    # With num_workers > 0, DataLoaders must be created under '__main__'
    # (on Windows the worker processes re-import this module on spawn).
    data_train = mydataset.mydataset(csv_file="G:\\DATA\\train.csv", root_dir=None, transform=None)
    data_test = mydataset.mydataset(csv_file="G:\\DATA\\test.csv", root_dir=None, transform=None)
    data_loader_train = torch.utils.data.DataLoader(dataset=data_train,
                                                    batch_size=256,
                                                    shuffle=True,
                                                    num_workers=0,
                                                    pin_memory=True)
    data_loader_test = torch.utils.data.DataLoader(dataset=data_test,
                                                   batch_size=256,
                                                   shuffle=True,
                                                   num_workers=0,
                                                   pin_memory=True)
    print("**dataloader done**")

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = mynn.mynn()
    model.to(device)
    # Loss function
    criterion = nn.CrossEntropyLoss()
    # Optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-4)

    n_epochs = 1000
    s_time = time.time()
    for epoch in range(n_epochs):
        running_loss = 0.0
        running_correct = 0.0
        print('Epoch {}/{}'.format(epoch, n_epochs))
        model.train()
        for label, datafit in data_loader_train:
            x_train = datafit.float().to(device)
            y_train = label.long().to(device)
            outputs = model(x_train)
            _, pred = torch.max(outputs.data, 1)
            optimizer.zero_grad()
            loss = criterion(outputs, y_train)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            running_correct += torch.sum(pred == y_train.data).item()
        # BatchNorm layers must switch to eval mode for testing
        model.eval()
        testing_correct = 0.0
        with torch.no_grad():
            for label, datafit in data_loader_test:
                x_test = datafit.float().to(device)
                y_test = label.long().to(device)
                outputs = model(x_test)
                _, pred = torch.max(outputs.data, 1)
                testing_correct += torch.sum(pred == y_test.data).item()
        print('Loss is:{:.4f}, Train Accuracy is:{:.4f}%, Test Accuracy '
              'is:{:.4f}%'.format(running_loss / len(data_train),
                                  100 * running_correct / len(data_train),
                                  100 * testing_correct / len(data_test)))
    e_time = time.time()
    print('time_run is :', e_time - s_time)
    print('*******done******')
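
The script keeps the trained weights only in memory. A minimal sketch of saving and restoring them (the checkpoint filename here is a made-up example):

    # Save only the learned parameters (the usual PyTorch practice)
    torch.save(model.state_dict(), "G:\\DATA\\mynn_checkpoint.pth")

    # Later: rebuild the architecture, then load the weights back
    model2 = mynn.mynn()
    model2.load_state_dict(torch.load("G:\\DATA\\mynn_checkpoint.pth", map_location=device))
    model2.eval()  # switch BatchNorm to inference mode before predicting
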
Writing the astronomical data to CSV:
main.py:
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import os
import numpy as np
from astropy.io import fits
from sklearn.model_selection import train_test_split


def getData(fitPath, cla):
    fileList = []  # all .fit files in the directory
    files = os.listdir(fitPath)  # names of the directory entries
    for f in files:
        if os.path.isfile(fitPath + '/' + f) and f.endswith(".fit"):
            fileList.append(fitPath + '/' + f)
    x = np.ones(3521)
    num = 1
    for path in fileList:
        hdu = fits.open(path)
        header = hdu[0].header  # FITS header keywords
        SPEC_CLN = header['SPEC_CLN']
        SN_G = header['SN_G']
        NAXIS1 = header['NAXIS1']  # length of the spectrum
        COEFF0 = header['COEFF0']
        COEFF1 = header['COEFF1']
        # The header stores the wavelength axis as log10 coefficients
        logwavelength = COEFF0 + np.arange(NAXIS1) * COEFF1
        wave = 10 ** logwavelength
        # Index of the pixel where the wavelength crosses 4000 Angstrom
        start = 0
        for i in range(NAXIS1 - 1):
            if wave[i] <= 4000 and wave[i + 1] >= 4000:
                start = i
        spec = hdu[0].data[0, :]  # the spectrum is the first row of the data block
        spec = np.array(spec[start:start + 3521])
        spec[3520] = cla  # the last element carries the class label
        if num == 1:
            x = spec
            num = 2
        else:
            x = np.vstack((x, spec))
        hdu.close()
    return x


if __name__ == '__main__':
    x = getData(r"G:\DATA\STAR", 0)
    x_train, x_test = train_test_split(x, test_size=0.1, random_state=0)
    y = getData(r"G:\DATA\QSO", 1)
    y_train, y_test = train_test_split(y, test_size=0.1, random_state=0)
    x_train = np.vstack((x_train, y_train))
    x_test = np.vstack((x_test, y_test))
    z = getData(r"G:\DATA\GALAXY", 2)
    z_train, z_test = train_test_split(z, test_size=0.1, random_state=0)
    x_train = np.vstack((x_train, z_train))
    x_test = np.vstack((x_test, z_test))
    np.savetxt(r"G:\DATA\train.csv", x_train, delimiter=',')
    np.savetxt(r"G:\DATA\test.csv", x_test, delimiter=',')
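
A round-trip check confirms the CSV layout that mydataset expects: 3520 flux values followed by one label per row. A minimal sketch:

import numpy as np

train = np.loadtxt(r"G:\DATA\train.csv", delimiter=',')
print(train.shape)              # expected: (n_samples, 3521)
print(np.unique(train[:, -1]))  # expected labels: [0. 1. 2.]
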
Naive Bayes classification:
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import os
import numpy as np
from astropy.io import fits
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB


def getData(path, cla):
    fileList = []  # all .fit files in the directory
    files = os.listdir(path)  # names of the directory entries
    y = []
    for f in files:
        if os.path.isfile(path + '/' + f) and f.endswith(".fit"):
            fileList.append(path + '/' + f)
    x = np.ones(3520)
    num = 1
    for fpath in fileList:
        hdu = fits.open(fpath)
        header = hdu[0].header  # FITS header keywords
        SPEC_CLN = header['SPEC_CLN']
        SN_G = header['SN_G']
        NAXIS1 = header['NAXIS1']  # length of the spectrum
        COEFF0 = header['COEFF0']
        COEFF1 = header['COEFF1']
        # The header stores the wavelength axis as log10 coefficients
        logwavelength = COEFF0 + np.arange(NAXIS1) * COEFF1
        wave = 10 ** logwavelength
        # Index of the pixel where the wavelength crosses 4000 Angstrom
        start = 0
        for i in range(NAXIS1 - 1):
            if wave[i] <= 4000 and wave[i + 1] >= 4000:
                start = i
        spec = hdu[0].data[0, :]  # the spectrum is the first row of the data block
        spec = np.array(spec[start:start + 3520])
        # Min-max normalisation to [0, 1]; MultinomialNB requires non-negative input
        smin, smax = spec.min(), spec.max()
        spec = (spec - smin) / (smax - smin)
        spec = spec.reshape(1, -1)
        if num == 1:
            x = spec
            num = 2
        else:
            x = np.vstack((x, spec))
        y.append(cla)
        hdu.close()
    y = np.array(y)
    return x, y


def show_accuracy(y_hat, y_test):
    num = 0.0
    for i in range(len(y_test)):
        if y_hat[i] == y_test[i]:
            num = num + 1.0
    return num / float(len(y_test))


if __name__ == '__main__':
    x1, y1 = getData(r"G:\DATA\m01520949", 0)
    x1_train, x1_test, y1_train, y1_test = train_test_split(x1, y1, test_size=0.1, random_state=0)
    x2, y2 = getData(r"G:\DATA\m11520604", 1)
    x2_train, x2_test, y2_train, y2_test = train_test_split(x2, y2, test_size=0.1, random_state=0)
    x3, y3 = getData(r"G:\DATA\m21520739", 2)
    x3_train, x3_test, y3_train, y3_test = train_test_split(x3, y3, test_size=0.1, random_state=0)
    x4, y4 = getData(r"G:\DATA\m31520635", 3)
    x4_train, x4_test, y4_train, y4_test = train_test_split(x4, y4, test_size=0.1, random_state=0)
    # Training set
    x_train = np.vstack((x1_train, x2_train))
    x_train = np.vstack((x_train, x3_train))
    x_train = np.vstack((x_train, x4_train))
    y_train = np.append(y1_train, y2_train)
    y_train = np.append(y_train, y3_train)
    y_train = np.append(y_train, y4_train)
    # Test set
    x_test = np.vstack((x1_test, x2_test))
    x_test = np.vstack((x_test, x3_test))
    x_test = np.vstack((x_test, x4_test))
    y_test = np.append(y1_test, y2_test)
    y_test = np.append(y_test, y3_test)
    y_test = np.append(y_test, y4_test)
    print(x_train.shape)
    print(y_train.shape)
    print(x_test.shape)
    print(y_test.shape)
    # Earlier experiments, kept for reference: relabelling the targets to +-1
    # for a binary test, an SVM classifier, and PCA dimensionality reduction:
    # clf = svm.SVC(C=0.9, kernel='linear', gamma=50, decision_function_shape='ovo')
    # clf.fit(x_train, y_train)
    # y_hat = clf.predict(x_test)
    # pca = PCA(n_components=260)
    # x_train = pca.fit_transform(x_train)
    # x_test = pca.fit_transform(x_test)  # note: the test set should use pca.transform
    model = MultinomialNB(alpha=1.0, fit_prior=True, class_prior=None)
    model.fit(x_train, y_train)
    y_hat = model.predict(x_test)
    print(show_accuracy(y_hat, y_test))
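
The flux values are continuous, so a Gaussian likelihood is a natural baseline next to the multinomial model. A minimal sketch reusing the split above:

from sklearn.naive_bayes import GaussianNB

gnb = GaussianNB()
gnb.fit(x_train, y_train)
print(show_accuracy(gnb.predict(x_test), y_test))
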
RNN classification:
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 8 15:10:35 2019

@author: DELL
"""
import torch
from torch import nn
import torch.utils.data as Data
import numpy as np

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


class mydataset(Data.Dataset):
    def __init__(self, csv_file, root_dir=None, transform=None):
        # landmarks_frame is a NumPy matrix: one spectrum per row, label in the last column
        self.landmarks_frame = np.loadtxt(open(csv_file, "rb"), delimiter=",")
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.landmarks_frame)

    def __getitem__(self, idx):
        lfit = self.landmarks_frame[idx, :]
        label = lfit[len(lfit) - 1]
        datafit = lfit[0:(len(lfit) - 1)]
        return label, datafit


torch.manual_seed(1)

EPOCH = 60
BATCH_SIZE = 16
TIME_STEP = 5     # each spectrum is fed as 5 time steps...
INPUT_SIZE = 704  # ...of 704 flux values each (5 * 704 = 3520)
LR = 0.00001

train_data = mydataset(csv_file="G:\\DATA\\mtrain.csv")
test_data = mydataset(csv_file="G:\\DATA\\mtest.csv")
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
test_loader = Data.DataLoader(dataset=test_data, batch_size=400, shuffle=True)

# One fixed batch of 400 test samples for periodic evaluation
dataiter = iter(test_loader)
test_y, test_x = next(dataiter)
test_x = test_x.view(-1, TIME_STEP, INPUT_SIZE).float().to(device)
test_y = test_y.float().to(device)
print(test_y.shape)
print(test_x.shape)
class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn1 = nn.LSTM(
            input_size=INPUT_SIZE,
            hidden_size=128,
            num_layers=1,
            batch_first=True,
        )
        self.rnn2 = nn.LSTM(
            input_size=128,
            hidden_size=128,
            num_layers=1,
            batch_first=True,
        )
        self.bn = nn.BatchNorm1d(TIME_STEP)  # normalises across the 5 time steps
        self.out = nn.Sequential(
            nn.Linear(128, 64), nn.BatchNorm1d(64), nn.ReLU(True),
            nn.Linear(64, 32), nn.BatchNorm1d(32), nn.ReLU(True),
            nn.Linear(32, 4))

    def forward(self, x):
        # rnn2 is applied six times with shared weights, carrying the LSTM
        # state forward; every r_out has shape [batch, TIME_STEP, 128]
        r_out1, (h_n1, c_n1) = self.rnn1(x, None)
        r_out1 = self.bn(r_out1)
        r_out2, (h_n2, c_n2) = self.rnn2(r_out1, (h_n1, c_n1))
        r_out2 = self.bn(r_out2)
        r_out3, (h_n3, c_n3) = self.rnn2(r_out2, (h_n2, c_n2))
        r_out3 = self.bn(r_out3)
        r_out4, (h_n4, c_n4) = self.rnn2(r_out3, (h_n3, c_n3))
        r_out4 = self.bn(r_out4)
        r_out5, (h_n5, c_n5) = self.rnn2(r_out4, (h_n4, c_n4))
        r_out5 = self.bn(r_out5)
        r_out6, (h_n6, c_n6) = self.rnn2(r_out5, (h_n5, c_n5))
        r_out6 = self.bn(r_out6)
        r_out7, (h_n7, c_n7) = self.rnn2(r_out6, (h_n6, c_n6))
        out = self.out(r_out7[:, -1, :])  # classify from the last time step only
        return out


rnn = RNN()
rnn = rnn.to(device)
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()

for epoch in range(EPOCH):
    for step, (y, x) in enumerate(train_loader):
        b_x = x.view(-1, TIME_STEP, INPUT_SIZE).float().to(device)
        b_y = y.long().to(device)
        output = rnn(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if step % 10 == 0:
            rnn.eval()  # BatchNorm must use running statistics while evaluating
            with torch.no_grad():
                test_output = rnn(test_x)
            rnn.train()
            pred_y = torch.max(test_output, 1)[1].float()
            accuracy = float((pred_y == test_y).sum().item()) / float(test_y.size(0))
            print("Epoch: ", epoch, "| train loss : %.4f" % loss.item(), '| test accuracy: %.2f' % accuracy)
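
The loop above scores a single fixed batch of 400 test samples; one full pass over test_loader gives a steadier number. A minimal sketch:

rnn.eval()
correct, total = 0, 0
with torch.no_grad():
    for y, x in test_loader:
        b_x = x.view(-1, TIME_STEP, INPUT_SIZE).float().to(device)
        b_y = y.long().to(device)
        pred = torch.max(rnn(b_x), 1)[1]
        correct += (pred == b_y).sum().item()
        total += b_y.size(0)
print('full test accuracy: %.2f' % (correct / total))
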