【Code】Implementing a fully-connected neural network with numpy and pytorch
"""
利用numpy实现一个两层的全连接网络
网络结构是:input ->(w1) fc_h -> relu ->(w2) output
数据是随机出的
"""
import numpy as np
# Dimension and size parameters
batch_size = 64
input_dim = 1000
output_dim = 10
hidden_dim = 100

# Dummy data (x, y).
# Each row is one sample: the input is 64x1000 (1000 feature dimensions),
# the output is 64x10.
# Once the parameters are trained, a forward pass on new data just applies
# w1 and w2 directly. ReLU is used as the activation function.
x = np.random.randn(batch_size, input_dim)
y = np.random.randn(batch_size, output_dim)

# Parameters to train: w1 (1000x100) and w2 (100x10).
# For simplicity, no bias terms are used.
w1 = np.random.randn(input_dim, hidden_dim)
w2 = np.random.randn(hidden_dim, output_dim)

# Learning rate
lr = 1e-6
# Training loop: 500 iterations
for i in range(500):
    # Forward pass
    h = x.dot(w1)              # hidden layer pre-activation
    h_relu = np.maximum(h, 0)  # ReLU activation
    y_hat = h_relu.dot(w2)

    # Compute the loss (sum of squared errors)
    loss = np.square(y_hat - y).sum()

    # Backward pass: compute gradients via the chain rule
    y_hat_grad = 2.0 * (y_hat - y)
    w2_grad = h_relu.T.dot(y_hat_grad)
    h_relu_grad = y_hat_grad.dot(w2.T)
    h_grad = h_relu_grad.copy()
    h_grad[h < 0] = 0          # ReLU passes gradient only where its input was positive
    w1_grad = x.T.dot(h_grad)

    # Update parameters
    w1 = w1 - lr * w1_grad
    w2 = w2 - lr * w2_grad
    # print("epoch " + str(i) + " end......")
    # print("w1:")
    # print(w1)
    # print("w2:")
    # print(w2)
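As the comment above notes, once w1 and w2 are trained, applying the network to new data is just the forward pass with the learned parameters. A minimal sketch (x_new is a made-up sample for illustration):

# Inference with the trained parameters: forward pass only, no gradients needed
x_new = np.random.randn(1, input_dim)         # one hypothetical new sample
y_new = np.maximum(x_new.dot(w1), 0).dot(w2)  # prediction, shape (1, output_dim)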
"""
使用pytorch实现上面的二层神经网络
"""
# pytorch中
## 内积
# tensor.mm(tensor)
## 转置
# tensor.t()
## 乘方运算
# tensor.pow(n)
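This cheat sheet can be verified on a tiny tensor (the values are arbitrary, chosen only for illustration):

import torch

a = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
b = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
print(a.mm(b))   # matrix product; b is the identity, so the result equals a
print(a.t())     # transpose: [[1., 3.], [2., 4.]]
print(a.pow(2))  # element-wise square: [[1., 4.], [9., 16.]]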
import torch
device = torch.device('cpu')
# device = torch.device('cuda') # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and output data
x = torch.randn(N, D_in, device=device)
y = torch.randn(N, D_out, device=device)
# Randomly initialize weights
w1 = torch.randn(D_in, H, device=device)
w2 = torch.randn(H, D_out, device=device)
learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y
    h = x.mm(w1)
    h_relu = h.clamp(min=0)
    y_pred = h_relu.mm(w2)

    # Compute and print loss; loss is a scalar, and is stored in a PyTorch Tensor
    # of shape (); we can get its value as a Python number with loss.item().
    loss = (y_pred - y).pow(2).sum()
    # print(t, loss.item())

    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.t().mm(grad_y_pred)
    grad_h_relu = grad_y_pred.mm(w2.t())
    grad_h = grad_h_relu.clone()
    grad_h[h < 0] = 0
    grad_w1 = x.t().mm(grad_h)

    # Update weights using gradient descent
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
"""
使用pytorch的自动求导 重新实现
"""
import torch
device = torch.device('cpu')
# device = torch.device('cuda') # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold input and outputs
x = torch.randn(N, D_in, device=device)
y = torch.randn(N, D_out, device=device)
# Create random Tensors for weights; setting requires_grad=True means that we
# want to compute gradients for these Tensors during the backward pass.
w1 = torch.randn(D_in, H, device=device, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, requires_grad=True)
learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y using operations on Tensors. Since w1 and
    # w2 have requires_grad=True, operations involving these Tensors will cause
    # PyTorch to build a computational graph, allowing automatic computation of
    # gradients. Since we are no longer implementing the backward pass by hand we
    # don't need to keep references to intermediate values.
    y_pred = x.mm(w1).clamp(min=0).mm(w2)

    # Compute and print loss. Loss is a Tensor of shape (), and loss.item()
    # is a Python number giving its value.
    loss = (y_pred - y).pow(2).sum()
    # print(t, loss.item())

    # Use autograd to compute the backward pass. This call will compute the
    # gradient of loss with respect to all Tensors with requires_grad=True.
    # After this call w1.grad and w2.grad will be Tensors holding the gradient
    # of the loss with respect to w1 and w2 respectively.
    loss.backward()

    # Update weights using gradient descent. For this step we just want to mutate
    # the values of w1 and w2 in-place; we don't want to build up a computational
    # graph for the update steps, so we use the torch.no_grad() context manager
    # to prevent PyTorch from building a computational graph for the updates.
    with torch.no_grad():
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad

        # Manually zero the gradients after running the backward pass
        w1.grad.zero_()
        w2.grad.zero_()
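In practice, the manual update and gradient zeroing are usually delegated to an optimizer from torch.optim. A minimal sketch of an equivalent loop using torch.optim.SGD (same x, y, w1, w2 as above):

optimizer = torch.optim.SGD([w1, w2], lr=learning_rate)
for t in range(500):
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    loss = (y_pred - y).pow(2).sum()
    optimizer.zero_grad()  # clear the gradients from the previous iteration
    loss.backward()        # compute w1.grad and w2.grad
    optimizer.step()       # apply the gradient-descent update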
# Implementing a layer of the network yourself:
# define a custom ReLU class that subclasses torch.autograd.Function.
# Both forward and backward must be implemented.
# The forward pass uses the forward defined here; the backward pass will
# look up the backward defined here, and raises NotImplementedError if it
# is missing.
class MyReLU(torch.autograd.Function):
    """
    We can implement our own custom autograd Functions by subclassing
    torch.autograd.Function and implementing the forward and backward passes
    which operate on Tensors.
    """
    @staticmethod
    def forward(ctx, x):
        """
        In the forward pass we receive a context object and a Tensor containing the
        input; we must return a Tensor containing the output, and we can use the
        context object to cache objects for use in the backward pass.
        """
        ctx.save_for_backward(x)
        return x.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        """
        In the backward pass we receive the context object and a Tensor containing
        the gradient of the loss with respect to the output produced during the
        forward pass. We can retrieve cached data from the context object, and must
        compute and return the gradient of the loss with respect to the input to the
        forward function.
        """
        x, = ctx.saved_tensors
        grad_x = grad_output.clone()
        grad_x[x < 0] = 0
        return grad_x
device = torch.device('cpu')
# device = torch.device('cuda') # Uncomment this to run on GPU
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold input and output
x = torch.randn(N, D_in, device=device)
y = torch.randn(N, D_out, device=device)
# Create random Tensors for weights.
w1 = torch.randn(D_in, H, device=device, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, requires_grad=True)
learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y using operations on Tensors; we call our
    # custom ReLU implementation using the MyReLU.apply function
    y_pred = MyReLU.apply(x.mm(w1)).mm(w2)

    # Compute and print loss
    loss = (y_pred - y).pow(2).sum()
    # print(t, loss.item())

    # Use autograd to compute the backward pass.
    loss.backward()

    with torch.no_grad():
        # Update weights using gradient descent
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad

        # Manually zero the gradients after running the backward pass
        w1.grad.zero_()
        w2.grad.zero_()
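Before relying on a hand-written backward, it is worth checking it numerically. torch.autograd.gradcheck compares the analytic gradient against finite differences; a minimal sketch (gradcheck expects double-precision inputs):

from torch.autograd import gradcheck

test_input = torch.randn(4, 5, dtype=torch.float64, requires_grad=True)
print(gradcheck(MyReLU.apply, (test_input,)))  # True if backward matches numerics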