cnn.py cs231n
If there are any errors, please point them out; much appreciated.
import numpy as np

from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *


class ThreeLayerConvNet(object):
    """
    A three-layer convolutional network with the following architecture:

    conv - relu - 2x2 max pool - affine - relu - affine - softmax

    The network operates on minibatches of data that have shape (N, C, H, W)
    consisting of N images, each with height H and width W and with C input
    channels.
    """

    def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
                 hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
                 dtype=np.float32):
        """
        Initialize a new network.

        Inputs:
        - input_dim: Tuple (C, H, W) giving size of input data
        - num_filters: Number of filters to use in the convolutional layer
        - filter_size: Size of filters to use in the convolutional layer
        - hidden_dim: Number of units to use in the fully-connected hidden layer
        - num_classes: Number of scores to produce from the final affine layer.
        - weight_scale: Scalar giving standard deviation for random initialization
          of weights.
        - reg: Scalar giving L2 regularization strength
        - dtype: numpy datatype to use for computation.
        """
        C, H, W = input_dim
        self.params = {}
        self.reg = reg
        self.dtype = dtype

        # Conv weights: num_filters filters of shape (C, filter_size, filter_size).
        self.params['W1'] = np.random.randn(num_filters, C, filter_size, filter_size) * weight_scale
        self.params['b1'] = np.zeros(num_filters)
        # The 2x2 max pool halves each spatial dimension, so the first affine
        # layer sees num_filters * (H // 2) * (W // 2) flattened inputs.
        self.params['W2'] = np.random.randn(num_filters * (H // 2) * (W // 2), hidden_dim) * weight_scale
        self.params['b2'] = np.zeros(hidden_dim)
        self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale
        self.params['b3'] = np.zeros(num_classes)
        # Note: np.random.randn takes dimensions as separate ints, while np.zeros takes a shape tuple (or a single int).

        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)

    def loss(self, X, y=None):
"""
Evaluate loss and gradient for the three-layer convolutional network. Input / output: Same API as TwoLayerNet in fc_net.py.
"""
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
W3, b3 = self.params['W3'], self.params['b3'] # pass conv_param to the forward pass for the convolutional layer
filter_size = W1.shape[2]
conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2} # pass pool_param to the forward pass for the max-pooling layer
pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2} scores = None
out1,cache1=conv_relu_pool_forward(X,W1,b1,conv_param,pool_param) out=out1.reshape(out1.shape[0],-1) out,cache2=affine_relu_forward(out,W2,b2) scores,cache3=affine_forward(out,W3,b3) if y is None:
return scores loss, grads = 0, {}
        # Softmax loss plus L2 regularization on every weight matrix.
        loss, dout = softmax_loss(scores, y)
        loss += self.reg * 0.5 * np.sum(W3 ** 2)
        loss += self.reg * 0.5 * np.sum(W2 ** 2)
        loss += self.reg * 0.5 * np.sum(W1 ** 2)

        # Backward pass, adding the regularization gradient to each weight.
        dout, grads['W3'], grads['b3'] = affine_backward(dout, cache3)
        grads['W3'] += W3 * self.reg
        dout, grads['W2'], grads['b2'] = affine_relu_backward(dout, cache2)
        grads['W2'] += W2 * self.reg
        dout = dout.reshape(*out1.shape)
        dout, grads['W1'], grads['b1'] = conv_relu_pool_backward(dout, cache1)
        grads['W1'] += W1 * self.reg

        return loss, grads
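For a quick sanity check, here is a minimal usage sketch (my own addition, not part of the assignment file). It assumes the usual cs231n assignment 2 setup so that the cs231n package imported above is available, and it only verifies the output shapes and that a loss plus gradients come back:

import numpy as np

model = ThreeLayerConvNet(input_dim=(3, 32, 32), num_filters=32, filter_size=7,
                          hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=1e-3)

# A tiny random minibatch: 2 images, 3 channels, 32x32 pixels.
X = np.random.randn(2, 3, 32, 32)
y = np.random.randint(10, size=2)

scores = model.loss(X)           # no labels -> test mode, returns class scores
print(scores.shape)              # expected: (2, 10)

loss, grads = model.loss(X, y)   # with labels -> training mode
print(loss)
print(sorted(grads.keys()))      # ['W1', 'W2', 'W3', 'b1', 'b2', 'b3']

A numerical gradient check like the one in the assignment notebook is the usual way to confirm the backward pass; the relative error for each parameter should come out small.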