[code=python]
import os
import sys
import time

import numpy
import shelve
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams


class dA(object):
"""Denoising Auto-Encoder class (dA) A denoising autoencoders tries to reconstruct the input from a corrupted
version of it by projecting it first in a latent space and reprojecting
it afterwards back in the input space. Please refer to Vincent et al.,2008
for more details. If x is the input then equation (1) computes a partially
destroyed version of x by means of a stochastic mapping q_D. Equation (2)
computes the projection of the input into the latent space. Equation (3)
computes the reconstruction of the input, while equation (4) computes the
reconstruction error. .. math:: \tilde{x} ~ q_D(\tilde{x}|x) (1) y = s(W \tilde{x} + b) (2) x = s(W' y + b') (3) L(x,z) = -sum_{k=1}^d [x_k \log z_k + (1-x_k) \log( 1-z_k)] (4) """ def __init__(
        self,
        numpy_rng,
        theano_rng=None,
        input=None,
        #n_visible=784,
        n_hidden=100,
        W=None,
        bhid=None,
        #bvis=None
    ):
"""
Initialize the dA class by specifying the number of visible units (the
dimension d of the input ), the number of hidden units ( the dimension
d' of the latent or hidden space ) and the corruption level. The
constructor also receives symbolic variables for the input, weights and
bias. Such a symbolic variables are useful when, for example the input
is the result of some computations, or when weights are shared between
the dA and an MLP layer. When dealing with SdAs this always happens,
the dA on layer 2 gets as input the output of the dA on layer 1,
and the weights of the dA are used in the second stage of training
to construct an MLP. :type numpy_rng: numpy.random.RandomState
:param numpy_rng: number random generator used to generate weights :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
:param theano_rng: Theano random generator; if None is given one is
generated based on a seed drawn from `rng` :type input: theano.tensor.TensorType
:param input: a symbolic description of the input or None for
standalone dA :type n_hidden: int
:param n_hidden: number of hidden units :type W: theano.tensor.TensorType
:param W: Theano variable pointing to a set of weights that should be
shared belong the dA and another architecture; if dA should
be standalone set this to None :type bhid: theano.tensor.TensorType
:param bhid: Theano variable pointing to a set of biases values (for
hidden units) that should be shared belong dA and another
architecture; if dA should be standalone set this to None """
        #self.n_visible = n_visible
        self.n_hidden = n_hidden

        # create a Theano random generator that gives symbolic random values
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        # note : W' was written as `W_prime` and b' as `b_prime`
        if not W:
            # W is initialized with `initial_W`, which is uniformly sampled
            # from -4*sqrt(6./(n_hidden+n_hidden)) to
            # 4*sqrt(6./(n_hidden+n_hidden)); the output of uniform is
            # converted using asarray to dtype theano.config.floatX so that
            # the code is runnable on a GPU. With n_visible removed, the
            # weight matrix is square (n_hidden x n_hidden), so inputs must
            # have n_hidden columns.
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low=-4 * numpy.sqrt(6. / (n_hidden + n_hidden)),
                    high=4 * numpy.sqrt(6. / (n_hidden + n_hidden)),
                    size=(n_hidden, n_hidden)
                ),
                dtype=theano.config.floatX
            )
            W = theano.shared(value=initial_W, name='W', borrow=True)

        '''
        if not bvis:
            bvis = theano.shared(
                value=numpy.zeros(
                    n_visible,
                    dtype=theano.config.floatX
                ),
                borrow=True
            )
        '''
        if not bhid:
            bhid = theano.shared(
                value=numpy.zeros(
                    n_hidden,
                    dtype=theano.config.floatX
                ),
                name='b',
                borrow=True
            )

        self.W = W
        # b corresponds to the bias of the hidden
        self.b = bhid
        # b_prime corresponds to the bias of the visible
        #self.b_prime = bvis
        # tied weights, therefore W_prime is W transpose
        #self.W_prime = self.W.T
        self.theano_rng = theano_rng
        # if no input is given, generate a variable representing the input
        if input is None:
            # we use a matrix because we expect a minibatch of several
            # examples, each example being a row
            self.x = T.dmatrix(name='input')
        else:
            self.x = input

        self.params = [self.W, self.b]
    # end-snippet-1
    def get_hidden_values(self):
        """ Computes the values of the hidden layer """
        # note : unlike the standard dA, this sums the sigmoid activations
        #        over the rows of the input matrix, producing a single
        #        n_hidden-dimensional vector per input matrix
        return T.sum(T.nnet.sigmoid(T.dot(self.x, self.W) + self.b), axis=0)

    '''
    def get_corrupted_input(self, input, corruption_level):
        """This function keeps ``1-corruption_level`` entries of the inputs
        the same and zeroes out a randomly selected subset of size
        ``corruption_level``.

        Note : the first argument of theano.rng.binomial is the shape (size)
               of the random numbers it should produce; the second argument
               is the number of trials; the third argument is the probability
               of success of any trial.

               This will produce an array of 0s and 1s, where 1 appears with
               probability 1 - ``corruption_level`` and 0 with probability
               ``corruption_level``.

               The binomial function returns int64 by default. int64
               multiplied by the input type (floatX) always returns float64.
               To keep all data in floatX when floatX is float32, we set the
               dtype of the binomial to floatX. As in our case the value of
               the binomial is always 0 or 1, this doesn't change the
               result. This is needed to allow the GPU to work correctly, as
               it only supports float32 for now.

        """
        return self.theano_rng.binomial(size=input.shape, n=1,
                                        p=1 - corruption_level,
                                        dtype=theano.config.floatX) * input
    '''
    '''
    def get_reconstructed_input(self, hidden):
        """Computes the reconstructed input given the values of the
        hidden layer """
        return T.nnet.sigmoid(T.dot(hidden, self.W_prime) + self.b_prime)

    def get_cost_updates(self, corruption_level, learning_rate):
        """ This function computes the cost and the updates for one training
        step of the dA """
        #tilde_x = self.get_corrupted_input(self.x, corruption_level)
        y = self.get_hidden_values(tilde_x)
        #z = self.get_reconstructed_input(y)
        # note : we sum over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
        # note : L is now a vector, where each element is the
        #        cross-entropy cost of the reconstruction of the
        #        corresponding example of the minibatch. We need to
        #        compute the average of all these to get the cost of
        #        the minibatch
        cost = T.mean(L)

        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]

        return (cost, updates)
    '''


x = T.fmatrix('x')  # question matrix
y = T.fmatrix('y')  # answer matrix
index = T.lscalar()
rng = numpy.random.RandomState(23455)
theano_rng = RandomStreams(rng.randint(2 ** 30))
n_hidden = 2  # number of question/answer encoder pairs (each dA has 100 units)
learning_rate = 0.1
da_q = []
da_a = []
for count in range(n_hidden):
    da_q.append(dA(
        numpy_rng=rng,
        theano_rng=theano_rng,
        input=x,
        #n_visible=28 * 28,
        n_hidden=100
    ))

for count in range(n_hidden):
    da_a.append(dA(
        numpy_rng=rng,
        theano_rng=theano_rng,
        input=y,
        #n_visible=28 * 28,
        n_hidden=100
    ))
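
# pair each question encoder with the corresponding answer encoder and
# penalise the squared distance between their hidden representations, so a
# question and its answer are pulled towards the same point in latent space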
cost_matrix = []
for hid_index in range(n_hidden):
    cost_matrix.append(T.sum(T.sqr(da_q[hid_index].get_hidden_values()
                                   - da_a[hid_index].get_hidden_values()) / 2))
cost = T.sum(cost_matrix)
params = da_q[0].params + da_a[0].params
for hid_index in range(1, n_hidden):
    params += da_q[hid_index].params + da_a[hid_index].params
gparams = T.grad(cost, params)
updates = []
for param, gparam in zip(params, gparams):
    updates.append((param, param - learning_rate * gparam))
db = shelve.open(r'data\training_data\training_data_30_50_1_9_games.dat')
x1 = db['train_set1']
q, a = x1[0]
q1, a1 = x1[1]
train_da = theano.function(
    [index],
    cost,
    updates=updates,
    on_unused_input='ignore',  # `index` does not appear in the graph
    givens={
        x: x1[0][0],
        y: x1[0][1]
    }
)
print(train_da(0))
[/code]
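
As written, `train_da` only ever trains on the first question/answer pair, because `givens` pins `x` and `y` to `x1[0]`. Below is a minimal sketch (not from the original post) of a fuller training loop, assuming each entry of `x1` is a `(question_matrix, answer_matrix)` pair of float32 arrays with 100 columns, as the square 100x100 weight matrices require:

[code=python]
# sketch: compile a step function that takes the question and answer
# matrices as direct inputs instead of pinning them with `givens`,
# then sweep the whole training set for a few epochs
train_step = theano.function([x, y], cost, updates=updates)

n_epochs = 10  # assumed value; the post does not specify one
for epoch in range(n_epochs):
    epoch_cost = 0.
    for q_mat, a_mat in x1:
        epoch_cost += train_step(q_mat, a_mat)
    print('epoch %d, mean cost %f' % (epoch, epoch_cost / len(x1)))
[/code]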

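Once trained, the same distance that the cost minimises can be used to score how well a candidate answer matches a question. A hedged sketch, where `candidate_answers` is a hypothetical list of answer matrices with the same shape as the training ones:

[code=python]
# sketch: score a question against candidate answers by the summed squared
# distance between hidden representations (lower score = better match)
q_hidden = theano.function([x], [enc.get_hidden_values() for enc in da_q])
a_hidden = theano.function([y], [enc.get_hidden_values() for enc in da_a])

def match_score(q_mat, a_mat):
    return sum(((hq - ha) ** 2).sum() / 2.
               for hq, ha in zip(q_hidden(q_mat), a_hidden(a_mat)))

# `candidate_answers` is hypothetical, not defined in the original post
scores = [match_score(q, cand) for cand in candidate_answers]
best = candidate_answers[numpy.argmin(scores)]
[/code]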