RNN and Sentiment Classification in Practice: Loading the IMDB Dataset

Sentiment Analysis

Two approaches:
- SimpleRNNCell: drive the recurrence yourself, unrolling the time steps in call() (single layer or multi-layer)
- RNN layer (SimpleRNN): let Keras manage the hidden state and the unrolling for you

SimpleRNNCell (manual unrolling, two stacked cells)
import os
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

batchsz = 128
# keep only the most frequent words
total_words = 10000
max_review_len = 80
embedding_len = 100

# x_train: [b, 80]
# x_test:  [b, 80]
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
# pad/truncate every review to exactly max_review_len tokens
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsz, drop_remainder=True)

print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
print('x_test shape:', x_test.shape)
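As an aside, pad_sequences pads and truncates from the front of each sequence by default, so it is the beginning of a long review that gets cut off. A minimal sketch of that behavior (the toy sequences are illustrative, not taken from the dataset):

from tensorflow import keras

seqs = [[1, 2, 3], [4, 5, 6, 7, 8]]
print(keras.preprocessing.sequence.pad_sequences(seqs, maxlen=4))
# [[0 1 2 3]    <- short sequences are pre-padded with 0
#  [5 6 7 8]]   <- long sequences are pre-truncated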
class MyRNN(keras.Model):
    def __init__(self, units):
        super(MyRNN, self).__init__()
        # initial hidden states for the two cells: [b, 64]
        # (drop_remainder=True guarantees every batch matches this shape)
        self.state0 = [tf.zeros([batchsz, units])]
        self.state1 = [tf.zeros([batchsz, units])]
        # transform token ids to an embedding representation
        # [b, 80] => [b, 80, 100]
        self.embedding = layers.Embedding(total_words,
                                          embedding_len,
                                          input_length=max_review_len)
        # [b, 80, 100], h_dim: 64
        # RNN: cell1, cell2, cell3
        # units=64: each 100-dim word vector is mapped into a 64-dim hidden state
        self.rnn_cell0 = layers.SimpleRNNCell(units, dropout=0.5)
        self.rnn_cell1 = layers.SimpleRNNCell(units, dropout=0.5)
        # fc: [b, 64] => [b, 1]
        self.outlayer = layers.Dense(1)

    def call(self, inputs, training=None):
        """
        net(x), net(x, training=True): train mode
        net(x, training=False): test mode
        :param inputs: [b, 80]
        :param training: whether this forward pass is training or inference
        :return:
        """
        # [b, 80]
        x = inputs
        # embedding: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # run the RNN cells over the time axis
        # [b, 80, 100] => [b, 64]
        state0 = self.state0
        state1 = self.state1
        for word in tf.unstack(x, axis=1):  # word: [b, 100]
            # h1 = x @ Wxh + h0 @ Whh
            # out0: [b, 64]
            out0, state0 = self.rnn_cell0(word, state0, training=training)
            # out1: [b, 64]
            out1, state1 = self.rnn_cell1(out0, state1, training=training)
        # out: [b, 64] => [b, 1]
        x = self.outlayer(out1)
        # p(y is pos|x)
        prob = tf.sigmoid(x)
        return prob


def main():
    units = 64
    epochs = 4

    model = MyRNN(units)
    model.compile(optimizer=keras.optimizers.Adam(0.001),
                  loss=tf.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    model.evaluate(db_test)


if __name__ == '__main__':
    main()
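To see what one iteration of the unstack loop computes, the minimal sketch below runs a standalone SimpleRNNCell on a single fake "word" tensor and prints the shapes (the batch size of 4 is illustrative; the other dimensions match the model above):

import tensorflow as tf
from tensorflow.keras import layers

cell = layers.SimpleRNNCell(64)
word = tf.random.normal([4, 100])   # one time step: [b, embedding_len]
state = [tf.zeros([4, 64])]         # initial hidden state: [b, units]
out, state = cell(word, state)      # out = tanh(word @ Wxh + h0 @ Whh + b)
print(out.shape, state[0].shape)    # (4, 64) (4, 64)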
Multi-layer SimpleRNN (keras.Sequential)
import os
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')

batchsz = 128
# keep only the most frequent words
total_words = 10000   # vocabulary size: 10000 words
max_review_len = 80   # every review is cut/padded to 80 tokens
embedding_len = 100

# x_train: [b, 80]
# x_test:  [b, 80]
(x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=total_words)
x_train = keras.preprocessing.sequence.pad_sequences(x_train, maxlen=max_review_len)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, maxlen=max_review_len)

db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
# drop_remainder: discard the last batch if it is smaller than batchsz
db_train = db_train.shuffle(1000).batch(batchsz, drop_remainder=True)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.batch(batchsz, drop_remainder=True)

print('x_train shape:', x_train.shape, tf.reduce_max(y_train), tf.reduce_min(y_train))
print('x_test shape:', x_test.shape)


class MyRNN(keras.Model):
    def __init__(self, units):
        super(MyRNN, self).__init__()
        # transform token ids to an embedding representation
        # [b, 80] => [b, 80, 100]; embedding_len=100: each word becomes a 100-dim vector
        self.embedding = layers.Embedding(total_words,
                                          embedding_len,
                                          input_length=max_review_len)
        # [b, 80, 100], h_dim: 64
        self.rnn = keras.Sequential([
            layers.SimpleRNN(units,
                             dropout=0.5,
                             return_sequences=True,
                             unroll=True),
            layers.SimpleRNN(units, dropout=0.5, unroll=True)
        ])
        # fc: [b, 64] => [b, 1], the classification result
        self.outlayer = layers.Dense(1)

    def call(self, inputs, training=None):
        """
        net(x), net(x, training=True): train mode
        net(x, training=False): test mode
        :param inputs: [b, 80]
        :param training: whether this forward pass is training or inference
        :return:
        """
        # [b, 80]
        x = inputs
        # embedding: [b, 80] => [b, 80, 100]
        x = self.embedding(x)
        # run the stacked SimpleRNN layers
        # x: [b, 80, 100] => [b, 64]
        x = self.rnn(x, training=training)
        # out: [b, 64] => [b, 1]
        x = self.outlayer(x)
        # p(y is pos|x)
        prob = tf.sigmoid(x)
        return prob


def main():
    units = 64
    epochs = 4

    model = MyRNN(units)
    model.compile(optimizer=keras.optimizers.Adam(0.001),
                  loss=tf.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])
    model.fit(db_train, epochs=epochs, validation_data=db_test)
    model.evaluate(db_test)


if __name__ == '__main__':
    main()
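The outline also mentions the RNNCell route: instead of the manual loop in the first version, a list of cells can be handed to layers.RNN, which drives the recurrence itself. A minimal sketch under that assumption (the random tensor stands in for the embedding output; this code is not from the original post):

import tensorflow as tf
from tensorflow.keras import layers

units = 64
# two stacked SimpleRNNCells, unrolled by layers.RNN instead of a Python loop
rnn = layers.RNN([layers.SimpleRNNCell(units, dropout=0.5),
                  layers.SimpleRNNCell(units, dropout=0.5)])

x = tf.random.normal([128, 80, 100])  # [b, max_review_len, embedding_len]
out = rnn(x, training=False)          # last-step output only: [b, 64]
print(out.shape)                      # (128, 64)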