PyTorch -- RNN language models (LSTM, BiLSTM) -- "Recurrent neural network based language model"
The paper applies a recurrent neural network to language modeling; the code below trains small PyTorch models that predict the next word (or character) in a sequence.
Paper link: 88888888
Model architecture diagram:

For the underlying theory, refer to the paper; the code and comments below are from https://github.com/graykode/nlp-tutorial:
# -*- coding: utf-8 -*-
# @time : 2019/11/9 15:12
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

dtype = torch.FloatTensor

sentences = ["i like dog", "i love coffee", "i hate milk"]

word_list = " ".join(sentences).split()
word_list = list(set(word_list))
word_dict = {w: i for i, w in enumerate(word_list)}
number_dict = {i: w for i, w in enumerate(word_list)}
n_class = len(word_dict)

# TextRNN Parameters
batch_size = len(sentences)
n_step = 2  # number of cells (= number of steps)
n_hidden = 5  # number of hidden units in one cell


def make_batch(sentences):
    input_batch = []
    target_batch = []
    for sen in sentences:
        word = sen.split()
        input = [word_dict[n] for n in word[:-1]]
        target = word_dict[word[-1]]
        input_batch.append(np.eye(n_class)[input])
        target_batch.append(target)
    return input_batch, target_batch


# to torch.Tensor
input_batch, target_batch = make_batch(sentences)
input_batch = Variable(torch.Tensor(input_batch))
target_batch = Variable(torch.LongTensor(target_batch))


class TextRNN(nn.Module):
    def __init__(self):
        super(TextRNN, self).__init__()
        self.rnn = nn.RNN(input_size=n_class, hidden_size=n_hidden, batch_first=True)
        self.W = nn.Parameter(torch.randn([n_hidden, n_class]).type(dtype))
        self.b = nn.Parameter(torch.randn([n_class]).type(dtype))

    def forward(self, hidden, X):
        if self.rnn.batch_first:
            # X : [batch_size, time_step, word_vector]
            outputs, hidden = self.rnn(X, hidden)
            # outputs : [batch_size, time_step, num_directions(=1) * n_hidden]
            output = outputs[:, -1, :]  # [batch_size, num_directions(=1) * n_hidden]
            model = torch.mm(output, self.W) + self.b  # model : [batch_size, n_class]
            return model
        else:
            X = X.transpose(0, 1)  # X : [n_step, batch_size, n_class]
            outputs, hidden = self.rnn(X, hidden)
            # outputs : [n_step, batch_size, num_directions(=1) * n_hidden]
            # hidden : [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
            output = outputs[-1, :, :]  # [batch_size, num_directions(=1) * n_hidden]
            model = torch.mm(output, self.W) + self.b  # model : [batch_size, n_class]
            return model


model = TextRNN()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training
for epoch in range(5000):
    optimizer.zero_grad()
    # hidden : [num_layers * num_directions, batch_size, n_hidden]
    hidden = Variable(torch.zeros(1, batch_size, n_hidden))
    # input_batch : [batch_size, n_step, n_class]
    output = model(hidden, input_batch)
    # output : [batch_size, n_class], target_batch : [batch_size] (LongTensor, not one-hot)
    loss = criterion(output, target_batch)
    if (epoch + 1) % 1000 == 0:
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
    loss.backward()
    optimizer.step()

# Predict
hidden_initial = Variable(torch.zeros(1, batch_size, n_hidden))
predict = model(hidden_initial, input_batch).data.max(1, keepdim=True)[1]
print([sen.split()[:2] for sen in sentences], '->', [number_dict[n.item()] for n in predict.squeeze()])
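As a quick sanity check on the shape conventions used above, here is a minimal sketch with toy sizes (the sizes are assumptions, not taken from the tutorial): with batch_first=True, nn.RNN consumes a [batch, step, feature] tensor and returns per-step outputs plus the final hidden state.

import torch
import torch.nn as nn

# Hypothetical toy sizes: 7 one-hot classes, 3 sequences of 2 steps, 5 hidden units.
rnn = nn.RNN(input_size=7, hidden_size=5, batch_first=True)
x = torch.zeros(3, 2, 7)    # [batch_size, n_step, n_class]
h0 = torch.zeros(1, 3, 5)   # [num_layers * num_directions, batch_size, n_hidden]
out, hn = rnn(x, h0)
print(out.shape)            # torch.Size([3, 2, 5]) -> [batch_size, n_step, n_hidden]
print(hn.shape)             # torch.Size([1, 3, 5])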
RNN model with an LSTM unit (character level: given the first three characters of a word, predict the last one):
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable

dtype = torch.FloatTensor

char_arr = [c for c in 'abcdefghijklmnopqrstuvwxyz']
word_dict = {n: i for i, n in enumerate(char_arr)}
number_dict = {i: w for i, w in enumerate(char_arr)}
n_class = len(word_dict)  # number of classes (= vocabulary size)

seq_data = ['make', 'need', 'coal', 'word', 'love', 'hate', 'live', 'home', 'hash', 'star']

# TextLSTM Parameters
n_step = 3
n_hidden = 128


def make_batch(seq_data):
    input_batch, target_batch = [], []
    for seq in seq_data:
        input = [word_dict[n] for n in seq[:-1]]  # 'm', 'a', 'k' is the input
        target = word_dict[seq[-1]]  # 'e' is the target
        input_batch.append(np.eye(n_class)[input])
        target_batch.append(target)
    return Variable(torch.Tensor(input_batch)), Variable(torch.LongTensor(target_batch))


class TextLSTM(nn.Module):
    def __init__(self):
        super(TextLSTM, self).__init__()
        self.lstm = nn.LSTM(input_size=n_class, hidden_size=n_hidden)
        self.W = nn.Parameter(torch.randn([n_hidden, n_class]).type(dtype))
        self.b = nn.Parameter(torch.randn([n_class]).type(dtype))

    def forward(self, X):
        input = X.transpose(0, 1)  # input : [n_step, batch_size, n_class]
        hidden_state = Variable(torch.zeros(1, len(X), n_hidden))  # [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        cell_state = Variable(torch.zeros(1, len(X), n_hidden))    # [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        outputs, (_, _) = self.lstm(input, (hidden_state, cell_state))
        outputs = outputs[-1]  # [batch_size, n_hidden]
        model = torch.mm(outputs, self.W) + self.b  # model : [batch_size, n_class]
        return model


input_batch, target_batch = make_batch(seq_data)

model = TextLSTM()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training
for epoch in range(1000):
    output = model(input_batch)
    loss = criterion(output, target_batch)
    if (epoch + 1) % 100 == 0:
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Predict
inputs = [sen[:3] for sen in seq_data]
predict = model(input_batch).data.max(1, keepdim=True)[1]
print(inputs, '->', [number_dict[n.item()] for n in predict.squeeze()])
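One simplification worth noting (a hedged sketch with assumed toy sizes, not from the original post): nn.LSTM defaults both h_0 and c_0 to zeros when no initial state is passed, so the explicit hidden_state/cell_state tensors above could be omitted without changing the result.

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=26, hidden_size=128)  # same sizes as TextLSTM above
x = torch.zeros(3, 10, 26)                      # [n_step, batch_size, n_class]
outputs, (h_n, c_n) = lstm(x)                   # omitting (h_0, c_0) uses all-zero initial states
print(outputs.shape)                            # torch.Size([3, 10, 128])
print(h_n.shape, c_n.shape)                     # torch.Size([1, 10, 128]) each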
BiLSTM RNN model:
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F

dtype = torch.FloatTensor

sentence = (
    'Lorem ipsum dolor sit amet consectetur adipisicing elit '
    'sed do eiusmod tempor incididunt ut labore et dolore magna '
    'aliqua Ut enim ad minim veniam quis nostrud exercitation'
)

word_dict = {w: i for i, w in enumerate(list(set(sentence.split())))}
number_dict = {i: w for i, w in enumerate(list(set(sentence.split())))}
n_class = len(word_dict)
max_len = len(sentence.split())
n_hidden = 5


def make_batch(sentence):
    input_batch = []
    target_batch = []
    words = sentence.split()
    for i, word in enumerate(words[:-1]):
        input = [word_dict[n] for n in words[:(i + 1)]]
        input = input + [0] * (max_len - len(input))  # pad every prefix to max_len
        target = word_dict[words[i + 1]]
        input_batch.append(np.eye(n_class)[input])
        target_batch.append(target)
    return Variable(torch.Tensor(input_batch)), Variable(torch.LongTensor(target_batch))


class BiLSTM(nn.Module):
    def __init__(self):
        super(BiLSTM, self).__init__()
        self.lstm = nn.LSTM(input_size=n_class, hidden_size=n_hidden, bidirectional=True)
        self.W = nn.Parameter(torch.randn([n_hidden * 2, n_class]).type(dtype))
        self.b = nn.Parameter(torch.randn([n_class]).type(dtype))

    def forward(self, X):
        input = X.transpose(0, 1)  # input : [max_len, batch_size, n_class]
        hidden_state = Variable(torch.zeros(1 * 2, len(X), n_hidden))  # [num_layers(=1) * num_directions(=2), batch_size, n_hidden]
        cell_state = Variable(torch.zeros(1 * 2, len(X), n_hidden))    # [num_layers(=1) * num_directions(=2), batch_size, n_hidden]
        outputs, (_, _) = self.lstm(input, (hidden_state, cell_state))
        outputs = outputs[-1]  # [batch_size, num_directions(=2) * n_hidden]
        model = torch.mm(outputs, self.W) + self.b  # model : [batch_size, n_class]
        return model


input_batch, target_batch = make_batch(sentence)

model = BiLSTM()
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training
for epoch in range(10000):
    output = model(input_batch)
    loss = criterion(output, target_batch)
    if (epoch + 1) % 1000 == 0:
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Predict
predict = model(input_batch).data.max(1, keepdim=True)[1]
print(sentence)
print([number_dict[n.item()] for n in predict.squeeze()])
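The key difference with bidirectional=True, shown as a minimal shape sketch under assumed toy sizes: the feature dimension of outputs doubles to num_directions * n_hidden, which is why the projection weight W above has n_hidden * 2 rows.

import torch
import torch.nn as nn

# Toy sizes are assumptions for illustration only.
bilstm = nn.LSTM(input_size=27, hidden_size=5, bidirectional=True)
x = torch.zeros(21, 4, 27)       # [max_len, batch_size, n_class]
outputs, (h_n, c_n) = bilstm(x)
print(outputs.shape)             # torch.Size([21, 4, 10]) -> last dim = 2 * n_hidden
print(h_n.shape)                 # torch.Size([2, 4, 5])   -> [num_layers * num_directions, batch_size, n_hidden]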