Run main.py first to serialize the text and build the vocabulary, then run train.py to train the model.

dataset.py

from torch.utils.data import DataLoader, Dataset
import torch
import os
from utils import tokenlize
import config


class ImdbDataset(Dataset):
    def __init__(self, train=True):
        super(ImdbDataset, self).__init__()
        data_path = r"H:\073-nlp自然语言处理-v5.bt38[周大伟]\073-nlp自然语言处理-v5.bt38[周大伟]\第四天\代码\data\aclImdb_v1\aclImdb"
        data_path += r"\train" if train else r"\test"
        self.total_path = []
        for temp_path in [r"\pos", r"\neg"]:
            cur_path = data_path + temp_path
            self.total_path += [os.path.join(cur_path, i) for i in os.listdir(cur_path) if i.endswith(".txt")]

    def __getitem__(self, idx):
        file = self.total_path[idx]
        with open(file, encoding="utf-8") as f:  # close the handle after reading
            review = f.read()
        review = tokenlize(review)
        # file names look like "1234_7.txt"; the digit after "_" is the rating
        label = int(file.split("_")[-1].split(".")[0])
        label = 0 if label < 5 else 1  # ratings below 5 are negative, the rest positive
        return review, label

    def __len__(self):
        return len(self.total_path)


def collate_fn(batch):
    """
    Collate a batch: turn token lists into padded index tensors.
    :param batch: list of (review, label) pairs
    :return: (reviews, labels) as LongTensors
    """
    reviews, labels = zip(*batch)
    reviews = torch.LongTensor([config.ws.transform(i, max_len=config.max_len) for i in reviews])
    labels = torch.LongTensor(labels)
    return reviews, labels


def get_dataloader(train):
    # pass the flag through; the original hard-coded train=True, so the
    # test loader silently returned training data
    imdb_dataset = ImdbDataset(train=train)
    batch_size = config.train_batch_size if train else config.test_batch_size
    return DataLoader(imdb_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)


if __name__ == '__main__':
    for idx, (review, label) in enumerate(get_dataloader(train=True)):
        print(review)
        print(label)
        break
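All of the listings import a config module that the post never shows. From the attributes used (config.ws, config.max_len, config.train_batch_size, config.test_batch_size, config.device), it must look roughly like the sketch below. The concrete values are assumptions, not taken from the post, and ws is loaded defensively so that dataset.py can still be imported before main.py has produced ws.pkl.

# config.py -- a minimal sketch; the values here are assumed, not from the post
import pickle
import torch

try:
    ws = pickle.load(open("./models/ws.pkl", "rb"))  # vocabulary built by main.py
except FileNotFoundError:
    ws = None  # not built yet; main.py only needs the raw dataset, not the vocabulary

max_len = 200           # assumed sequence length
train_batch_size = 128  # assumed
test_batch_size = 128   # assumed
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")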

  utils.py

"""
实现额外的方法
"""
import re def tokenlize(sentence):
"""
进行文本分词
:param sentence: str
:return: [str,str,str]
""" fileters = ['!', '"', '#', '$', '%', '&', '\(', '\)', '\*', '\+', ',', '-', '\.', '/', ':', ';', '<', '=', '>',
'\?', '@', '\[', '\\', '\]', '^', '_', '`', '\{', '\|', '\}', '~', '\t', '\n', '\x97', '\x96', '”', '“', ]
sentence = sentence.lower() #把大写转化为小写
sentence = re.sub("<br />"," ",sentence)
# sentence = re.sub("I'm","I am",sentence)
# sentence = re.sub("isn't","is not",sentence)
sentence = re.sub("|".join(fileters)," ",sentence)
result = [i for i in sentence.split(" ") if len(i)>0] return result
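A quick check of what tokenlize produces (the sample review is made up; note that apostrophes are not in the filter list, so contractions survive):

from utils import tokenlize

print(tokenlize("This movie was great!<br />I'd watch it again."))
# ['this', 'movie', 'was', 'great', "i'd", 'watch', 'it', 'again']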

word_sequence.py

"""
Text serialization: map tokens to integer indices and back.
"""


class WordSequence:
    UNK_TAG = "<UNK>"
    PAD_TAG = "<PAD>"
    UNK = 1
    PAD = 0

    def __init__(self):
        self.dict = {
            self.UNK_TAG: self.UNK,
            self.PAD_TAG: self.PAD
        }
        self.count = {}

    def fit(self, sentence):
        """
        Accumulate word frequencies.
        :param sentence: list of tokens
        """
        for word in sentence:
            self.count[word] = self.count.get(word, 0) + 1

    def build_vocab(self, min_count=0, max_count=None, max_features=None):
        """
        Build the vocabulary from the accumulated counts.
        :param min_count: minimum word frequency (exclusive)
        :param max_count: maximum word frequency (exclusive)
        :param max_features: cap on the vocabulary size
        """
        if min_count is not None:
            self.count = {word: count for word, count in self.count.items() if count > min_count}
        if max_count is not None:
            self.count = {word: count for word, count in self.count.items() if count < max_count}
        if max_features is not None:
            # keep the most frequent words; sorted() requires key= as a keyword argument
            self.count = dict(sorted(self.count.items(), key=lambda x: x[-1], reverse=True)[:max_features])
        for word in self.count:
            self.dict[word] = len(self.dict)  # assign each word the next free index
        # invert the mapping for inverse_transform
        self.inverse_dict = dict(zip(self.dict.values(), self.dict.keys()))

    def transform(self, sentence, max_len=None):
        """
        Convert a token list into a list of indices, padded or truncated to max_len.
        """
        if max_len is not None:  # guard: len(sentence) > None raises a TypeError
            if len(sentence) > max_len:
                sentence = sentence[:max_len]
            else:
                sentence = sentence + [self.PAD_TAG] * (max_len - len(sentence))
        return [self.dict.get(i, self.UNK) for i in sentence]

    def inverse_transform(self, indices):
        """
        Convert a list of indices back into tokens.
        """
        return [self.inverse_dict.get(i, self.UNK_TAG) for i in indices]

    def __len__(self):
        return len(self.dict)


if __name__ == '__main__':
    sentences = [["今天", "天气", "很", "好"],
                 ["今天", "去", "吃", "什么"]]
    ws = WordSequence()
    for sentence in sentences:
        ws.fit(sentence)
    ws.build_vocab(min_count=0)
    print(ws.dict)
    ret = ws.transform(["好", "热", "呀", "呀", "呀", "呀", "呀", "呀", "呀"], max_len=5)
    print(ret)
    ret = ws.inverse_transform(ret)
    print(ret)

  main.py

from word_sequence import WordSequence
from dataset import ImdbDataset
import os
import pickle
from tqdm import tqdm

if __name__ == '__main__':
    # Iterate the raw datasets rather than the DataLoader: collate_fn calls
    # config.ws.transform, and the vocabulary does not exist yet at this point.
    ws = WordSequence()
    for train in [True, False]:
        dataset = ImdbDataset(train=train)
        for idx in tqdm(range(len(dataset))):
            review, label = dataset[idx]
            ws.fit(review)
    print("Building vocabulary...")
    ws.build_vocab()
    print(len(ws))
    os.makedirs("./models", exist_ok=True)
    pickle.dump(ws, open("./models/ws.pkl", "wb"))
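Once main.py has run, a quick round-trip check that the pickled vocabulary loads and transforms text (the printed indices are illustrative; they depend on your corpus):

import pickle

ws = pickle.load(open("./models/ws.pkl", "rb"))
print(len(ws))  # vocabulary size, including <PAD> and <UNK>
print(ws.transform(["this", "movie", "was", "great"], max_len=10))
# e.g. [14, 53, 21, 387, 0, 0, 0, 0, 0, 0] -- padded with PAD (0) up to max_len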

  model.py

"""
构建模型
"""
import torch.nn as nn
import config
import torch.nn.functional as F class ImdbModel(nn.Module):
def __init__(self):
super(ImdbModel,self).__init__()
self.embedding = nn.Embedding(num_embeddings=len(config.ws),embedding_dim=300,padding_idx=config.ws.PAD)
self.fc = nn.Linear(config.max_len*300,2) def forward(self,input):
'''
:param input:
:return:
'''
input_embeded = self.embedding(input) input_embeded_viewed = input_embeded.view(input_embeded.size(0),-1) out = self.fc(input_embeded_viewed)
return F.log_softmax(out,dim=-1)
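To see why fc must take config.max_len * 300 input features, here is a standalone shape trace with toy sizes (a vocabulary of 10 and max_len of 4, both made up for the demo):

import torch

emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=300, padding_idx=0)
fc = torch.nn.Linear(4 * 300, 2)
x = torch.randint(0, 10, (8, 4))        # [batch_size=8, max_len=4]
h = emb(x)                               # [8, 4, 300]
h = h.view(h.size(0), -1)                # [8, 1200]: one long feature vector per review
out = torch.log_softmax(fc(h), dim=-1)   # [8, 2]
print(out.shape)                         # torch.Size([8, 2])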

  LSTMmodel.py

"""
构建模型
"""
import torch.nn as nn
import torch
import config
import torch.nn.functional as F class ImdbModel(nn.Module):
def __init__(self):
super(ImdbModel,self).__init__()
self.embedding = nn.Embedding(num_embeddings=len(config.ws),embedding_dim=300,padding_idx=config.ws.PAD)
self.lstm = nn.LSTM(input_size=200,hidden_size=64,num_layers=2,batch_first=True,bidirectional=True,dropout=0.5)
self.fc1 = nn.Linear(64*2,64)
self.fc2 = nn.Linear(64,2) def forward(self,input):
'''
:param input:
:return:
'''
input_embeded = self.embedding(input) #[batch_size,seq_len,200] output,(h_n,c_n) = self.lstm(input_embeded)
out = torch.cat(h_n[-1,:,:],h_n[-2,:,:],dim=-1) #拼接正向最后一个输出和反向最后一个输出 #进行全连接
out_fc1 = self.fc1(out)
#进行relu
out_fc1_relu = F.relu(out_fc1)
#全连接
out = self.fc2(out_fc1_relu)
return F.log_softmax(out,dim=-1)
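The h_n indexing relies on PyTorch's layout for a stacked bidirectional LSTM: h_n has shape [num_layers * num_directions, batch, hidden_size], and the final two slots hold the forward and backward states of the last layer. A toy check (sizes made up):

import torch

lstm = torch.nn.LSTM(input_size=300, hidden_size=64, num_layers=2,
                     batch_first=True, bidirectional=True)
x = torch.randn(8, 20, 300)                  # [batch, seq_len, input_size]
output, (h_n, c_n) = lstm(x)
print(h_n.shape)                             # torch.Size([4, 8, 64]): 2 layers * 2 directions
out = torch.cat([h_n[-2], h_n[-1]], dim=-1)  # last layer: forward then backward
print(out.shape)                             # torch.Size([8, 128])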

  train.py

"""
Train the model.
"""
import torch
import config
from model import ImdbModel
from dataset import get_dataloader
from torch.optim import Adam
from tqdm import tqdm
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from eval import eval

model = ImdbModel().to(config.device)
optimizer = Adam(model.parameters(), lr=0.001)
loss_list = []


def train(epoch):
    train_dataloader = get_dataloader(train=True)
    bar = tqdm(train_dataloader, total=len(train_dataloader))
    for idx, (input, target) in enumerate(bar):
        optimizer.zero_grad()
        input = input.to(config.device)
        target = target.to(config.device)
        output = model(input)
        loss = F.nll_loss(output, target)
        loss.backward()
        loss_list.append(loss.item())
        optimizer.step()
        bar.set_description("epoch:{} idx:{} loss:{:.6f}".format(epoch, idx, np.mean(loss_list)))
        if idx % 10 == 0:
            torch.save(model.state_dict(), "./models/model.pkl")
            torch.save(optimizer.state_dict(), "./models/optimizer.pkl")


if __name__ == '__main__':
    for i in range(5):
        train(i)
        eval()
    plt.figure(figsize=(20, 8))
    plt.plot(range(len(loss_list)), loss_list)
    plt.show()  # without show() the loss curve never appears
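train.py saves model.pkl and optimizer.pkl every 10 steps but never reloads them, so an interrupted run starts over. A resume sketch (my addition, not in the post) that would go right after model and optimizer are created in train.py:

import os
import torch

# Hypothetical resume logic: restore the latest checkpoint if one exists.
# `model` and `optimizer` are the globals defined at the top of train.py.
if os.path.exists("./models/model.pkl"):
    model.load_state_dict(torch.load("./models/model.pkl"))
    optimizer.load_state_dict(torch.load("./models/optimizer.pkl"))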

  eval.py

"""
Evaluate the model on the test set.
"""
import torch
import config
from model import ImdbModel
from dataset import get_dataloader
import torch.nn.functional as F
import numpy as np


def eval():
    model = ImdbModel().to(config.device)
    model.load_state_dict(torch.load("./models/model.pkl"))
    model.eval()
    loss_list = []
    acc_list = []
    test_dataloader = get_dataloader(train=False)
    with torch.no_grad():
        for input, target in test_dataloader:
            input = input.to(config.device)
            target = target.to(config.device)
            output = model(input)
            loss = F.nll_loss(output, target)
            loss_list.append(loss.item())
            # accuracy: the index of the max log-probability is the predicted class
            pred = output.max(dim=-1)[-1]
            acc_list.append(pred.eq(target).cpu().float().mean().item())
    print("loss:{:.6f},acc:{}".format(np.mean(loss_list), np.mean(acc_list)))


if __name__ == '__main__':
    eval()
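The post only evaluates whole test batches; for completeness, here is a single-review inference sketch (assumes ws.pkl and model.pkl already exist; the review text is made up):

import torch
import config
from model import ImdbModel
from utils import tokenlize

model = ImdbModel().to(config.device)
model.load_state_dict(torch.load("./models/model.pkl"))
model.eval()

review = "This movie was surprisingly good, I enjoyed every minute of it."
tokens = tokenlize(review)
x = torch.LongTensor([config.ws.transform(tokens, max_len=config.max_len)]).to(config.device)
with torch.no_grad():
    pred = model(x).max(dim=-1)[-1].item()
print("positive" if pred == 1 else "negative")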

  
