import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

CONTEXT_SIZE = 2    # the same as window_size
EMBEDDING_DIM = 100

test_sentence = "When forty winters shall besiege thy brow,And dig deep trenches in thy beauty's field,Thy youth's proud livery so gazed on now,Will be a totter'd weed of small worth held:Then being asked, where all thy beauty lies,Where all the treasure of thy lusty days;To say, within thine own deep sunken eyes,Were an all-eating shame, and thriftless praise.How much more praise deserv'd thy beauty's use,If thou couldst answer 'This fair child of mineShall sum my count, and make my old excuse,'Proving his beauty by succession thine!This were to be new made when thou art old,And see thy blood warm when thou feel'st it cold.".split()

# build the vocabulary (remove repeated words) and the word <-> id mappings
vocb = set(test_sentence)
word2id = {word: i for i, word in enumerate(vocb)}
id2word = {idx: word for word, idx in word2id.items()}
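As a quick sanity check, the two mappings should be exact inverses of each other. A minimal sketch (the print and assertion below are illustrative only; the exact ids depend on the iteration order of the set and change between runs):

# hypothetical sanity check: word2id and id2word should be inverses
print(len(vocb))    # number of distinct words in the sonnet
assert all(id2word[word2id[w]] == w for w in vocb)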
# define the model
class NgramModel(nn.Module):
    def __init__(self, vocb_size, context_size, n_dim):
        super().__init__()
        self.n_word = vocb_size
        self.embedding = nn.Embedding(self.n_word, n_dim)
        self.linear1 = nn.Linear(context_size * n_dim, 128)
        self.linear2 = nn.Linear(128, self.n_word)

    def forward(self, x):
        # first step: look up the context words in the embedding table,
        # e.g. two word ids give an embedding of shape (2, 100)
        emb = self.embedding(x)
        # second step: flatten the word embeddings to shape (1, 200)
        emb = emb.view(1, -1)
        # third step: pass through the first linear layer, apply relu,
        # then pass through the second linear layer
        out = self.linear1(emb)
        out = F.relu(out)
        out = self.linear2(out)
        # the output dimension of the last layer is the vocabulary size,
        # so we can view this as a classification problem; to predict the
        # most probable word we finally apply log softmax
        log_prob = F.log_softmax(out, dim=1)
        return log_prob


ngrammodel = NgramModel(len(word2id), CONTEXT_SIZE, EMBEDDING_DIM)
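Before training, it can help to verify the model's input/output contract with a single forward pass: it takes a LongTensor of CONTEXT_SIZE word ids and returns one row of log probabilities over the vocabulary. A minimal sketch (the dummy ids are hypothetical; any two valid word ids would do):

# hypothetical smoke test: two context word ids in, one row of log-probs out
dummy_context = torch.LongTensor([0, 1])
with torch.no_grad():
    log_probs = ngrammodel(dummy_context)
print(log_probs.shape)    # torch.Size([1, len(word2id)])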
criterion = nn.NLLLoss()
optimizer = optim.SGD(ngrammodel.parameters(), lr=1e-3)

# build the trigrams: the two preceding words are the context, the third word is the target
trigram = [((test_sentence[i], test_sentence[i + 1]), test_sentence[i + 2])
           for i in range(len(test_sentence) - 2)]

for epoch in range(100):
    print('epoch: {}'.format(epoch + 1))
    print('*' * 10)
    running_loss = 0
    for data in trigram:
        # 'word' holds the two words preceding the word to predict, 'label' is the word to predict
        word, label = data
        word = torch.LongTensor([word2id[e] for e in word])
        label = torch.LongTensor([word2id[label]])
        # forward
        out = ngrammodel(word)
        loss = criterion(out, label)
        running_loss += loss.item()
        # backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('loss: {:.6f}'.format(running_loss / len(trigram)))
# predict
word, label = trigram[3]
word = torch.LongTensor([word2id[i] for i in word])
out = ngrammodel(word)
_, predict_label = torch.max(out, 1)
predict_word = id2word[predict_label.item()]
print('real word is {}, predict word is {}'.format(label, predict_word))
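To get a rough feel for how well the model has memorised the sonnet, the same prediction can be run over every trigram and compared against the target word. This is a minimal sketch, assuming the variables above (ngrammodel, trigram, word2id, id2word) are still in scope; it measures fit on the training data, not generalisation:

# hypothetical accuracy check over the training trigrams
correct = 0
with torch.no_grad():
    for context, target in trigram:
        ids = torch.LongTensor([word2id[w] for w in context])
        pred = ngrammodel(ids).argmax(dim=1).item()
        correct += int(id2word[pred] == target)
print('training accuracy: {:.2%}'.format(correct / len(trigram)))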
