Implementing an n-gram Language Model with MapReduce
In the era of big data, datasets routinely outgrow what any single machine can handle, whether in CPU power or in memory capacity. Processing therefore has to be distributed, pooling the resources of many machines, for both offline batch jobs and online real-time workloads.
A previous meeting covered the evolution of language models, from rule-based approaches to the later neural network language models (NNLM). The goal of this chapter is hands-on practice: with the underlying principles understood, implement an n-gram language model ourselves using the MapReduce (MR) paradigm.
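Before bringing Hadoop into the picture, it helps to see the core counting idea on a single machine. The sketch below is a minimal, non-distributed illustration (the class and variable names are made up for this example) of what the first MapReduce job computes: for every position in a sentence, emit each gram of length 2 up to n, and count how often each gram occurs across the corpus.

import java.util.HashMap;
import java.util.Map;

// Minimal single-machine sketch of the counting step that the first
// MapReduce job performs in a distributed way. Names are illustrative only.
public class NGramCountSketch {
    public static void main(String[] args) {
        String[] sentences = { "I love big data", "I love big models" };
        int n = 3;
        Map<String, Integer> counts = new HashMap<String, Integer>();

        for (String sentence : sentences) {
            String[] words = sentence.trim().toLowerCase().replaceAll("[^a-z]", " ").split("\\s+");
            for (int i = 0; i < words.length; i++) {
                StringBuilder sb = new StringBuilder(words[i]);
                // grams of length 2..n starting at position i
                for (int j = 1; i + j < words.length && j < n; j++) {
                    sb.append(" ").append(words[i + j]);
                    String gram = sb.toString();
                    counts.put(gram, counts.containsKey(gram) ? counts.get(gram) + 1 : 1);
                }
            }
        }
        // e.g. "i love" -> 2, "i love big" -> 2, "big data" -> 1, ...
        System.out.println(counts);
    }
}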
First, use Maven to manage the package dependencies.
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.dingheng</groupId>
    <artifactId>nragmMR</artifactId>
    <version>1.0-SNAPSHOT</version>
    <packaging>jar</packaging>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-core</artifactId>
            <version>1.2.1</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>8.0.12</version>
        </dependency>
    </dependencies>
</project>
Now straight to the code:
1. First the Driver, which serves as the program's entry point and chains the two jobs together.
package com.dingheng;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;

public class Driver {

    public static void main(String[] args) throws ClassNotFoundException, IOException, InterruptedException {
        // args: inputDir, outputDir, numOfGram, threshold, topK
        String inputDir = args[0];
        String outputDir = args[1];
        String numOfGram = args[2];
        String threshold = args[3];
        String topK = args[4];

        // first MapReduce job: count the n-grams
        Configuration configurationNGram = new Configuration();
        // split the input into records on "." so each sentence becomes one record
        configurationNGram.set("textinputformat.record.delimiter", ".");
        configurationNGram.set("numOfGram", numOfGram);

        Job jobNGram = Job.getInstance(configurationNGram);
        jobNGram.setJobName("NGram");
        jobNGram.setJarByClass(Driver.class);

        jobNGram.setMapperClass(NGram.NGramMapper.class);
        jobNGram.setReducerClass(NGram.NGramReducer.class);

        jobNGram.setOutputKeyClass(Text.class);
        jobNGram.setMapOutputValueClass(IntWritable.class);

        jobNGram.setInputFormatClass(TextInputFormat.class);
        jobNGram.setOutputFormatClass(TextOutputFormat.class);

        TextInputFormat.addInputPath(jobNGram, new Path(inputDir));
        TextOutputFormat.setOutputPath(jobNGram, new Path(outputDir));
        jobNGram.waitForCompletion(true);

        // second MapReduce job: build the language model and write it to MySQL
        Configuration configurationLanguage = new Configuration();
        configurationLanguage.set("threshold", threshold);
        configurationLanguage.set("topK", topK);

        // with mysql-connector-java 8.x the driver class is com.mysql.cj.jdbc.Driver;
        // com.mysql.jdbc.Driver still works as a deprecated alias
        DBConfiguration.configureDB(configurationLanguage,
                "com.mysql.jdbc.Driver",
                "jdbc:mysql://localhost:3306/test",
                "root",
                "123456");

        Job jobLanguage = Job.getInstance(configurationLanguage);
        jobLanguage.setJobName("LanguageModel");
        jobLanguage.setJarByClass(Driver.class);

        jobLanguage.setMapperClass(LanguageModel.Map.class);
        jobLanguage.setReducerClass(LanguageModel.Reduce.class);

        jobLanguage.setMapOutputKeyClass(Text.class);
        jobLanguage.setMapOutputValueClass(Text.class);
        jobLanguage.setOutputKeyClass(DBOutputWritable.class);
        jobLanguage.setOutputValueClass(NullWritable.class);

        jobLanguage.setInputFormatClass(TextInputFormat.class);
        jobLanguage.setOutputFormatClass(DBOutputFormat.class);

        DBOutputFormat.setOutput(
                jobLanguage,
                "output",
                new String[] { "starting_phrase", "following_word", "count" });
        // the output of the first job is the input of the second
        TextInputFormat.setInputPaths(jobLanguage, new Path(outputDir));

        jobLanguage.waitForCompletion(true);
    }
}
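With the Driver in place, the project can be packaged with mvn package and submitted in the usual hadoop jar fashion. A hypothetical invocation (the jar path and all values below are placeholders) would be: hadoop jar target/nragmMR-1.0-SNAPSHOT.jar com.dingheng.Driver /ngram/input /ngram/output 3 20 5 — that is, the raw-text input directory, the output directory of the first job, n, the count threshold, and topK.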
2. Next, a custom output type, DBOutputWritable, which lets the second job write its records straight into MySQL through DBOutputFormat. Note that the target table (named output here, with the columns starting_phrase, following_word and count) must already exist in the test database: DBOutputFormat only inserts rows, it does not create the table.
package com.dingheng;

import org.apache.hadoop.mapreduce.lib.db.DBWritable;

import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

public class DBOutputWritable implements DBWritable {

    private String starting_phrase;
    private String following_word;
    private int count;

    public DBOutputWritable(String starting_phrase, String following_word, int count) {
        this.starting_phrase = starting_phrase;
        this.following_word = following_word;
        this.count = count;
    }

    // bind the fields to the columns of the generated INSERT statement, in column order
    public void write(PreparedStatement arg0) throws SQLException {
        arg0.setString(1, starting_phrase);
        arg0.setString(2, following_word);
        arg0.setInt(3, count);
    }

    public void readFields(ResultSet arg0) throws SQLException {
        this.starting_phrase = arg0.getString(1);
        this.following_word = arg0.getString(2);
        this.count = arg0.getInt(3);
    }
}
3. Finally the mappers and reducers. Two MapReduce iterations are used, each in its own file: the first (NGram) counts the raw n-grams, the second (LanguageModel) turns those counts into the model. The first job writes each gram and its count separated by a tab (TextOutputFormat's default), which is exactly the format the second job's mapper parses.
package com.dingheng;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;

public class NGram {

    public static class NGramMapper extends Mapper<LongWritable, Text, Text, IntWritable> {

        int numOfGram;

        @Override
        public void setup(Context context) {
            Configuration conf = context.getConfiguration();
            numOfGram = conf.getInt("numOfGram", 5);
        }

        @Override
        public void map(LongWritable key,
                        Text value,
                        Context context) throws IOException, InterruptedException {
            /*
            input: one sentence per record, e.g. "I love data", n = 3
            output:
                I love      -> 1
                love data   -> 1
                I love data -> 1
            */
            String line = value.toString().trim().toLowerCase().replaceAll("[^a-z]", " ");
            String[] words = line.split("\\s+");

            if (words.length < 2) {
                return;
            }

            // emit every gram of length 2..numOfGram starting at position i
            StringBuilder sb;
            for (int i = 0; i < words.length; i++) {
                sb = new StringBuilder();
                sb.append(words[i]);
                for (int j = 1; i + j < words.length && j < numOfGram; j++) {
                    sb.append(" ");
                    sb.append(words[i + j]);
                    context.write(new Text(sb.toString()), new IntWritable(1));
                }
            }
        }
    }

    public static class NGramReducer extends Reducer<Text, IntWritable, Text, IntWritable> {

        @Override
        public void reduce(Text key,
                           Iterable<IntWritable> values,
                           Context context) throws IOException, InterruptedException {
            // sum the occurrences of each gram
            int sum = 0;
            for (IntWritable value : values) {
                sum = sum + value.get();
            }
            context.write(key, new IntWritable(sum));
        }
    }
}
package com.dingheng;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

import java.io.IOException;
import java.util.*;

public class LanguageModel {

    public static class Map extends Mapper<LongWritable, Text, Text, Text> {

        // input:  "I love big data\t10"
        // output: key = "I love big", value = "data=10"

        int threshold;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            Configuration configuration = context.getConfiguration();
            threshold = configuration.getInt("threshold", 20);
        }

        @Override
        public void map(LongWritable key,
                        Text value,
                        Context context) throws IOException, InterruptedException {

            if ((value == null) || (value.toString().trim().length() == 0)) {
                return;
            }

            String line = value.toString().trim();
            String[] wordsPlusCount = line.split("\t");
            // check the shape of the line before parsing the count
            if (wordsPlusCount.length < 2) {
                return;
            }

            String[] words = wordsPlusCount[0].split("\\s+");
            int count = Integer.valueOf(wordsPlusCount[wordsPlusCount.length - 1]);
            if (count < threshold) {
                return;
            }

            // starting phrase = all words but the last; following word = the last word
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < words.length - 1; i++) {
                sb.append(words[i]);
                sb.append(" ");
            }

            String outputKey = sb.toString().trim();
            String outputValue = words[words.length - 1];
            if (!(outputKey.length() < 1)) {
                context.write(new Text(outputKey), new Text(outputValue + "=" + count));
            }
        }
    }

    public static class Reduce extends Reducer<Text, Text, DBOutputWritable, NullWritable> {

        int topK;

        @Override
        protected void setup(Context context) throws IOException, InterruptedException {
            Configuration configuration = context.getConfiguration();
            topK = configuration.getInt("topK", 5);
        }

        @Override
        public void reduce(Text key,
                           Iterable<Text> values,
                           Context context) throws IOException, InterruptedException {
            // key:    "I love big"
            // values: <data=10, girl=100, boy=1000 ...>
            // group the following words by count, with counts in descending order
            TreeMap<Integer, List<String>> tm =
                    new TreeMap<Integer, List<String>>(Collections.<Integer>reverseOrder());
            // e.g. <1000, <boy>>, <100, <girl>>, <10, <data, baby...>>

            for (Text val : values) {
                // val: "data=10"
                String value = val.toString().trim();
                String word = value.split("=")[0].trim();
                int count = Integer.parseInt(value.split("=")[1].trim());
                if (tm.containsKey(count)) {
                    tm.get(count).add(word);
                } else {
                    List<String> list = new ArrayList<String>();
                    list.add(word);
                    tm.put(count, list);
                }
            }

            // write out roughly the topK most frequent following words for this phrase
            Iterator<Integer> iter = tm.keySet().iterator();
            for (int j = 0; iter.hasNext() && j < topK; ) {
                int keyCount = iter.next();
                List<String> words = tm.get(keyCount);
                for (String curWord : words) {
                    context.write(new DBOutputWritable(key.toString(), curWord, keyCount), NullWritable.get());
                    j++;
                }
            }
        }
    }
}
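Once both jobs have finished, the model lives in MySQL and can be queried directly. The class below is a hypothetical consumer, not part of the project above: it assumes the output table written by DBOutputFormat.setOutput exists in the test database with the column names used in the Driver, and that the connection settings match. Given a starting phrase, it prints the most frequent following words, which is essentially an auto-complete lookup.

package com.dingheng;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

// Hypothetical consumer of the model: given a starting phrase, look up the
// most frequent following words that the second job wrote into MySQL.
// Assumes the "output" table and the credentials from the Driver above.
public class PredictNextWord {
    public static void main(String[] args) throws Exception {
        String phrase = args.length > 0 ? args[0] : "i love big";
        Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/test", "root", "123456");
        try {
            PreparedStatement ps = conn.prepareStatement(
                    "SELECT following_word, `count` FROM output " +
                    "WHERE starting_phrase = ? ORDER BY `count` DESC LIMIT 5");
            ps.setString(1, phrase);
            ResultSet rs = ps.executeQuery();
            while (rs.next()) {
                System.out.println(phrase + " -> " + rs.getString(1) + " (" + rs.getInt(2) + ")");
            }
        } finally {
            conn.close();
        }
    }
}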