Big Data Learning: MapReduce Carrier Log Enhancement
Requirements
1. Parse the raw JSON data into plain text records.
2. Find each user's three highest-rated movies.
3. Find the three most frequently rated movies.
Data
https://pan.baidu.com/s/1gPsQXVYSQEZ2OYek4HxK6A
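Each line of rating.json is one JSON object carrying the four fields the mapper reads (movie, rate, timeStamp, uid). The record below is illustrative only, not taken from the dataset:

{"movie":"1193","rate":"5","timeStamp":"978300760","uid":"1"}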
pom.xml
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>com.cyf</groupId>
    <artifactId>MapReduceCases</artifactId>
    <packaging>jar</packaging>
    <version>1.0</version>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-mapreduce-client-core</artifactId>
            <version>2.6.4</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.1.40</version>
        </dependency>
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.36</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <artifactId>maven-assembly-plugin</artifactId>
                <configuration>
                    <appendAssemblyId>false</appendAssemblyId>
                    <descriptorRefs>
                        <descriptorRef>jar-with-dependencies</descriptorRef>
                    </descriptorRefs>
                    <archive>
                        <manifest>
                            <mainClass>cn.itcast.mapreduce.json.JsonToText</mainClass>
                        </manifest>
                    </archive>
                </configuration>
                <executions>
                    <execution>
                        <id>make-assembly</id>
                        <phase>package</phase>
                        <goals>
                            <goal>assembly</goal>
                        </goals>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
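With this pom, the runnable jar is built with Maven in the usual way. The copy to /root/JsonToText.jar is an assumption so that the path matches the job.setJar() call in the driver below:

mvn clean package
cp target/MapReduceCases-1.0.jar /root/JsonToText.jar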
OriginBean.java

package cn.itcast.mapreduce.json;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

/**
 * One parsed rating record: movie id, rating, timestamp and user id.
 */
public class OriginBean implements WritableComparable<OriginBean> {

    private Long movie;
    private Long rate;
    private Long timeStamp;
    private Long uid;

    public Long getMovie() { return movie; }
    public void setMovie(Long movie) { this.movie = movie; }
    public Long getRate() { return rate; }
    public void setRate(Long rate) { this.rate = rate; }
    public Long getTimeStamp() { return timeStamp; }
    public void setTimeStamp(Long timeStamp) { this.timeStamp = timeStamp; }
    public Long getUid() { return uid; }
    public void setUid(Long uid) { this.uid = uid; }

    public OriginBean(Long movie, Long rate, Long timeStamp, Long uid) {
        this.movie = movie;
        this.rate = rate;
        this.timeStamp = timeStamp;
        this.uid = uid;
    }

    public OriginBean() {
        // no-arg constructor required by the MapReduce framework
    }

    public int compareTo(OriginBean o) {
        return this.movie.compareTo(o.movie);
    }

    public void write(DataOutput out) throws IOException {
        out.writeLong(movie);
        out.writeLong(rate);
        out.writeLong(timeStamp);
        out.writeLong(uid);
    }

    public void readFields(DataInput in) throws IOException {
        this.movie = in.readLong();
        this.rate = in.readLong();
        this.timeStamp = in.readLong();
        this.uid = in.readLong();
    }

    @Override
    public String toString() {
        return this.movie + "\t" + this.rate + "\t" + this.timeStamp + "\t" + this.uid;
    }
}
JsonToText.java

package cn.itcast.mapreduce.json;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

/**
 * Map-only job: parses each JSON rating record and writes it out
 * as a tab-separated text line (requirement 1).
 */
public class JsonToText {

    static class MyMapper extends Mapper<LongWritable, Text, Text, NullWritable> {

        Text k = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            JSONObject valueJson = JSON.parseObject(value.toString());
            OriginBean bean = new OriginBean(valueJson.getLong("movie"), valueJson.getLong("rate"),
                    valueJson.getLong("timeStamp"), valueJson.getLong("uid"));
            k.set(bean.toString());
            context.write(k, NullWritable.get());
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // 16777216 bytes = 16 MB per split; with the ~62.5 MB rating.json this yields
        // 4 splits, so 4 map tasks and 4 output files.
        conf.set("mapreduce.input.fileinputformat.split.maxsize", "16777216");

        Job job = Job.getInstance(conf);
        // job.setJarByClass(JsonToText.class);
        // tell the framework where the job jar lives on the submitting machine
        job.setJar("/root/JsonToText.jar");

        job.setMapperClass(MyMapper.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        // map-only job: no reducers
        job.setNumReduceTasks(0);

        FileInputFormat.setInputPaths(job, new Path("/json/input"));
        FileOutputFormat.setOutputPath(job, new Path("/json/output"));
        // FileInputFormat.setInputPaths(job, new Path(args[0]));
        // FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}
Create the input directory and upload the data
hadoop fs -mkdir -p /json/input
hadoop fs -put rating.json /json/input
Run
hadoop jar JsonToText.jar cn.itcast.mapreduce.json.JsonToText
Output
https://pan.baidu.com/s/1ayrpl7w8Dlzpc7TRZIO94w
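Each line of the resulting part-m-* files is simply OriginBean.toString(), i.e. tab-separated movie, rate, timeStamp and uid. With the illustrative record from above, a line would look like:

1193	5	978300760	1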
pom.xml

The pom.xml for this second job is identical to the one above; the only difference is the main class recorded in the jar manifest:

<mainClass>cn.itcast.mapreduce.json.MovieRateSum</mainClass>
ResultBean.java

package cn.itcast.mapreduce.json;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.WritableComparable;

/**
 * Result record: movie id and the sum of all its ratings.
 */
public class ResultBean implements WritableComparable<ResultBean> {

    private Long movie;
    private Long sumRate;

    public Long getMovie() { return movie; }
    public void setMovie(Long movie) { this.movie = movie; }
    public void setSumRate(long sumRate) { this.sumRate = sumRate; }

    public ResultBean(Long movie, Long sumRate) {
        this.movie = movie;
        this.sumRate = sumRate;
    }

    public ResultBean(Long sumRate) {
        this.sumRate = sumRate;
    }

    public ResultBean() {
        // no-arg constructor required by the MapReduce framework
    }

    // order by movie id, and by descending rating sum for the same movie
    public int compareTo(ResultBean o) {
        if (!this.movie.equals(o.movie)) {
            return this.movie.compareTo(o.movie);
        }
        return o.sumRate.compareTo(this.sumRate);
    }

    public void write(DataOutput out) throws IOException {
        out.writeLong(movie);
        out.writeLong(sumRate);
    }

    public void readFields(DataInput in) throws IOException {
        this.movie = in.readLong();
        this.sumRate = in.readLong();
    }

    @Override
    public String toString() {
        return movie + "\t" + sumRate;
    }
}
MovieRateSum.java

package cn.itcast.mapreduce.json;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

/**
 * Sums the ratings of each movie: the map groups records by movie id,
 * the reduce adds up the rates and writes one ResultBean per movie.
 */
public class MovieRateSum {

    static class MyMapper extends Mapper<LongWritable, Text, LongWritable, OriginBean> {

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            JSONObject valueJson = JSON.parseObject(value.toString());
            OriginBean bean = new OriginBean(valueJson.getLong("movie"), valueJson.getLong("rate"),
                    valueJson.getLong("timeStamp"), valueJson.getLong("uid"));
            context.write(new LongWritable(bean.getMovie()), bean);
        }
    }

    static class MyReduce extends Reducer<LongWritable, OriginBean, ResultBean, NullWritable> {

        @Override
        protected void reduce(LongWritable movie, Iterable<OriginBean> beans, Context context)
                throws IOException, InterruptedException {
            long sum = 0L;
            for (OriginBean bean : beans) {
                sum += bean.getRate();
            }
            ResultBean bean = new ResultBean();
            bean.setMovie(movie.get());
            bean.setSumRate(sum);
            context.write(bean, NullWritable.get());
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);
        // job.setJarByClass(MovieRateSum.class);
        // tell the framework where the job jar lives on the submitting machine
        job.setJar("/root/MovieRateSum.jar");

        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReduce.class);
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(OriginBean.class);
        job.setOutputKeyClass(ResultBean.class);
        job.setOutputValueClass(NullWritable.class);
        job.setOutputFormatClass(SequenceFileOutputFormat.class);

        // the mapper parses JSON, so the input must be the raw rating.json,
        // not the tab-separated output of JsonToText
        FileInputFormat.setInputPaths(job, new Path("/json/input"));
        FileOutputFormat.setOutputPath(job, new Path("/json/output-seq"));
        // FileInputFormat.setInputPaths(job, new Path(args[0]));
        // FileOutputFormat.setOutputPath(job, new Path(args[1]));

        job.waitForCompletion(true);
    }
}
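Packaged the same way as the first job (again assuming the jar is copied to /root/MovieRateSum.jar, the path that job.setJar() expects), it can be submitted with:

hadoop jar MovieRateSum.jar cn.itcast.mapreduce.json.MovieRateSum

The jobs above cover requirement 1 and a per-movie rating sum; the top-3 lists of requirements 2 and 3 are not shown. As a minimal sketch of requirement 2 only, assuming the raw rating.json as input and reusing the OriginBean class, each user's three highest-rated movies could be picked in a reducer as below. UserTopThree and the /json/output-top3 path are hypothetical names, not from the original post:

package cn.itcast.mapreduce.json;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

public class UserTopThree {

    static class MyMapper extends Mapper<LongWritable, Text, LongWritable, OriginBean> {

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            JSONObject valueJson = JSON.parseObject(value.toString());
            OriginBean bean = new OriginBean(valueJson.getLong("movie"), valueJson.getLong("rate"),
                    valueJson.getLong("timeStamp"), valueJson.getLong("uid"));
            // group all ratings of one user on the same reducer
            context.write(new LongWritable(bean.getUid()), bean);
        }
    }

    static class MyReducer extends Reducer<LongWritable, OriginBean, Text, NullWritable> {

        @Override
        protected void reduce(LongWritable uid, Iterable<OriginBean> beans, Context context)
                throws IOException, InterruptedException {
            // copy each bean: Hadoop reuses the value object while iterating
            List<OriginBean> list = new ArrayList<OriginBean>();
            for (OriginBean b : beans) {
                list.add(new OriginBean(b.getMovie(), b.getRate(), b.getTimeStamp(), b.getUid()));
            }
            // highest rating first, then emit the first three records
            Collections.sort(list, new Comparator<OriginBean>() {
                public int compare(OriginBean a, OriginBean b) {
                    return b.getRate().compareTo(a.getRate());
                }
            });
            for (int i = 0; i < list.size() && i < 3; i++) {
                context.write(new Text(list.get(i).toString()), NullWritable.get());
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration());
        job.setJarByClass(UserTopThree.class);
        job.setMapperClass(MyMapper.class);
        job.setReducerClass(MyReducer.class);
        job.setMapOutputKeyClass(LongWritable.class);
        job.setMapOutputValueClass(OriginBean.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);
        FileInputFormat.setInputPaths(job, new Path("/json/input"));
        FileOutputFormat.setOutputPath(job, new Path("/json/output-top3"));
        job.waitForCompletion(true);
    }
}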