Hadoop notes
1. Running WordCount
Code:
package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class WordCount {

    public static class Map extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, IntWritable> {
        // Emit (word, 1) for every token in the input line.
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }

    public static class Reduce extends MapReduceBase implements
            Reducer<Text, IntWritable, Text, IntWritable> {
        // Sum the counts for each word.
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordcount");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}
This example uses the older org.apache.hadoop.mapred API, which still runs on Hadoop 2.x. First, compile it (substitute the exact version of your installed Hadoop jars for 2.7.x):
javac -classpath ./share/hadoop/common/hadoop-common-2.7.x.jar:./share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.x.jar -d WordCount ./WordCount/WordCount.java
Then package it. Run jar from inside the WordCount output directory, so the org/ tree ends up at the root of the jar:
jar -cvf wordcount.jar org/*
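After the javac step above, the class files sit under WordCount/ in a path that mirrors the package name, roughly like this:

WordCount/
  org/apache/hadoop/examples/
    WordCount.class
    WordCount$Map.class
    WordCount$Reduce.class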
Then copy wordcount.jar into the Hadoop working directory.
Next, create an input directory under the Hadoop working directory (mkdir input), create a file inside it (vi file1), and enter the following content:
hello world
hello hadoop
hello mapreduce
Then upload the file to the Hadoop distributed file system:
- ./bin/hadoop fs -put input/file* input
Now run the job:
- ./bin/hadoop jar wordcount.jar org.apache.hadoop.examples.WordCount input wordcount_output
Finally, view the result:
- ./bin/hadoop fs -cat wordcount_output/part-r-00000
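With the three-line sample file above, the output should look like this (keys come out in sorted order, and "hello" appears three times):

hadoop	1
hello	3
mapreduce	1
world	1

Note that jobs written against the old mapred API name their output file part-00000 rather than part-r-00000, so ./bin/hadoop fs -cat wordcount_output/part-* is the safer pattern.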
References:
http://cardyn.iteye.com/blog/1356361
https://blog.csdn.net/qichangleixin/article/details/43376587
2. Writing data to HDFS
Java code:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.*;
import java.net.URI;

/**
 * blog: http://www.iteblog.com/
 * Date: 14-1-2
 * Time: 6:09 PM
 */
public class AppendContent {
    public static void main(String[] args) {
        String hdfs_path = "input/file1"; // HDFS file path
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.support.append", true);

        String inpath = "./append.txt"; // local file whose contents will be appended
        FileSystem fs = null;
        try {
            fs = FileSystem.get(URI.create(hdfs_path), conf);
            // Stream the local file and append it to the HDFS file.
            InputStream in = new BufferedInputStream(new FileInputStream(inpath));
            OutputStream out = fs.append(new Path(hdfs_path));
            IOUtils.copyBytes(in, out, 4096, true); // 4096-byte buffer; true closes both streams
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Note the HDFS path: using hdfs://localhost:9000/input/ as the path never worked for me, and I am not sure why. A likely cause is a mismatch with fs.defaultFS in core-site.xml: a bare path such as input/file1 resolves against the configured default file system and the user's HDFS home directory, whereas a full URI must match the namenode address and port exactly.
Compile (again substituting the actual Hadoop jar versions):
javac -classpath ./share/hadoop/common/hadoop-common-2.7.x.jar:./share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.x.jar -d ./classes ./my_append/AppendContent.java
Package (using -C so the class files sit at the root of the jar rather than under a classes/ prefix):
jar -cvf ./my_jar/append.jar -C ./classes .
Run:
./bin/hadoop jar ./my_jar/append.jar AppendContent
AppendContent is the name of the class to run.
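Before running, make sure both ends exist: the local source file ./append.txt that the program reads, and the HDFS target input/file1, since fs.append() opens an existing file and will not create one. A minimal setup, reusing the input directory from the WordCount section:

echo "some text to append" > append.txt
./bin/hadoop fs -test -e input/file1 || ./bin/hadoop fs -touchz input/file1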
Check the result:
./bin/hdfs dfs -cat input/*
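To confirm that each run actually grew the file, compare the size reported before and after the append:

./bin/hdfs dfs -ls input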
Alternatively, the program can take the HDFS path as a command-line argument, which makes it easier to drive several processes at once (the loop bounds in this version were lost; the values below are placeholders):
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FSDataOutputStream;

import java.io.IOException;
import java.net.URI;

/**
 * blog: http://www.iteblog.com/
 * Date: 14-1-2
 * Time: 6:09 PM
 */
public class AppendContent {
    public static void main(String[] args) {
        String hdfs_path = args[0]; // HDFS file path, e.g. input/file1
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.support.append", true);

        FileSystem fs = null;
        try {
            fs = FileSystem.get(URI.create(hdfs_path), conf);
            FSDataOutputStream out = fs.append(new Path(hdfs_path));
            // The original loop bounds were lost; 10 rounds of 1000 bytes
            // are placeholder values.
            for (int i = 0; i < 10; i++) {
                StringBuilder s = new StringBuilder();
                for (int j = 0; j < 1000; j++) {
                    s.append('a');
                }
                byte[] bytes = s.toString().getBytes();
                out.write(bytes, 0, bytes.length);
            }
            out.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Compile and package with the same commands as above, then run:
./bin/hadoop jar ./my_jar/append.jar AppendContent input/file1
References:
https://blog.csdn.net/jameshadoop/article/details/24179413
https://blog.csdn.net/wypblog/article/details/17914021
Benchmark script:

#!/bin/bash
# Start time in nanoseconds
begin=$(date +%s%N)

# The original loop bound was lost; 10 is a placeholder. Each job is launched
# in the background (&) so the appends run in parallel, and wait blocks until
# they all finish.
for ((i = 0; i < 10; i++))
do
    {
        ./bin/hadoop jar ./my_jar/append.jar AppendContent input/file${i}
    } &
done
wait

# End time in nanoseconds
end=$(date +%s%N)
use_tm=$(echo $end $begin | awk '{ print ($1 - $2) / 1000000000 }')
echo "Elapsed time: ${use_tm}s"
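The script assumes the target files input/file0 through input/fileN already exist in HDFS, since fs.append() fails on a missing file. They can be created up front, using the same placeholder bound as above:

for ((i = 0; i < 10; i++)); do ./bin/hadoop fs -touchz input/file${i}; done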
3. Testing Java writes on ext3
Program (as above, the original loop bounds were lost; the values below are placeholders):
import java.io.FileWriter;
import java.io.IOException;

public class Toext3 {
    public static void main(String[] args) {
        String ext3_path = args[0]; // local (ext3) file path
        FileWriter writer = null;
        try {
            // Open once in append mode; opening a new FileWriter on every
            // iteration would leak file handles.
            writer = new FileWriter(ext3_path, true);
            // Placeholder bounds: 10 rounds of 1000 characters.
            for (int i = 0; i < 10; i++) {
                StringBuilder s = new StringBuilder();
                for (int j = 0; j < 1000; j++) {
                    s.append('b');
                }
                writer.write(s.toString());
                System.out.println(ext3_path);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                if (writer != null) {
                    writer.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
Compile (no Hadoop jars are needed here, since the program uses only java.io):
javac -d ./data/classes data/Toext3.java
Run:
java -cp ./data/classes/ Toext3 ./data/ext3/file0
Here -cp points to the directory containing the class files, and Toext3 is the class to run.
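For a like-for-like comparison with the HDFS numbers, the same timing harness can drive the ext3 test. A sketch, assuming the placeholder loop bound used above and target files under ./data/ext3/:

#!/bin/bash
begin=$(date +%s%N)
for ((i = 0; i < 10; i++))
do
    {
        java -cp ./data/classes/ Toext3 ./data/ext3/file${i}
    } &
done
wait
end=$(date +%s%N)
use_tm=$(echo $end $begin | awk '{ print ($1 - $2) / 1000000000 }')
echo "Elapsed time: ${use_tm}s"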