Spark setMaster source code

/**
 * The master URL to connect to, such as "local" to run locally with one thread, "local[4]" to
 * run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster.
 */
def setMaster(master: String): SparkConf = {
  set("spark.master", master)
}

In plain terms: pass "local" to run locally with one thread, "local[4]" to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone cluster. All setMaster does is store this URL under the spark.master configuration key.
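Because setMaster is just a thin wrapper over set, the two spellings below are interchangeable. A minimal sketch (only SparkConf is needed, no running cluster; the assert merely makes the equivalence concrete):

import org.apache.spark.SparkConf

val conf = new SparkConf().setMaster("local[4]")
val conf2 = new SparkConf().set("spark.master", "local[4]")
// Both store the same value under the spark.master key:
assert(conf.get("spark.master") == conf2.get("spark.master"))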

package cn.rzlee.spark.scala

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

// A Scala object is effectively static.
object ScalaWordCount {
  def main(args: Array[String]): Unit = {
    // Create the Spark configuration and set the application name.
    val conf = new SparkConf().setAppName("wordCountApp")
    // Create the Spark execution entry point.
    val sc = new SparkContext(conf)
    // Specify where to read the data from to create the RDD (Resilient Distributed Dataset).
    val lines: RDD[String] = sc.textFile("")
    // Split each line and flatten the result.
    val words: RDD[String] = lines.flatMap(_.split(" "))
    // Pair each word with a 1.
    val wordAndOne: RDD[(String, Int)] = words.map((_, 1))
    // Aggregate by key: keep the key, sum the values.
    val reduced: RDD[(String, Int)] = wordAndOne.reduceByKey(_ + _)
    // Sort by count, descending.
    val sorted = reduced.sortBy(_._2, false)
    // Save the result to HDFS.
    sorted.saveAsTextFile("")
    // Release resources.
    sc.stop()
  }
}
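To see the pipeline do something concrete, here is a self-contained local run of the same steps. The local master, the object name WordCountLocalDemo, and the in-memory sample line are made up for illustration; parallelize stands in for the blank file paths above:

import org.apache.spark.{SparkConf, SparkContext}

object WordCountLocalDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("wordCountDemo").setMaster("local"))
    val sorted = sc.parallelize(Seq("spark hadoop spark"))
      .flatMap(_.split(" "))           // "spark", "hadoop", "spark"
      .map((_, 1))                     // (spark,1), (hadoop,1), (spark,1)
      .reduceByKey(_ + _)              // (spark,2), (hadoop,1)
      .sortBy(_._2, ascending = false)
    sorted.collect().foreach(println)  // prints (spark,2) then (hadoop,1)
    sc.stop()
  }
}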

WordCount based on sorting

Both versions below rank words by frequency. Because sortByKey sorts by key only, each (word, count) pair is inverted to (count, word), sorted descending, then inverted back.

Java version:

package cn.rzlee.spark.core;

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

/**
 * @Author ^_^
 * @Create 2018/11/3
 */
public class SortWordCount {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setAppName("SortWordCount").setMaster("local");
        JavaSparkContext sc = new JavaSparkContext(conf);
        // Create the lines RDD.
        JavaRDD<String> lines = sc.textFile("C:\\Users\\txdyl\\Desktop\\log\\in\\data.txt", 1);
        // Perform the word count.
        JavaRDD<String> words = lines.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterable<String> call(String s) throws Exception {
                return Arrays.asList(s.split("\t"));
            }
        });
        JavaPairRDD<String, Integer> pair = words.mapToPair(new PairFunction<String, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(String s) throws Exception {
                return new Tuple2<>(s, 1);
            }
        });
        JavaPairRDD<String, Integer> wordCounts = pair.reduceByKey(new Function2<Integer, Integer, Integer>() {
            @Override
            public Integer call(Integer v1, Integer v2) throws Exception {
                return v1 + v2;
            }
        });
        // Invert the key-value pairs: (word, count) -> (count, word).
        JavaPairRDD<Integer, String> countWords = wordCounts.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> t) throws Exception {
                return new Tuple2<>(t._2, t._1);
            }
        });
        // Sort by key (the count), descending.
        JavaPairRDD<Integer, String> sortedCountWords = countWords.sortByKey(false);
        // Invert the pairs back: (count, word) -> (word, count).
        JavaPairRDD<String, Integer> sortedWordCounts = sortedCountWords.mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> t) throws Exception {
                return new Tuple2<>(t._2, t._1);
            }
        });
        // Print the results.
        sortedWordCounts.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> t) throws Exception {
                System.out.println(t._1 + " appears " + t._2 + " times.");
            }
        });
        // Close the JavaSparkContext.
        sc.close();
    }
}

Scala version:

package cn.rzlee.spark.scala

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object SortWordCount {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName(this.getClass.getSimpleName).setMaster("local")
    val sc = new SparkContext(conf)
    val lines = sc.textFile("C:\\Users\\txdyl\\Desktop\\log\\in\\data.txt", 1)
    val words: RDD[String] = lines.flatMap(line => line.split("\t"))
    val pairs: RDD[(String, Int)] = words.map(word => (word, 1))
    val wordCounts: RDD[(String, Int)] = pairs.reduceByKey(_ + _)
    // Invert (word, count) to (count, word), sort by key descending, then invert back.
    val countWords: RDD[(Int, String)] = wordCounts.map(wordCount => (wordCount._2, wordCount._1))
    val sortedCountWords = countWords.sortByKey(false)
    val sortedWordCounts: RDD[(String, Int)] = sortedCountWords.map(sortedCountWord => (sortedCountWord._2, sortedCountWord._1))
    sortedWordCounts.foreach(sortedWordCount => {
      println(sortedWordCount._1 + " appears " + sortedWordCount._2 + " times.")
    })
    sc.stop()
  }
}
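A side note on the Scala version: the invert, sortByKey, invert-back sequence mirrors what the Java pair-RDD API requires, but Scala's RDD.sortBy accepts an arbitrary key function, so the three steps collapse into one call (this is exactly what the first WordCount above does). A sketch, with sortByCount as a hypothetical helper name:

import org.apache.spark.rdd.RDD

// Sort (word, count) pairs by count, descending, in one call,
// replacing the invert -> sortByKey -> invert sequence above.
def sortByCount(wordCounts: RDD[(String, Int)]): RDD[(String, Int)] =
  wordCounts.sortBy(_._2, ascending = false)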
