package wikipedia

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.log4j.{Level, Logger}

case class WikipediaArticle(title: String, text: String) {
  /**
   * @return Whether the text of this article mentions `lang` or not
   * @param lang Language to look for (e.g. "Scala")
   */
  def mentionsLanguage(lang: String): Boolean = text.split(' ').contains(lang)
}

object WikipediaRanking {
  // Set the log level so Spark's INFO output stays quiet
  Logger.getLogger("org").setLevel(Level.ERROR)

  val langs = List(
    "JavaScript", "Java", "PHP", "Python", "C#", "C++", "Ruby", "CSS",
    "Objective-C", "Perl", "Scala", "Haskell", "MATLAB", "Clojure", "Groovy")

  val conf: SparkConf = new SparkConf()
  val sc: SparkContext = new SparkContext("local[*]", "Wikipedia")

  // Hint: use a combination of `sc.textFile`, `WikipediaData.filePath` and `WikipediaData.parse`
  val wikiRdd: RDD[WikipediaArticle] = sc.textFile(WikipediaData.filePath).map(WikipediaData.parse)

  /** Returns the number of articles in which the language `lang` occurs.
   * Hint1: consider using method `aggregate` on RDD[T].
   * Hint2: consider using method `mentionsLanguage` on `WikipediaArticle`
   */
  def occurrencesOfLang(lang: String, rdd: RDD[WikipediaArticle]): Int =
    rdd.filter(_.mentionsLanguage(lang)).count().toInt
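
  // A minimal sketch of Hint1's `aggregate` approach, shown only for comparison
  // with the filter/count version above: each partition folds its own running
  // count, and the per-partition counts are then summed.
  def occurrencesOfLangViaAggregate(lang: String, rdd: RDD[WikipediaArticle]): Int =
    rdd.aggregate(0)(
      (count, article) => if (article.mentionsLanguage(lang)) count + 1 else count, // seqOp: fold within a partition
      _ + _                                                                         // combOp: merge partition counts
    )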

  /* (1) Use `occurrencesOfLang` to compute the ranking of the languages
   * (`val langs`) by determining the number of Wikipedia articles that
   * mention each language at least once. Don't forget to sort the
   * languages by their occurrence, in decreasing order!
   *
   * Note: this operation is long-running. It can potentially run for
   * several seconds.
   */
  def rankLangs(langs: List[String], rdd: RDD[WikipediaArticle]): List[(String, Int)] = {
    rdd.cache() // keep the RDD in memory, since it is traversed once per language
    langs.map(lang => (lang, occurrencesOfLang(lang, rdd))).sortBy(_._2).reverse
    /*
     For each element of `langs`, count the articles that mention it.
     `sortBy(_._2)` sorts by the count returned by `occurrencesOfLang(lang, rdd)`;
     `sortBy(_._1)` would sort by `lang` instead.
     The default order is ascending, hence the trailing `.reverse`.
     */
  }
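
  // Alternative sketch: sorting by the negated count yields descending order
  // directly, making the trailing `.reverse` unnecessary.
  def rankLangsDesc(langs: List[String], rdd: RDD[WikipediaArticle]): List[(String, Int)] =
    langs.map(lang => (lang, occurrencesOfLang(lang, rdd))).sortBy(-_._2)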

  /* Compute an inverted index of the set of articles, mapping each language
   * to the Wikipedia pages in which it occurs.
   */
  def makeIndex(langs: List[String], rdd: RDD[WikipediaArticle]): RDD[(String, Iterable[WikipediaArticle])] = {
    val articlesByLanguage = rdd.flatMap { article =>
      langs.filter(article.mentionsLanguage)
        .map(lang => (lang, article))
    }
    articlesByLanguage.groupByKey()
  }
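
  // Usage sketch (the helper name is illustrative, not part of the assignment):
  // `lookup` materializes the grouped values for a single key, i.e. every
  // article that mentions `lang`.
  def articlesFor(lang: String, index: RDD[(String, Iterable[WikipediaArticle])]): Seq[Iterable[WikipediaArticle]] =
    index.lookup(lang)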

  /* (2) Compute the language ranking again, but now using the inverted index.
   * Can you notice a performance improvement?
   *
   * Note: this operation is long-running. It can potentially run for
   * several seconds.
   */
  def rankLangsUsingIndex(index: RDD[(String, Iterable[WikipediaArticle])]): List[(String, Int)] =
    index.mapValues(_.size).sortBy(-_._2).collect().toList
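
  // Note: `_.size` walks each grouped Iterable, and `groupByKey` requires all
  // articles for a given language to fit in memory on a single executor.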

  /* (3) Use `reduceByKey` so that the computation of the index and the ranking are combined.
   * Can you notice an improvement in performance compared to measuring *both* the computation of the index
   * and the computation of the ranking? If so, can you think of a reason?
   *
   * Note: this operation is long-running. It can potentially run for
   * several seconds.
   */
  def rankLangsReduceByKey(langs: List[String], rdd: RDD[WikipediaArticle]): List[(String, Int)] = {
    rdd.flatMap { article =>
      langs.filter(article.mentionsLanguage) // same as langs.filter(lang => article.mentionsLanguage(lang))
        .map((_, 1))
    }.reduceByKey(_ + _)
      .sortBy(_._2) // ascending by count
      .collect()
      .toList
      .reverse      // flip to descending order
  }
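
  // Why (3) tends to beat (2): `reduceByKey` combines the (lang, 1) pairs
  // map-side, so only one partial count per language per partition is shuffled,
  // whereas `groupByKey` ships every (lang, article) pair across the network.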

  def main(args: Array[String]): Unit = {
    /* Languages ranked according to (1) */
    val langsRanked: List[(String, Int)] = timed("Part 1: naive ranking", rankLangs(langs, wikiRdd))

    /* An inverted index mapping languages to wikipedia pages on which they appear */
    def index: RDD[(String, Iterable[WikipediaArticle])] = makeIndex(langs, wikiRdd)

    /* Languages ranked according to (2), using the inverted index */
    val langsRanked2: List[(String, Int)] = timed("Part 2: ranking using inverted index", rankLangsUsingIndex(index))

    /* Languages ranked according to (3) */
    val langsRanked3: List[(String, Int)] = timed("Part 3: ranking using reduceByKey", rankLangsReduceByKey(langs, wikiRdd))

    /* Output the speed of each ranking */
    println(timing)
    sc.stop()
  }

  val timing = new StringBuffer
  // `code` is a by-name parameter, so the block passed in is evaluated here,
  // between the two timestamps, not at the call site.
  def timed[T](label: String, code: => T): T = {
    val start = System.currentTimeMillis()
    val result = code
    val stop = System.currentTimeMillis()
    timing.append(s"Processing $label took ${stop - start} ms.\n")
    result
  }
}
