15. Spark RDD Operator: AggregateByKey
This post walks through the aggregateByKey RDD operator with two small examples: a word count that mimics reduceByKey, and a grouping job that mimics groupByKey.
1. SparkUtils Utility Class
import org.apache.spark.{SparkConf, SparkContext}

object SparkUtils {

  /**
    * Default master URL
    */
  val DEFAULT_MASTER = "local[*]"

  /**
    * Get a SparkContext with the default master local[*]
    */
  def getSparkContext(appName: String): SparkContext = getSparkContext(appName, DEFAULT_MASTER)

  def getSparkContext(appName: String, master: String): SparkContext =
    new SparkContext(new SparkConf().setAppName(appName).setMaster(master))

  /**
    * Release the SparkContext
    */
  def close(sc: SparkContext): Unit = if (sc != null) sc.stop()
}
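A minimal usage sketch of this helper (the object name and the trivial job are illustrative only, not part of the original code; the import assumes the same package layout as the demo in section 3):

import cn.qphone.spark.common.Utils.SparkUtils

object SparkUtilsUsage {
  def main(args: Array[String]): Unit = {
    // obtain a local[*] context, run a trivial job, then release the context
    val sc = SparkUtils.getSparkContext("SparkUtilsUsage")
    println(sc.parallelize(1 to 10).sum()) // 55.0
    SparkUtils.close(sc)
  }
}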
2. Logging Utility

import org.apache.log4j.{Level, Logger}

// Mixing this trait into an object raises the noisy Spark/Hadoop loggers to WARN,
// so the demo output stays readable.
trait LoggerTrait {
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
  Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
  Logger.getLogger("org.spark_project").setLevel(Level.WARN)
}

3. The AggregateByKey Operator
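aggregateByKey is defined on RDDs of key/value pairs. Roughly (omitting the implicit ClassTag and the partitioner overloads), its shape is:

def aggregateByKey[U](zeroValue: U)(seqOp: (U, V) => U, combOp: (U, U) => U): RDD[(K, U)]

zeroValue is the initial accumulator used for each key inside a partition, seqOp folds one value of type V into that accumulator, and combOp merges accumulators of the same key coming from different partitions. Because the accumulator type U may differ from the value type V, the operator can mimic both reduceByKey (U = V = Int below) and groupByKey (U = ArrayBuffer[String] below).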
import cn.qphone.spark.common.LoggerTrait.LoggerTrait
import cn.qphone.spark.common.Utils.SparkUtils
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

import scala.collection.mutable.ArrayBuffer

object Deom15_AggregateByKey extends LoggerTrait {

  def main(args: Array[String]): Unit = {
    // 1. Get the SparkContext
    val sc = SparkUtils.getSparkContext("Deom15_AggregateByKey")
    // 2. Run the two demos
    abk2rbk(sc)
    gbk2rbk(sc)
    // 3. Release resources
    SparkUtils.close(sc)
  }

  // Word count: aggregateByKey used in place of reduceByKey
  def abk2rbk(sc: SparkContext): Unit = {
    val list: List[String] = List("i am a big big boy", "you are a abag girl")
    val listRDD: RDD[String] = sc.parallelize(list, 3)
    val mapRDD: RDD[(String, Int)] = listRDD.flatMap(_.split("\\s+")).map((_, 1))
    // zero value 0; seqOp and combOp are both plain addition
    val cntRDD: RDD[(String, Int)] = mapRDD.aggregateByKey(0)(seqOp1, combOp1)
    cntRDD.foreach(println)
  }

  def seqOp1(sum: Int, num: Int): Int = sum + num

  def combOp1(sum1: Int, sum2: Int): Int = sum1 + sum2

  // Grouping: aggregateByKey used in place of groupByKey
  def gbk2rbk(sc: SparkContext): Unit = {
    val stuList = List(
      "令狐沖 華山派", "岳不群 華山派", "虛竹 逍遙派", "喬峰 丐幫",
      "黃蓉 桃花島", "楊過 古墓派", "小龍女 古墓派", "郭靖 丐幫")
    val stuRDD: RDD[String] = sc.parallelize(stuList, 3)
    // Equivalent plain map version:
    // val stusRDD: RDD[(String, String)] = stuRDD.map(line => {
    //   val index = line.lastIndexOf(" ")
    //   val classname = line.substring(index + 1)
    //   val info = line.substring(0, index)
    //   (classname, info)
    // })
    val stusRDD: RDD[(String, String)] = stuRDD.mapPartitionsWithIndex {
      case (partitionId, iterator) =>
        val array = iterator.toArray
        // print each partition's content, only to show how the data is distributed
        println(s"${partitionId},${array.mkString("[", ",", "]")}")
        array.map(line => {
          val index = line.lastIndexOf(" ")
          val classname = line.substring(index + 1)
          val info = line.substring(0, index)
          (classname, info)
        }).iterator
    }
    // aggregateByKey with an empty ArrayBuffer as the zero value collects all members per key
    stusRDD.aggregateByKey(ArrayBuffer[String]())(seqOp2, combOp2).foreach(println)
  }

  def seqOp2(ab: ArrayBuffer[String], str: String): ArrayBuffer[String] = {
    ab.append(str)
    ab
  }

  def combOp2(ab1: ArrayBuffer[String], ab2: ArrayBuffer[String]): ArrayBuffer[String] = ab1 ++ ab2
}
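Beyond the two demos above, a common use of aggregateByKey is when the accumulator type differs from the value type. The following is a separate sketch (not part of the original article) that computes a per-key average, reusing the SparkUtils helper; the object name and sample data are made up:

import cn.qphone.spark.common.Utils.SparkUtils
import org.apache.spark.rdd.RDD

object Demo_AggregateByKeyAvg {
  def main(args: Array[String]): Unit = {
    val sc = SparkUtils.getSparkContext("Demo_AggregateByKeyAvg")
    val scores: RDD[(String, Int)] = sc.parallelize(List(
      ("華山派", 90), ("華山派", 70), ("丐幫", 80), ("丐幫", 60)), 2)
    // zero value (0, 0) is a (sum, count) pair per key;
    // seqOp folds one score into the partition-local pair,
    // combOp adds up pairs of the same key from different partitions
    val avgRDD = scores
      .aggregateByKey((0, 0))(
        (acc, v) => (acc._1 + v, acc._2 + 1),
        (a, b) => (a._1 + b._1, a._2 + b._2))
      .mapValues { case (sum, cnt) => sum.toDouble / cnt }
    avgRDD.foreach(println) // e.g. (華山派,80.0) and (丐幫,70.0)
    SparkUtils.close(sc)
  }
}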
Summary

aggregateByKey takes a per-key zero value plus two functions: seqOp, which folds a value into the accumulator within a partition, and combOp, which merges accumulators across partitions. With 0 and integer addition it reproduces a reduceByKey-style word count; with an empty ArrayBuffer and append/concatenate it reproduces groupByKey-style grouping, as the two demos above show.