reduceByKey(func, [numTasks])

When called on a DStream of (K, V) pairs, reduceByKey returns a new DStream of (K, V) pairs in which the values for each key are aggregated using the given reduce function. Note: by default this uses Spark's default number of parallel tasks (2 in local mode; in cluster mode the number is determined by the config property spark.default.parallelism) to do the grouping. You can pass an optional numTasks argument to set a different number of tasks.

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{Seconds, StreamingContext}

// Reduce log noise so the streaming output is easy to read
Logger.getLogger("org").setLevel(Level.ERROR)

val spark = SparkSession
  .builder()
  .config("spark.master", "local[2]")
  .appName("streaming for book")
  .getOrCreate()

val sc = spark.sparkContext
val ssc = new StreamingContext(sc, Seconds(1))

// textFileStream only picks up files created in this directory after the stream starts
val dataDirectory = "/tmp/filestream/"
val lines = ssc.textFileStream(dataDirectory)

// Split each line into words and pair every word with a count of 1
val keyValues = lines.flatMap(_.split(" ")).filter(_.nonEmpty).map(x => (x, 1))
// Sum the counts for each word with reduceByKey
val keyCount = keyValues.reduceByKey((x, y) => x + y)

keyCount.print()
ssc.start()
ssc.awaitTermination()

/*
Input file:

cat y.txt
a a b c d
d h i j k

Output:

-------------------------------------------
Time: 1583561350000 ms
-------------------------------------------
(d,2)
(b,1)
(h,1)
(j,1)
(a,2)
(i,1)
(k,1)
(c,1)
*/
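
If the default parallelism is not suitable, the optional numTasks argument can be passed as the second parameter of reduceByKey. A minimal sketch of that variant is shown below; the value 4 is only an illustrative choice, and keyValues refers to the pair DStream built in the listing above.

// Same aggregation as above, but with the number of reduce tasks set explicitly.
// 4 is an arbitrary illustrative value; tune it to your workload and cluster.
val keyCountWith4Tasks = keyValues.reduceByKey((x, y) => x + y, 4)
keyCountWith4Tasks.print()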