说明
- RDD中的元素按照key指定的分区规则进行分区。
- RDD中的元素必须是键值对类型。
- 如果该 RDD 原有的分区器(Partitioner)与传入的分区器相等,则不会重新分区(直接返回原 RDD),否则会发生 shuffle。
函数签名
def partitionBy(partitioner: Partitioner): RDD[(K, V)]
代码示例(默认分区器)
// Example: repartition a pair RDD with Spark's built-in HashPartitioner.
val conf: SparkConf = new SparkConf().setAppName(this.getClass.getName).setMaster("local[*]")
val sc = new SparkContext(conf)

// 4 key-value pairs spread over 2 initial partitions.
val rdd: RDD[(String, Int)] = sc.makeRDD(List(("a", 1), ("b", 2), ("c", 3), ("d", 4)), 2)

println("-------------------重新分区前--------------------")
rdd.mapPartitionsWithIndex {
  (index, datas) => {
    // BUG FIX: `mkString` consumes the Iterator, so returning `datas`
    // afterwards produced empty partitions and an empty collect().
    // Materialize once, print, then hand back a fresh iterator.
    val items = datas.toList
    println(index + "--->" + items.mkString(","))
    items.iterator
  }
}.collect()

println("-------------------重新分区后--------------------")
// HashPartitioner(3): partition = nonNegativeMod(key.hashCode, 3); triggers a shuffle.
val newRDD: RDD[(String, Int)] = rdd.partitionBy(new HashPartitioner(3))
newRDD.mapPartitionsWithIndex {
  (index, datas) => {
    // Same fix as above: avoid reusing an exhausted iterator.
    val items = datas.toList
    println(index + "--->" + items.mkString(","))
    items.iterator
  }
}.collect()

sc.stop()
代码示例(自定义分区器)
/**
 * Custom Spark partitioner that routes phone-number keys to partitions
 * by their 3-digit prefix: "136" -> 0, "137" -> 1, "138" -> 2, anything
 * else (including keys shorter than 3 chars) -> 3.
 *
 * NOTE(review): assumes every key is a String; a non-String key throws
 * ClassCastException, same as the original implementation.
 */
class MyPartitioner(partitions: Int) extends Partitioner {

  // Number of partitions this partitioner produces.
  override def numPartitions: Int = partitions

  // Map a key to its partition index via a prefix lookup.
  override def getPartition(key: Any): Int = {
    val phone = key.asInstanceOf[String]
    phone.take(3) match {
      case "136" => 0
      case "137" => 1
      case "138" => 2
      case _     => 3
    }
  }
}
// Example: repartition phone-number/region pairs with the custom MyPartitioner.
val rdd: RDD[(String, String)] = sc.makeRDD(List(("13698624174", "河北"), ("13766887551", "广东"),
  ("13876543211", "上海"), ("17677885551", "河南")), 2)

println("-------------------重新分区前--------------------")
rdd.mapPartitionsWithIndex {
  (index, datas) => {
    // BUG FIX: `mkString` consumes the Iterator, so returning `datas`
    // afterwards produced empty partitions and an empty collect().
    // Materialize once, print, then hand back a fresh iterator.
    val items = datas.toList
    println(index + "--->" + items.mkString(","))
    items.iterator
  }
}.collect()

println("-------------------重新分区后--------------------")
// MyPartitioner(4): prefixes 136/137/138 go to partitions 0/1/2, the rest to 3.
val newRDD: RDD[(String, String)] = rdd.partitionBy(new MyPartitioner(4))
newRDD.mapPartitionsWithIndex {
  (index, datas) => {
    // Same fix as above: avoid reusing an exhausted iterator.
    val items = datas.toList
    println(index + "--->" + items.mkString(","))
    items.iterator
  }
}.collect()