Common Spark transformation operators (Java version)

  • cartesian operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List;

/**
 * Cartesian product operator: cartesian
 * Created by asus on 2018/7/15.
 */
public class CartesianDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("CartesianDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc =  new JavaSparkContext(conf) ;
        // Class one
        List<String> classOne = new ArrayList<>() ;
        // Class two
        List<String> classTwo = new ArrayList<>() ;

        classOne.add("lao wang") ;
        classOne.add("lao zhang") ;
        classOne.add("lao zhao") ;
        classOne.add("lao li") ;

        classTwo.add("xiao wang") ;
        classTwo.add("xiao zhang") ;
        classTwo.add("xiao li") ;
        classTwo.add("xiao zhao") ;

        JavaRDD<String> classOneRDD = sc.parallelize(classOne , 2) ;
        JavaRDD<String> classTwoRDD = sc.parallelize(classTwo , 2) ;

        JavaPairRDD<String , String> classOneCartesianTwoRDD = classOneRDD.cartesian(classTwoRDD) ;
        classOneCartesianTwoRDD.foreach(new VoidFunction<Tuple2<String, String>>() {
            @Override
            public void call(Tuple2<String, String> s) throws Exception {
                System.out.println("( " + s._1 + " , " + s._2 + " )");
            }
        });

        sc.stop();
    }
}
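
cartesian pairs every element of the first RDD with every element of the second, so the result size is the product of the two input sizes (4 x 4 = 16 here) and grows very quickly on real data. A minimal continuation sketch, assuming the RDDs from the listing above are still in scope:

        // the full cross product has |classOne| * |classTwo| = 16 pairs
        long pairCount = classOneCartesianTwoRDD.count() ;
        System.out.println("cartesian pair count : " + pairCount);
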
  • coalesce operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

/**
 * Created by asus on 2018/6/16.
 */
public class CoalesceDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("CoalesceDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;
        List<Integer> numbers = new ArrayList<Integer>() ;
        for(int i = 0 ; i <= 100 ; i ++) {
            numbers.add(i) ;
        }

        // Create the number RDD
        JavaRDD<Integer> numRdd = sc.parallelize(numbers , 10) ;

        // Compute the partition index each number in the original RDD belongs to
        JavaRDD<String> numRddWithPartitionIndex = numRdd.mapPartitionsWithIndex(new Function2<Integer, Iterator<Integer>, Iterator<String>>() {
            @Override
            public Iterator<String> call(Integer index, Iterator<Integer> numIter) throws Exception {
                List<String> numString = new ArrayList<String>() ;
                while(numIter.hasNext()) {
                    Integer num = numIter.next() ;
                    numString.add("number " + num + "with partition index " + index) ;
                }
                return numString.iterator();
            }
        } , false) ;
        for(String numInfo : numRddWithPartitionIndex.collect()) {
            System.out.println(numInfo);
        }

        // Use coalesce to reduce the number of partitions; the shuffle flag defaults to false
        JavaRDD<Integer> coalescedNumRdd = numRdd.coalesce(5) ;

        // Compute the partition index each number belongs to after coalesce
        JavaRDD<String> coalescedNumRddWithPartitionIndex = coalescedNumRdd.mapPartitionsWithIndex(new Function2<Integer, Iterator<Integer>, Iterator<String>>() {
            @Override
            public Iterator<String> call(Integer index, Iterator<Integer> numIter) throws Exception {
                List<String> numString = new ArrayList<>() ;
                while(numIter.hasNext()) {
                    Integer num = numIter.next() ;
                    numString.add("number " + num + "with partition index " + index) ;
                }
                return numString.iterator();
            }
        } , false) ;
        for(String numInfo : coalescedNumRddWithPartitionIndex.collect()) {
            System.out.println(numInfo);
        }

        sc.stop() ;
    }
}
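
Without a shuffle, coalesce can only merge partitions, so asking for more partitions than currently exist has no effect. To grow the partition count you have to enable the shuffle flag, which is what repartition does internally. A short sketch, assuming numRdd from the listing above:

        // coalesce(20) without a shuffle would leave the 10 partitions untouched;
        // passing shuffle = true redistributes the data into 20 partitions
        JavaRDD<Integer> widenedRdd = numRdd.coalesce(20 , true) ;
        System.out.println("partitions after coalesce(20 , true) : " + widenedRdd.getNumPartitions());
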
  • cogroup operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List ;

/**
 * Created by asus on 2018/7/18.
 */
public class CogroupDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("CogroupDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<Tuple2<String , Integer>> scores_1 = new ArrayList<>() ;
        List<Tuple2<String , Integer>> scores_2 = new ArrayList<>() ;

        scores_1.add(new Tuple2<String, Integer>("lao wang" , 10)) ;
        scores_1.add(new Tuple2<String, Integer>("lao zhang" , 20)) ;
        scores_1.add(new Tuple2<String, Integer>("lao zhao" , 30)) ;
        scores_1.add(new Tuple2<String, Integer>("lao li" , 40)) ;

        scores_2.add(new Tuple2<String, Integer>("lao wang" , 10)) ;
        scores_2.add(new Tuple2<String, Integer>("xiao zhang" , 20)) ;
        scores_2.add(new Tuple2<String, Integer>("lao zhao" , 30)) ;
        scores_2.add(new Tuple2<String, Integer>("xiao li" , 40)) ;

        JavaPairRDD<String , Integer> scoreOneRDD = sc.parallelizePairs(scores_1) ;
        JavaPairRDD<String , Integer> scoreTwoRDD = sc.parallelizePairs(scores_2) ;
        JavaPairRDD<String , Tuple2<Iterable<Integer> , Iterable<Integer>>> cogroupRDD = scoreOneRDD.cogroup(scoreTwoRDD) ;
        cogroupRDD.foreach(new VoidFunction<Tuple2<String, Tuple2<Iterable<Integer>, Iterable<Integer>>>>() {
            @Override
            public void call(Tuple2<String, Tuple2<Iterable<Integer>, Iterable<Integer>>> t) throws Exception {
                System.out.println("key -> " + t._1);
                System.out.print("value1 -> ");
                for(Integer s : t._2._1) {
                    System.out.print(s + " ");
                }
                System.out.println() ;

                System.out.print("value2 -> ");
                for(Integer s : t._2._2) {
                    System.out.print(s + " ");
                }
                System.out.println();
            }
        });

        sc.stop();
    }
}
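
Unlike join, cogroup keeps every key that appears in either RDD; a key missing from one side simply shows up with an empty Iterable on that side. A quick check, assuming cogroupRDD from the listing above:

        // six distinct keys appear across the two score lists, so count() is 6
        System.out.println("distinct keys in cogroup result : " + cogroupRDD.count());
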
  • distinct operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext ;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List ;

/**
 * distinct: removes duplicate elements from an RDD; involves a shuffle
 * Created by asus on 2018/7/11.
 */
public class DistinctDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("DistinctDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<String> names = new ArrayList<>() ;
        names.add("lao wang") ;
        names.add("lao zhang") ;
        names.add("lao li") ;
        names.add("lao zhao") ;
        names.add("lao wang") ;
        names.add("lao zhang") ;
        names.add("lao li") ;
        names.add("lao zhao") ;

        JavaRDD<String> nameRDD = sc.parallelize(names , 4) ;
        JavaRDD<String> nameWithPartitionIndexRDD = nameRDD.mapPartitionsWithIndex(new Function2<Integer, Iterator<String>, Iterator<String>>() {
            @Override
            public Iterator<String> call(Integer index, Iterator<String> names) throws Exception {
                List<String> nameWithIndex = new ArrayList<>() ;
                while(names.hasNext()) {
                    nameWithIndex.add("name : " + names.next() + " with index " + index) ;
                }
                return nameWithIndex.iterator();
            }
        } , false) ;
        nameWithPartitionIndexRDD.foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        }) ;

        JavaRDD<String> distinctNameRDD = nameRDD.distinct() ;
        JavaRDD<String> distinctNameRDDWithPartitionIndex = distinctNameRDD.mapPartitionsWithIndex(new Function2<Integer, Iterator<String>, Iterator<String>>() {
            private static final long serialVersionUID = 4L ;
            @Override
            public Iterator<String> call(Integer index, Iterator<String> names) throws Exception {
                List<String> nameWithIndex = new ArrayList<>() ;
                while(names.hasNext()) {
                    nameWithIndex.add("name : " + names.next() + " with index " + index) ;
                }
                return nameWithIndex.iterator();
            }
        } , false) ;
        distinctNameRDDWithPartitionIndex.foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });

        sc.stop() ;
    }
}
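
The shuffle mentioned above comes from how distinct is built: conceptually it pairs each element with a dummy value, reduces by key, and keeps only the keys. A rough Java 8 sketch of that idea (not Spark's exact implementation; it also assumes imports of JavaPairRDD and scala.Tuple2):

        // roughly what distinct() does under the hood: (value, 1) pairs reduced by key
        JavaPairRDD<String , Integer> keyed = nameRDD.mapToPair(name -> new Tuple2<>(name , 1)) ;
        JavaRDD<String> distinctViaReduce = keyed.reduceByKey((a , b) -> a).keys() ;
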
  • filter operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.Arrays;
import java.util.List;

/**
 * Created by asus on 2018/6/16.
 * filter transformation: f : U => Boolean
 */
public class FilterDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("FilterDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;
        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<Integer> numbers = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10) ;
        JavaRDD<Integer> numRdd = sc.parallelize(numbers) ;
        JavaRDD<Integer> numLarger5 = numRdd.filter(new Function<Integer, Boolean>() {
            @Override
            public Boolean call(Integer v1) throws Exception {
                return v1 > 5 ;
            }
        }) ;

        numRdd.foreach(new VoidFunction<Integer>() {
            @Override
            public void call(Integer integer) throws Exception {
                System.out.println("number : " + integer) ;
            }
        });

        numLarger5.foreach(new VoidFunction<Integer>() {
            @Override
            public void call(Integer integer) throws Exception {
                System.out.println("number larger 5 : " + integer) ;
            }
        });
        sc.stop() ;
    }
}
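
Since the examples here target the Spark 2.x Java API, the anonymous Function classes can also be written as Java 8 lambdas; the same filter then collapses to one line:

        // equivalent filter using a Java 8 lambda
        JavaRDD<Integer> numLarger5Lambda = numRdd.filter(v -> v > 5) ;
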
  • flatMap operator

package rddDemo.transformation;

import org.apache.spark.SparkConf ;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext ;
import org.apache.spark.api.java.function.FlatMapFunction;

import java.util.* ;


/**
 * Created by asus on 2018/6/16.
 */
public class FlatMapDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("FlatMapDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<String> sentences = new ArrayList<>() ;
        sentences.add("today is a nice day") ;
        sentences.add("i love you") ;
        sentences.add("who am i") ;

        JavaRDD<String> sentenceRdd = sc.parallelize(sentences) ;
        for(String s : sentenceRdd.collect()) {
            System.out.println(s);
        }

        JavaRDD<String> wordRdd = sentenceRdd.flatMap(new FlatMapFunction<String, String>() {
            @Override
            public Iterator<String> call(String s) throws Exception {
                String[] words = s.split("\\s+") ;
                List<String> wordsList = new ArrayList<>() ;
                for(int i = 0 ; i < words.length ; i ++) {
                    wordsList.add(words[i]) ;
                }
                return wordsList.iterator() ;
            }
        }) ;
        for(String word : wordRdd.collect()) {
            System.out.println(word);
        }

        sc.stop();
    }
}
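
The intermediate word list is not strictly needed: the iterator can be built directly from the split array. A one-line alternative with a Java 8 lambda, assuming sentenceRdd from the listing above:

        // flatMap returning an iterator over the split words directly
        JavaRDD<String> wordRddShort = sentenceRdd.flatMap(s -> Arrays.asList(s.split("\\s+")).iterator()) ;
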
  • groupByKey operator

package rddDemo.transformation;

import org.apache.spark.SparkConf ;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext ;
import org.apache.spark.api.java.function.PairFunction;
import scala.Tuple2;

import java.util.* ;

/**
 * Created by asus on 2018/6/17.
 */
public class GroupByKeyJava {
    public static void main(String [] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("GroupByKeyJava") ;
        conf.setMaster("local[3]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<Tuple2<String , Integer>> userInfo = new ArrayList<>() ;
        userInfo.add(new Tuple2<String, Integer>("zhang" , 100)) ;
        userInfo.add(new Tuple2<String, Integer>("zhang" , 101)) ;
        userInfo.add(new Tuple2<String, Integer>("zhang" , 102)) ;
        userInfo.add(new Tuple2<String, Integer>("wang" , 90)) ;
        userInfo.add(new Tuple2<String, Integer>("wang" , 91)) ;
        userInfo.add(new Tuple2<String, Integer>("wang" , 92)) ;
        userInfo.add(new Tuple2<String, Integer>("li" , 80)) ;
        userInfo.add(new Tuple2<String, Integer>("li" , 81)) ;
        userInfo.add(new Tuple2<String, Integer>("li" , 82)) ;

        System.out.println("############################ 构造键值对RDD ############################");
        // 使用 sc.parallelizePairs 加载键值对集合,键值对由Tuple2元组构成
        JavaPairRDD<String , Integer> userRdd = sc.parallelizePairs(userInfo) ;
        for(Tuple2<String , Integer> t : userRdd.collect()) {
            System.out.println("name : " + t._1 + " score : " + t._2);
        }

        System.out.println("############################ 操作键值对RDD ############################");
        // mapToPairs 方法操作键值对中的每个值
        JavaPairRDD<String , Integer> mappedUserRdd = userRdd.mapToPair(new PairFunction<Tuple2<String, Integer>, String, Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<String, Integer> t) throws Exception {
                return new Tuple2<>(t._1 , t._2 + 10);
            }
        }) ;
        for(Tuple2<String , Integer> t : mappedUserRdd.collect()) {
            System.out.println("name " + t._1 + " score " + t._2);
        }

        System.out.println("############################ 键值对RDD groupByKey ############################");
        // groupByKey算子进行分组,分组结果为<String , Itarable<Integer>>
        JavaPairRDD<String , Iterable<Integer>> userRddGroupByKey = userRdd.groupByKey() ;

        for(Tuple2<String , Iterable<Integer>> t : userRddGroupByKey.collect()) {
            List<Integer> scoreList = new ArrayList<>() ;
            for(Integer score : t._2) {
                scoreList.add(score) ;
            }
            System.out.println("name " + t._1 + " scoreList : " + scoreList.toString());
        }

        System.out.println("############################ ############################");
        JavaPairRDD<String , Integer> userTotalScore = userRddGroupByKey.mapToPair(new PairFunction<Tuple2<String,Iterable<Integer>>, String , Integer>() {
            @Override
            public Tuple2<String , Integer> call(Tuple2<String , Iterable<Integer>> t) throws Exception {
                Integer sum = 0 ;
                for(Integer s : t._2) {
                    sum += s ;
                }
                return new Tuple2<>(t._1 , sum) ;
            }
        }) ;
        for(Tuple2<String , Integer> t : userTotalScore.collect()) {
            System.out.println("name : " + t._1 + " total_score : " + t._2) ;
        }

        sc.stop();
    }
}
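
When the grouped values are only ever folded into a single number, as in the total-score step above, reduceByKey is usually the better choice: it combines values on the map side before the shuffle instead of moving every individual score across the network. A sketch, assuming userRdd from the listing above:

        // same per-name totals as groupByKey + mapToPair, but with map-side combining
        JavaPairRDD<String , Integer> totalScoreByReduce = userRdd.reduceByKey((a , b) -> a + b) ;
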
  • intersection operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.ArrayList;
import java.util.List ;

/**
 * Intersection operator for RDDs
 * Created by asus on 2018/7/15.
 */
public class IntersectionDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("IntersectionDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc =  new JavaSparkContext(conf) ;
        // Class one
        List<String> classOne = new ArrayList<>() ;
        // Class two
        List<String> classTwo = new ArrayList<>() ;

        classOne.add("lao wang") ;
        classOne.add("lao wang") ;
        classOne.add("lao zhang") ;
        classOne.add("lao li") ;
        classOne.add("lao zhao") ;

        classTwo.add("lao wang") ;
        classTwo.add("lao wang") ;
        classTwo.add("lao zhao") ;
        classTwo.add("lao zhao") ;
        classTwo.add("xiao wang") ;
        classTwo.add("xiao zhao") ;

        JavaRDD<String> classOneRDD = sc.parallelize(classOne , 2) ;
        JavaRDD<String> classTwoRDD = sc.parallelize(classTwo , 2) ;

        // Compute the intersection of the two RDDs; the result is deduplicated
        JavaRDD<String> classOneAndTwoRDD = classOneRDD.intersection(classTwoRDD) ;
        classOneAndTwoRDD.foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });
        sc.stop();
    }
}
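
A closely related operator is subtract, which returns the elements of one RDD that do not appear in the other (set difference). A short sketch, assuming the two class RDDs from the listing above:

        // students who appear only in class one
        JavaRDD<String> onlyInClassOne = classOneRDD.subtract(classTwoRDD) ;
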
  • join operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List;

/**
 * Created by asus on 2018/7/18.
 */
public class JoinDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("CogroupDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<Tuple2<String , Integer>> scores_1 = new ArrayList<>() ;
        List<Tuple2<String , Integer>> scores_2 = new ArrayList<>() ;

        scores_1.add(new Tuple2<String, Integer>("lao wang" , 10)) ;
        scores_1.add(new Tuple2<String, Integer>("lao zhang" , 20)) ;
        scores_1.add(new Tuple2<String, Integer>("lao zhao" , 30)) ;
        scores_1.add(new Tuple2<String, Integer>("lao li" , 40)) ;

        scores_2.add(new Tuple2<String, Integer>("lao wang" , 10)) ;
        scores_2.add(new Tuple2<String, Integer>("xiao zhang" , 20)) ;
        scores_2.add(new Tuple2<String, Integer>("lao zhao" , 30)) ;
        scores_2.add(new Tuple2<String, Integer>("xiao li" , 40)) ;

        JavaPairRDD<String , Integer> scoreOneRDD = sc.parallelizePairs(scores_1) ;
        JavaPairRDD<String , Integer> scoreTwoRDD = sc.parallelizePairs(scores_2) ;
        JavaPairRDD<String , Tuple2<Integer , Integer>> joinRDD = scoreOneRDD.join(scoreTwoRDD) ;

        joinRDD.foreach(new VoidFunction<Tuple2<String, Tuple2<Integer, Integer>>>() {
            @Override
            public void call(Tuple2<String, Tuple2<Integer, Integer>> t) throws Exception {
                System.out.println(t._1 + " -> " + t._2._1 + " , " + t._2._2);
            }
        }) ;

        sc.stop() ;
    }
}
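
join is an inner join: keys present in only one of the two RDDs (xiao zhang, lao li, ...) are dropped from the result. To keep every key of the left-hand RDD, leftOuterJoin wraps the possibly missing right-hand value in an Optional (org.apache.spark.api.java.Optional in Spark 2.x, which needs an extra import). A sketch, assuming the score RDDs from the listing above:

        // keeps all keys of scoreOneRDD; missing right-hand values appear as an absent Optional
        JavaPairRDD<String , Tuple2<Integer , Optional<Integer>>> leftJoinRDD =
                scoreOneRDD.leftOuterJoin(scoreTwoRDD) ;
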
  • map operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.Arrays;
import java.util.List;


/**
 * Created by asus on 2018/6/16.
 */
public class MapDemoJava {
    public static void main(String [] args) {
        System.out.println("MapDemoJava");

        SparkConf conf = new SparkConf() ;
        conf.setAppName("MapDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        // Declare the source RDD
        List<Integer> numbers = Arrays.asList(1 , 2 , 3 , 4 ,5 , 5  ,6 ,8) ;
        JavaRDD<Integer> numRdd = sc.parallelize(numbers) ;

        // Apply the map operator to the RDD
        JavaRDD<Integer> newNumRdd = numRdd.map(new Function<Integer, Integer>() {
            private static final long serialVersionUID = 1L ;
            @Override
            public Integer call(Integer number) throws Exception {
                return number * 10 ;
            }
        }) ;

        // Print the original RDD with the foreach action
        numRdd.foreach(new VoidFunction<Integer>() {
            @Override
            public void call(Integer integer) throws Exception {
                System.out.println("number : " + integer) ;
            }
        });

        newNumRdd.foreach(new VoidFunction<Integer>() {
            @Override
            public void call(Integer integer) throws Exception {
                System.out.println("new number : " + integer);
            }
        });

        sc.stop();
    }
}
  • mapPartitions operator

package rddDemo.transformation;

import org.apache.spark.SparkConf ;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.*;

/**
 * Created by asus on 2018/6/16.
 */

public class MapPartitionsDemoJava {
    public static void main(String[] args) {
        System.out.println("MapPartitionsDemoJava");
        SparkConf conf = new SparkConf() ;
        conf.setAppName("MapPartitionsDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<String> names = new ArrayList<String>() ;
        names.add("xuruyun") ;
        names.add("liangjingru") ;
        names.add("wangfei") ;

        final Map<String , Integer> scoreMap = new HashMap<String , Integer>() ;
        scoreMap.put("xuruyun" , 150) ;
        scoreMap.put("liangjingru" , 100) ;
        scoreMap.put("wangfei" , 90) ;

        // Define the name RDD
        JavaRDD<String> nameRdd = sc.parallelize(names) ;

        // Derive the score RDD from the name RDD
        JavaRDD<Integer> scoreRdd = nameRdd.mapPartitions(new FlatMapFunction<Iterator<String>, Integer>() {
            @Override
            public Iterator<Integer> call(Iterator<String> iterator) throws Exception {
                List<Integer> scores = new ArrayList<Integer>() ;
                while(iterator.hasNext()) {
                    String name = iterator.next() ;
                    Integer score = scoreMap.get(name) ;
                    scores.add(score) ;
                }
                return scores.iterator();
            }
        }) ;

        // Print the contents of the name RDD with foreach
        nameRdd.foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println("name : " + s);
            }
        });

        scoreRdd.foreach(new VoidFunction<Integer>() {
            @Override
            public void call(Integer score) throws Exception {
                System.out.println("score : " + score);
            }
        });
        sc.stop();
    }
}
  • mapPartitionsWithIndex operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.* ;

/**
 * Created by asus on 2018/6/16.
 */
public class MapPartitionsWithIndexDemoJava {
    public static void main(String [] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("MapPartitionsWithIndexDemoJava") ;
        conf.setMaster("local[2]") ;
        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<String> names = new ArrayList<String>() ;
        names.add("xuruyun") ;
        names.add("liangjingru") ;
        names.add("wangfei") ;

        JavaRDD<String> nameRdd = sc.parallelize(names , 3) ;
        nameRdd.foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s) ;
            }
        });

        JavaRDD<String> nameRddWithPartitionIndex = nameRdd.mapPartitionsWithIndex(new Function2<Integer, Iterator<String>, Iterator<String>>() {
            private static final long serialVersionUID = 1L ;
            @Override
            public Iterator<String> call(Integer index, Iterator<String> iterator) throws Exception {
                List<String> helloPeople = new ArrayList<String>() ;
                while(iterator.hasNext()) {
                    String name = iterator.next() ;
                    helloPeople.add("Hello " + name + " with index " + index) ;
                }
                return helloPeople.iterator();
            }
        } , false) ;

        nameRddWithPartitionIndex.foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });
        sc.stop() ;
    }
}
  • repartition operator

package rddDemo.transformation;

import org.apache.spark.SparkConf ;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;

import java.util.ArrayList;
import java.util.* ;

/**
 * Created by asus on 2018/6/16.
 */
public class RepartitionDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("RepartitionDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<Integer> numbers = new ArrayList<>() ;
        for(int i = 0 ; i <= 100 ; i ++) {
            numbers.add(i) ;
        }

        JavaRDD<Integer> numRdd = sc.parallelize(numbers , 10) ;
        JavaRDD<String> numRddWithPartitionIndex = numRdd.mapPartitionsWithIndex(new Function2<Integer, Iterator<Integer>, Iterator<String>>() {
            private static final long serialVersionUID = 1L ;
            @Override
            public Iterator<String> call(Integer index, Iterator<Integer> numIter) throws Exception {
                List<String> numString = new ArrayList<>() ;
                while(numIter.hasNext()) {
                    Integer num = numIter.next() ;
                    numString.add("number " + num + " with partition index " + index) ;
                }
                return numString.iterator();
            }
        } , false) ;
        for(String numInfo : numRddWithPartitionIndex.collect()) {
            System.out.println(numInfo);
        }

        JavaRDD<Integer> numRddRepartition = numRdd.repartition(5) ;

        JavaRDD<String> numRddRepartitionWithPartitionIndex = numRddRepartition.mapPartitionsWithIndex(new Function2<Integer, Iterator<Integer>, Iterator<String>>() {
            private static final long serialVersionUID = 2L ;
            @Override
            public Iterator<String> call(Integer index, Iterator<Integer> numIter) throws Exception {
                List<String> numString = new ArrayList<>() ;
                while(numIter.hasNext()) {
                    Integer num = numIter.next() ;
                    numString.add("number " + num + " with partition index " + index) ;
                }
                return numString.iterator();
            }
        } , false) ;
        for(String numInfo : numRddRepartitionWithPartitionIndex.collect()) {
            System.out.println(numInfo);
        }

        sc.stop();
    }
}
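
repartition(n) is simply coalesce(n, shuffle = true): because it always shuffles, it can both increase and decrease the partition count, at the cost of moving data. A quick check, assuming numRdd from the listing above:

        // equivalent to numRdd.repartition(5)
        JavaRDD<Integer> coalesceWithShuffle = numRdd.coalesce(5 , true) ;
        System.out.println("partitions : " + coalesceWithShuffle.getNumPartitions());
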
  • sample operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.ArrayList;
import java.util.List;

/**
 * sample(withReplacement: Boolean , fraction: Double , seed: Long) sampling operator
 * Created by asus on 2018/7/8.
 */
public class SampleDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("SampleDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<String> names = new ArrayList<>() ;
        names.add("lao wang") ;
        names.add("xiao wang") ;
        names.add("lao zhang") ;
        names.add("xiao zhang") ;
        names.add("lao li") ;
        names.add("xiao li") ;

        JavaRDD<String> namesRDD = sc.parallelize(names , 3) ;

        // Sample 50% without replacement
        System.out.println(">>>>>>>>>>>>>>>>>> Sample 50% without replacement <<<<<<<<<<<<<<<<<");
        namesRDD.sample(false , 0.5).foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });

        // Sample 50% with replacement
        System.out.println(">>>>>>>>>>>>>>>>>> Sample 50% with replacement <<<<<<<<<<<<<<<<<");
        namesRDD.sample(true , 0.5).foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });

        // Sample 50% without replacement, with a fixed seed so the sample is reproducible
        System.out.println(">>>>>>>>>>>>>>>>>> Sample 50% without replacement, fixed seed <<<<<<<<<<<<<<<<<");
        namesRDD.sample(false , 0.5 , 100).foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });

        // Sample 50% with replacement, with a fixed seed so the sample is reproducible
        System.out.println(">>>>>>>>>>>>>>>>>> Sample 50% with replacement, fixed seed <<<<<<<<<<<<<<<<<");
        namesRDD.sample(true , 0.5 , 100).foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });

        sc.stop();
    }
}
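
The fraction argument is a per-element probability, not an exact output size, so the number of sampled elements varies from run to run unless a seed pins down the randomness. A small check, assuming namesRDD from the listing above:

        // the sampled count is random around fraction * total, not exactly half
        long sampledCount = namesRDD.sample(false , 0.5).count() ;
        System.out.println("sampled " + sampledCount + " of " + namesRDD.count() + " names");
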
  • sortByKey operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List ;


/**
 * Created by asus on 2018/7/11.
 */
public class SortByKeyDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("SortByKeyDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<Tuple2<String , Integer>> scores = new ArrayList<>() ;
        scores.add(new Tuple2<String, Integer>("lao wang" , 10)) ;
        scores.add(new Tuple2<String, Integer>("lao zhang" , 20)) ;
        scores.add(new Tuple2<String, Integer>("lao li" , 30)) ;
        scores.add(new Tuple2<String, Integer>("lao zhao" , 40)) ;

        JavaPairRDD<String , Integer> scoreRDD = sc.parallelizePairs(scores , 2) ;

        scoreRDD.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> s) throws Exception {
                System.out.println("name -> " + s._1 + " , score -> " + s._2);
            }
        });

        // Ascending by key
        System.out.println(">>>>>>>>>>>>>>>>>>>>>>>>>>> asc (ascending) <<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
        JavaPairRDD<String , Integer> scoreSortByKeyAscRDD = scoreRDD.sortByKey(true) ;
        scoreSortByKeyAscRDD.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> s) throws Exception {
                System.out.println("name -> " + s._1 + " , score -> " + s._2);
            }
        });

        // Descending by key
        System.out.println(">>>>>>>>>>>>>>>>>>>>>>>>>>> desc (descending) <<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
        JavaPairRDD<String , Integer> scoreSortByKeyDescRDD = scoreRDD.sortByKey(false) ;
        scoreSortByKeyDescRDD.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> s) throws Exception {
                System.out.println("name -> " + s._1 + " , score -> " + s._2);
            }
        });

        // Sort by score in ascending order (swap key and value, sortByKey, then swap back)
        System.out.println(">>>>>>>>>>>>>>>>>>>>>>>>>>> asc (sort by score ascending) <<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
        JavaPairRDD<String , Integer> scoreAscRDD = scoreRDD.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {

            @Override
            public Tuple2<Integer, String> call(Tuple2<String, Integer> s) throws Exception {
                return new Tuple2<>(s._2 , s._1);
            }
        }).sortByKey(true).mapToPair(new PairFunction<Tuple2<Integer, String>, String, Integer>() {
            @Override
            public Tuple2<String , Integer> call(Tuple2<Integer, String> s) throws Exception {
                return new Tuple2<>(s._2 , s._1);
            }
        }) ;
        scoreAscRDD.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> s) throws Exception {
                System.out.println("name -> " + s._1 + " , score -> " + s._2);
            }
        });

        // Sort by score in descending order (foreach prints each partition's slice of the sorted result)
        System.out.println(">>>>>>>>>>>>>>>>>>>>>>>>>>> desc (sort by score descending) <<<<<<<<<<<<<<<<<<<<<<<<<<<<<");
        JavaPairRDD<String , Integer> scoreDescRDD = scoreRDD.mapToPair(new PairFunction<Tuple2<String, Integer>, Integer, String>() {
            @Override
            public Tuple2<Integer , String> call(Tuple2<String, Integer> s) throws Exception {
                return new Tuple2<>(s._2 , s._1);
            }
        }).sortByKey(false).mapToPair(new PairFunction<Tuple2<Integer, String>, String , Integer>() {
            @Override
            public Tuple2<String, Integer> call(Tuple2<Integer, String> s) throws Exception {
                return new Tuple2<>(s._2 , s._1);
            }
        }) ;
        scoreDescRDD.foreach(new VoidFunction<Tuple2<String, Integer>>() {
            @Override
            public void call(Tuple2<String, Integer> s) throws Exception {
                System.out.println("name -> " + s._1 + " , score -> " + s._2);
            }
        });

        sc.stop();
    }
}
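
Because foreach runs on each partition independently, the printed order above can look interleaved even though the sorted RDD is globally ordered across its partitions. Collecting to the driver makes the global order visible (fine here because the demo data is tiny):

        // collect() preserves the global sort order for printing
        for (Tuple2<String , Integer> t : scoreSortByKeyAscRDD.collect()) {
            System.out.println("name -> " + t._1 + " , score -> " + t._2);
        }
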
  • union operator

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.VoidFunction;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List ;

/**
 * rdd_1.union(rdd_2)
 * Concatenates the partitions of the two RDDs directly.
 * If rdd_1 and rdd_2 each have two partitions, the union has four partitions, i.e. no shuffle occurs during union.
 * Created by asus on 2018/7/8.
 */
public class UnionDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("UnionDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<String> oldMan = new ArrayList<>() ;
        oldMan.add("lao wang") ;
        oldMan.add("lao zhang") ;
        oldMan.add("lao li") ;
        oldMan.add("lao zhao") ;

        List<String> youngMan = new ArrayList<>() ;
        youngMan.add("xiao wang") ;
        youngMan.add("xiao zhang") ;
        youngMan.add("xiao li") ;
        youngMan.add("xiao zhao") ;


        /**
         * name : lao li with index 1
         * name : lao zhao with index 1
         * name : lao wang with index 0
         * name : lao zhang with index 0
         */
        // oldManRDD has two partitions
        JavaRDD<String> oldManRDD = sc.parallelize(oldMan , 2) ;
        // Record the partition index each element of oldManRDD belongs to
        JavaRDD<String> oldManRDDWithIndex = oldManRDD.mapPartitionsWithIndex(new Function2<Integer, Iterator<String>, Iterator<String>>() {
            private static final long serialVersionUID = 1L ;
            @Override
            public Iterator<String> call(Integer index, Iterator<String> names) throws Exception {
                List<String> nameWithIndex = new ArrayList<>() ;
                while(names.hasNext()) {
                    nameWithIndex.add("name : " + names.next() + " with index " + index) ;
                }
                return nameWithIndex.iterator() ;
            }
        } , false) ;
        oldManRDDWithIndex.foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });

        /**
         * name : xiao li with index 1
         * name : xiao zhao with index 1
         * name : xiao wang with index 0
         * name : xiao zhang with index 0
         */
        // youngManRDD has two partitions
        JavaRDD<String> youngManRDD = sc.parallelize(youngMan , 2) ;
        // Record the partition index each element of youngManRDD belongs to
        JavaRDD<String> youngManRDDWithIndex = youngManRDD.mapPartitionsWithIndex(new Function2<Integer, Iterator<String>, Iterator<String>>() {
            private static final long serialVersionUID = 2L ;
            @Override
            public Iterator<String> call(Integer index, Iterator<String> names) throws Exception {
                List<String> nameWithIndex = new ArrayList<>() ;
                while(names.hasNext()) {
                    nameWithIndex.add("name : " + names.next() + " with index " + index) ;
                }
                return nameWithIndex.iterator() ;
            }
        } , false) ;
        youngManRDDWithIndex.foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });

        /**
         * name : lao li with index 1
         * name : lao zhao with index 1
         * name : xiao wang with index 2
         * name : xiao zhang with index 2
         * name : xiao li with index 3
         * name : xiao zhao with index 3
         * name : lao wang with index 0
         * name : lao zhang with index 0
         */
        // Union oldManRDD and youngManRDD; the result has 4 partitions and each partition keeps the same contents as before the union
        JavaRDD<String> unionOldAndYoung = oldManRDD.union(youngManRDD) ;
        // Record the partition index each element of unionOldAndYoung belongs to
        JavaRDD<String> unionOldAndYoungWithIndex = unionOldAndYoung.mapPartitionsWithIndex(new Function2<Integer, Iterator<String>, Iterator<String>>() {
            private static final long serialVersionUID = 3L ;
            @Override
            public Iterator<String> call(Integer index, Iterator<String> names) throws Exception {
                List<String> nameWithIndex = new ArrayList<>() ;
                while(names.hasNext()) {
                    nameWithIndex.add("name : " + names.next() + " with index " + index) ;
                }
                return nameWithIndex.iterator();
            }
        } , false) ;
        unionOldAndYoungWithIndex.foreach(new VoidFunction<String>() {
            @Override
            public void call(String s) throws Exception {
                System.out.println(s);
            }
        });

        sc.stop() ;
    }
}
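
Note that union keeps duplicates and simply concatenates partitions; if set semantics are wanted, chaining distinct() after the union removes repeated elements at the price of a shuffle. A one-line sketch, assuming the two RDDs from the listing above:

        // set-style union: concatenate, then deduplicate (adds a shuffle)
        JavaRDD<String> setUnion = oldManRDD.union(youngManRDD).distinct() ;
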
  • saveAsTextFile operator (strictly an action, not a transformation)

package rddDemo.transformation;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

import java.util.ArrayList;
import java.util.List ;

/**
 * Created by asus on 2018/7/15.
 */
public class SaveAsTextFileDemoJava {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf() ;
        conf.setAppName("SaveAsTextFileDemoJava") ;
        conf.setMaster("local[2]") ;

        System.setProperty("hadoop.home.dir" , "E:\\hadoop-2.6.0") ;

        JavaSparkContext sc = new JavaSparkContext(conf) ;

        List<Integer> numbers = new ArrayList<>() ;
        for(int i = 0 ; i < 10 ; i ++) {
            numbers.add(i) ;
        }

        JavaRDD<Integer> numberRDD = sc.parallelize(numbers , 2) ;

        // Save the result to the local file system (the target directory must not already exist)
        numberRDD.saveAsTextFile("src/main/java/rddDemo/saveAsTextFilePath");
        // Save the result to HDFS (the target directory must not already exist)
//        numberRDD.saveAsTextFile("hdfs://ip:9000/saveAsTextFilePath");

        sc.stop();
    }
}
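
saveAsTextFile writes one part-* file per partition (two files here). If a single output file is wanted for a small result, the RDD can be coalesced to one partition first; the path below is just a hypothetical example:

        // one partition -> one part-00000 file (only sensible for small results)
        numberRDD.coalesce(1).saveAsTextFile("src/main/java/rddDemo/saveAsTextFileSinglePath");
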

Reposted from blog.csdn.net/cxx654/article/details/81137977