# Spark secondary sort: sort two-column data by the first column; when the first
# columns are equal, break the tie with the second column.

#!/usr/bin/env python3

from operator import gt
from pyspark import SparkContext, SparkConf

class SecondarySortKey():
    """Composite sort key for Spark's sortByKey.

    Wraps a (column1, column2) pair and compares primarily on column1,
    falling back to column2 only when the first columns are equal.
    Only ``__gt__`` is defined, which is what sortByKey's comparisons use here.
    """

    def __init__(self, k):
        # k is an indexable pair, e.g. a tuple of two ints: (column1, column2).
        self.column1 = k[0]
        self.column2 = k[1]

    def __gt__(self, other):
        # Tie on the first column -> compare the second column;
        # otherwise the first column alone decides the ordering.
        # (Original had `if other.column1 = self.column1` — assignment, not `==`.)
        if other.column1 == self.column1:
            return gt(self.column2, other.column2)
        else:
            return gt(self.column1, other.column1)

def main():
    """Secondary-sort a text file of integer pairs and print the lines.

    Each non-blank input line holds two space-separated integers. Lines are
    sorted descending by the first integer, ties broken by the second, using
    SecondarySortKey as the RDD key.
    """
    conf = SparkConf().setAppName('Spark_sort').setMaster('local[1]')
    sc = SparkContext(conf=conf)
    # NOTE(review): "saprk" looks like a typo for "spark" — confirm the real path.
    file = "file:///usr/local/saprk/mycode/rdd/secondarysort/file4.txt"
    rdd1 = sc.textFile(file)
    # Drop blank lines so int() below never sees an empty field.
    rdd2 = rdd1.filter(lambda x: len(x.strip()) > 0)
    # Key each line by its (int, int) pair; keep the raw line as the value.
    rdd3 = rdd2.map(lambda x: ((int(x.split(" ")[0]), int(x.split(" ")[1])), x))
    rdd4 = rdd3.map(lambda x: (SecondarySortKey(x[0]), x[1]))
    rdd5 = rdd4.sortByKey(False)  # False = descending, via SecondarySortKey.__gt__
    rdd6 = rdd5.map(lambda x: x[1])
    rdd6.foreach(print)

# Script entry point. The original compared __name__ against 'main', which
# never matches — a script runs as '__main__' — so main() would never execute.
if __name__ == '__main__':
    main()

发布了25 篇原创文章 · 获赞 0 · 访问量 384

猜你喜欢

转载自blog.csdn.net/qq_45371603/article/details/104593849