Spark: read a directory and get the file name for each record

import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.lib.input.{FileSplit, TextInputFormat}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.NewHadoopRDD

object SparkReadDir {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setAppName("testtoparquet")
    conf.setMaster("local")
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf)
    val input = "C:\\Users\\mzz\\Desktop\\tt\\20180315\\"
    val output = ""  // unused in this example

    //val value = sc.textFile(input + "20180314_HK5-10.82.26.22.txt")

    // Read every file under the directory with the new Hadoop API, then cast to
    // NewHadoopRDD so we can see which input split (file) each partition reads.
    val fileRDD = sc.newAPIHadoopFile[LongWritable, Text, TextInputFormat](input)
    val hadoopRDD = fileRDD.asInstanceOf[NewHadoopRDD[LongWritable, Text]]

    val fileAndLine = hadoopRDD.mapPartitionsWithInputSplit(
      (inputSplit: InputSplit, iterator: Iterator[(LongWritable, Text)]) => {
        val file = inputSplit.asInstanceOf[FileSplit]
        iterator.map(x => {
          // file.getPath.toString -- full path of the file
          // file.getPath.getName  -- file name only
          // getName is sturdier than the original split("/")(6), which assumed
          // a fixed directory depth in the path string.
          file.getPath.getName + "," + x._2
        })
      })
    fileAndLine.foreach(println)

    sc.stop()
  }
}
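For comparison, here is a minimal alternative sketch (not from the original post, assuming the same local input directory): sc.wholeTextFiles pairs each file's full path with its entire content, which avoids the cast to NewHadoopRDD. Each file is read as one record, so this suits directories of many small files rather than huge ones.

import org.apache.spark.{SparkConf, SparkContext}

object WholeTextFilesDemo {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("wholeTextFilesDemo").setMaster("local"))
    val input = "C:\\Users\\mzz\\Desktop\\tt\\20180315\\"  // same directory as above (assumption)

    // wholeTextFiles yields (fullPath, fileContent); split the content into
    // lines and prefix each line with the bare file name.
    sc.wholeTextFiles(input)
      .flatMap { case (path, content) =>
        val name = path.split("/").last
        content.split("\n").map(line => name + "," + line)
      }
      .foreach(println)

    sc.stop()
  }
}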
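If a DataFrame is acceptable, Spark SQL offers input_file_name(), which tags every row with the path of the file it came from. The sketch below is likewise not from the original post; it assumes Spark 2.x and the same directory.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.input_file_name

object InputFileNameDemo {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("inputFileNameDemo")
      .master("local")
      .getOrCreate()

    val input = "C:\\Users\\mzz\\Desktop\\tt\\20180315\\"  // same directory as above (assumption)

    // spark.read.text gives one row per line; input_file_name() adds the
    // full path of the source file for that row.
    spark.read.text(input)
      .withColumn("file", input_file_name())
      .show(truncate = false)

    spark.stop()
  }
}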
Reposted from blog.csdn.net/xiaozhaoshigedasb/article/details/90675765