【python】pyspark读取获取文件、加载已存在的文件、读取csv文件修改列名

读取获取文件

在这里插入图片描述

# encoding=utf-8
from pyspark import SparkContext
from pyspark import SparkFiles


sc = SparkContext.getOrCreate()

# SparkFiles.get() only resolves files previously distributed with
# sc.addFile(); it does NOT accept an arbitrary local path such as
# 'e:/data.txt'.  Register the file first, then look it up by its
# basename on every node.
sc.addFile('hdfs://192.168.56.122:9000/data.txt')  # NOTE(review): confirm the HDFS path — original had it commented out

with open(SparkFiles.get('data.txt'), encoding='utf-8') as files:
    # Stream the file line by line instead of materializing readlines().
    for x in files:
        print(x)

加载已存在的文件并调用方法

# 创建文件 myPyspark_ReadFile2.py
# encoding=utf-8

def mynum(p):
    """Return the constant greeting ``'hello man'``.

    The parameter *p* is accepted for API compatibility with the
    tutorial's caller but is intentionally ignored.
    """
    greeting = 'hello man'
    return greeting
	

from pyspark import SparkContext
from pyspark import SparkFiles


# Reuse the active context if one exists, otherwise create a fresh one.
sc = SparkContext.getOrCreate()

# Distribute the helper module so it becomes importable on the driver
# and on every executor.
sc.addFile("myPyspark_ReadFile2.py")

# This import deliberately comes AFTER addFile — the module must be
# shipped before it can be resolved.
from myPyspark_ReadFile2 import mynum

print(mynum('dd'))

读取csv文件修改列名

# encoding=utf-8
# (PEP 263 requires 'coding[:=]' with no space before the '=' for the
# declaration to take effect, matching the file's other snippets.)

from pyspark.sql import SparkSession
from pyspark.sql.functions import col

spark = SparkSession.builder.getOrCreate()

# Rename the auto-generated CSV columns _c0.._c3 to meaningful names.
# The original chained withColumn(...)/col(...) calls duplicated the
# 'peoplenum' assignment; withColumnRenamed is the idiomatic, in-place
# rename and needs no trailing drop of the _c* columns beyond the
# select below (kept so the output schema matches the original exactly).
df = (
    spark.read.format('csv')
    .load('hdfs://192.168.56.122:9000/ord/orders.csv')
    .withColumnRenamed('_c0', 'id')
    .withColumnRenamed('_c1', 'regdate')
    .withColumnRenamed('_c2', 'peoplenum')
    .withColumnRenamed('_c3', 'status')
    .select(col('id'), col('regdate'), col('peoplenum'), col('status'))
)
df.show(3)
发布了94 篇原创文章 · 获赞 110 · 访问量 5023

猜你喜欢

转载自blog.csdn.net/beautiful_huang/article/details/104199117