SQL 经典50题之mysql版、Spark-Sql版(详解)

SQL 经典50题之mysql版、Spark-Sql版

建库

-- Create the demo database (idempotent) and switch to it.
-- Fixed: the original `create database school` had no statement terminator,
-- so the script failed before reaching USE.
CREATE DATABASE IF NOT EXISTS `school`;
USE `school`;

建表

-- Course table: one row per course, taught by one teacher.
-- Fixed: `//` is not a valid MySQL comment prefix (use `--` or `#`).
DROP TABLE IF EXISTS `Course`;
CREATE TABLE `Course` (
  `c_id` varchar(20) NOT NULL,              -- course id
  `c_name` varchar(20) NOT NULL DEFAULT '', -- course name
  `t_id` varchar(20) NOT NULL,              -- teacher id (logical FK to Teacher.t_id)
  PRIMARY KEY (`c_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- Score table: one row per (student, course); composite primary key.
-- Fixed: `//` is not a valid MySQL comment prefix.
DROP TABLE IF EXISTS `Score`;
CREATE TABLE `Score` (
  `s_id` varchar(20) NOT NULL,   -- student id (logical FK to Student.s_id)
  `c_id` varchar(20) NOT NULL,   -- course id (logical FK to Course.c_id)
  `s_score` int(3) DEFAULT NULL, -- score; NULL = not graded
  PRIMARY KEY (`s_id`,`c_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- Student table.
-- Fixed: `//` is not a valid MySQL comment prefix.
-- NOTE(review): s_birth is stored as text, not DATE — the date queries later
-- in this article rely on its 'yyyy-MM-dd' format.
DROP TABLE IF EXISTS `Student`;
CREATE TABLE `Student` (
  `s_id` varchar(20) NOT NULL,               -- student id
  `s_name` varchar(20) NOT NULL DEFAULT '',  -- student name
  `s_birth` varchar(20) NOT NULL DEFAULT '', -- birth date as 'yyyy-MM-dd' text
  `s_sex` varchar(10) NOT NULL DEFAULT '',   -- sex ('男'/'女')
  PRIMARY KEY (`s_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

-- Teacher table.
-- Fixed: `//` is not a valid MySQL comment prefix.
DROP TABLE IF EXISTS `Teacher`;
CREATE TABLE `Teacher` (
  `t_id` varchar(20) NOT NULL,              -- teacher id
  `t_name` varchar(20) NOT NULL DEFAULT '', -- teacher name
  PRIMARY KEY (`t_id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;

插入测试数据

-- Course seed data
insert into `Course`(`c_id`,`c_name`,`t_id`) values ('01','语文','02'),('02','数学','01'),('03','英语','03');

-- Score seed data (students 05-07 are deliberately missing some courses,
-- and student 08 has no scores at all — several exercises depend on this)
insert into `Score`(`s_id`,`c_id`,`s_score`) values ('01','01',80),
('01','02',90),('01','03',99),('02','01',70),
('02','02',60),('02','03',80),('03','01',80),
('03','02',80),('03','03',80),('04','01',50),
('04','02',30),('04','03',20),('05','01',76),
('05','02',87),('06','01',31),('06','03',34),
('07','02',89),('07','03',98);

-- Student seed data
insert into `Student`(`s_id`,`s_name`,`s_birth`,`s_sex`) values
('01','赵雷','1990-01-01','男'),
('02','钱电','1990-12-21','男'),
('03','孙风','1990-05-20','男'),
('04','李云','1990-08-06','男'),
('05','周梅','1991-12-01','女'),
('06','吴兰','1992-03-01','女'),
('07','郑竹','1989-07-01','女'),
('08','王菊','1990-01-20','女');

-- Teacher seed data
insert into `Teacher`(`t_id`,`t_name`) values
('01','张三'),
('02','李四'),
('03','王五');

Spark-Sql 版查询前的数据读取

	// Build a local SparkSession and load the four school tables over JDBC.
	val spark: SparkSession = SparkSession.builder()
      .appName("SparkOnMysql")
      .master("local[*]")
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._
    // JDBC connection settings.
    // NOTE(review): credentials are hard-coded — acceptable in a tutorial,
    // never in production code.
    val url="jdbc:mysql://192.168.8.99:3306/school"
    val user="root"
    val pwd="ok"
    val driver="com.mysql.jdbc.Driver"
    val prop = new Properties()
    prop.setProperty("user",user)
    prop.setProperty("password",pwd)
    prop.setProperty("driver",driver)
    // One DataFrame per table; each read stays lazy until an action runs.
    val student: DataFrame = spark.read.jdbc(url,"Student",prop)
    val course: DataFrame = spark.read.jdbc(url,"Course",prop)
    val score: DataFrame = spark.read.jdbc(url,"Score",prop)
    val teacher: DataFrame = spark.read.jdbc(url,"Teacher",prop)
	
	// Register temp views so the tables can also be queried via spark.sql(...)
    student.createTempView("student")
    course.createTempView("course")
    score.createTempView("score")
    teacher.createTempView("teacher")

SQL 经典50题

1、查询"01"课程比"02"课程成绩高的学生的信息及课程分数

mysql版

-- Solution 1: inner join for course 01, left join for course 02.
-- Fixed: the original ended the join condition with `or c.c_id = NULL`,
-- which can never be true (comparing with NULL yields UNKNOWN; IS NULL
-- would be needed) and was dead code anyway, because the WHERE clause
-- `b.s_score > c.s_score` already discards rows with a NULL course-02 score.
-- Also normalized the table-name case (Student/Score), which matters on
-- case-sensitive file systems.
select a.*, b.s_score as 01_score, c.s_score as 02_score
from Student a
join Score b on a.s_id = b.s_id and b.c_id = '01'
left join Score c on a.s_id = c.s_id and c.c_id = '02'
where b.s_score > c.s_score;
-- Solution 2: same result with two inner joins
-- (explicit ANSI joins instead of the original comma joins)
select a.*, b.s_score as 01_score, c.s_score as 02_score
from Student a
join Score b on a.s_id = b.s_id and b.c_id = '01'
join Score c on a.s_id = c.s_id and c.c_id = '02'
where b.s_score > c.s_score;

Spark-Sql版

// Q1 (Spark): self-join Score on s_id so each output row carries two
// (c_id, s_score) pairs; keep rows where the left pair is course "01",
// the right pair is course "02", and the course-01 score is higher,
// then attach the student columns.
// Positional layout after the join: 0=s_id, 1=c_id(left), 2=s_score(left),
// 3=c_id(right), 4=s_score(right).
// NOTE(review): the filter relies on that exact column order.
val sql01: DataFrame = score
  .join(score,Seq("s_id"),"left")
  .filter(x => x.get(1).equals("01") && x.get(3)
    .equals("02") && x.get(2).asInstanceOf[Integer] > x.get(4).asInstanceOf[Integer])
    .join(student,Seq("s_id")).drop("c_id")
sql01.show()

2、查询"01"课程比"02"课程成绩低的学生的信息及课程分数

mysql版

-- Q2: students whose course-01 score is LOWER than their course-02 score.
-- Rewritten with explicit ANSI joins (the original used implicit comma joins,
-- which make it easy to drop a join condition unnoticed).
select a.*, b.s_score as 01_score, c.s_score as 02_score
from Student a
join Score b on a.s_id = b.s_id and b.c_id = '01'
join Score c on a.s_id = c.s_id and c.c_id = '02'
where b.s_score < c.s_score;

Spark-Sql版

val sql02: DataFrame = score.join(score,Seq("s_id"),"left")
  .filter(x => x.get(1).equals("01") && x.get(3)
    .equals("02") && x.get(2).asInstanceOf[Integer] < x.get(4).asInstanceOf[Integer])
  .join(student,Seq("s_id")).drop("c_id")
sql02.show()

3、查询平均成绩大于等于60分的同学的学生编号和学生姓名和平均成绩

mysql版

-- Q3: students whose average score is >= 60.
-- Fixed: group by the Student primary key s1.s_id so the selected s_name is
-- functionally dependent on the grouping key — the original grouped by
-- s2.s_id while selecting s1.s_id and s_name, which fails under
-- ONLY_FULL_GROUP_BY.
select s1.s_id, s_name, avg(s_score) as avg_score
from Student s1
join Score s2 on s1.s_id = s2.s_id
group by s1.s_id
having avg(s_score) >= 60;

Spark-Sql版

val sql03: DataFrame = score.groupBy("s_id")
  .agg(Map("s_score" -> "avg"))
  .filter(x => x.get(1).asInstanceOf[Double] >= 60)
  .join(student, "s_id")
  .drop("s_birth", "s_sex")
sql03.show()

4、查询平均成绩小于60分的同学的学生编号和学生姓名和平均成绩

mysql版

-- Q4: students whose average score is below 60.
-- Fixed: group by the Student primary key s1.s_id (same ONLY_FULL_GROUP_BY
-- defect as Q3 — the original grouped by s2.s_id).
select s1.s_id, s_name, avg(s_score) as avg_score
from Student s1
join Score s2 on s1.s_id = s2.s_id
group by s1.s_id
having avg(s_score) < 60;

Spark-Sql版

val sql04: DataFrame = score.groupBy("s_id")
  .agg(Map("s_score" -> "avg"))
  .filter(x => x.get(1).asInstanceOf[Double] < 60)
  .join(student, "s_id")
  .drop("s_birth", "s_sex")
sql04.show()

5、查询所有同学的学生编号、学生姓名、选课总数、所有课程的总成绩

mysql版

select a.s_id,a.s_name,count(b.c_id) as sum_course,sum(b.s_score) as sum_score from 
Student a left join Score b on a.s_id=b.s_id
group by a.s_id

Spark-Sql版

val sql05: DataFrame = score.groupBy("s_id")
  .agg(Map("c_id" -> "count", "s_score" -> "sum"))
  .join(student, Seq("s_id"), "right")
  .drop("s_birth", "s_sex")
sql05.show()

6、查询"李"姓老师的数量

mysql版

select count(t_id) from Teacher where t_name like '李%'

Spark-Sql版

// Q6: number of teachers whose family name is 李.
// Fixed: the original grouped by t_id before counting, which produced one
// row per matching teacher (each with count = 1) instead of the single
// total the MySQL version returns. A global aggregate gives the count.
val sql06: DataFrame = teacher
  .where(teacher("t_name").startsWith("李"))
  .groupBy()
  .count()
sql06.show()

7、查询学过"张三"老师授课的同学的信息

mysql版

select s.* from Student s join Score sc on s.s_id=sc.s_id
where c_id in (select c_id from Course where t_id in
(select t_id from Teacher where t_name='张三'))

Spark-Sql版

val sql07: DataFrame = score.join(student, "s_id")
  .join(course, "c_id")
  .join(teacher, "t_id")
  .where("t_name=='张三'")
  .drop("t_id", "c_id", "s_score", "c_name")
sql07.show()

8、查询没学过"张三"老师授课的同学的信息

mysql版

select * from Student where s_id not in
(select s_id from Score where c_id in
(select c_id from Course a join Teacher b on a.t_id = b.t_id
where t_name ='张三'))

Spark-Sql版

val sql08: Dataset[Row] = student.join(score.join(course.join(teacher, "t_id")
  .where("t_name=\"张三\""), "c_id")
  .select("s_id", "t_name"), Seq("s_id"), "left_outer")
  .where("t_name is null")
sql08.show()

9、查询学过编号为"01"并且也学过编号为"02"的课程的同学的信息

mysql版

select s.* from Student s join Score sc1 on s.s_id=sc1.s_id
left join Score sc2 on s.s_id=sc2.s_id
where sc1.c_id='01' and sc2.c_id='02'

Spark-Sql版

val sql09: DataFrame = score.join(score, Seq("s_id"), "left")
  .drop("s_score")
  .filter(x => x.get(1).equals("01") && x.get(2).equals("02"))
  .join(student, Seq("s_id"))
  .drop("c_id")
sql09.show()

10、查询学过编号为"01"但是没有学过编号为"02"的课程的同学的信息

mysql版

select * from Student where s_id in
(select s_id from Score where c_id='01')
and s_id not in
(select s_id from Score where c_id='02')

Spark-Sql版

// Q10: students who took course 01 but not course 02.
// Left-anti pattern: left join the course-02 rows and keep students whose
// match is null, then require a course-01 row.
// Fixed: compare c_id with the string literals '01'/'02' — the column is
// varchar, and `c_id = 2` forced an implicit string-to-int cast on every row.
val sql10: DataFrame = student
  .join(score.where("c_id = '02'"), Seq("s_id"), "left_outer")
  .as("s2")
  .where("s2.c_id is null")
  .join(score.where("c_id = '01'"), "s_id")
  .drop("c_id", "s_score")
sql10.show()

11、查询没有学全所有课程的同学的信息

mysql版

-- Q11: students who have NOT taken every course (the left join keeps
-- students with no scores at all, whose count(c_id) is 0).
-- Fixed: group by the Student primary key s.s_id so `s.*` is legal under
-- ONLY_FULL_GROUP_BY — the original grouped by sc.s_id.
select s.*, count(c_id) as course_cnt
from Student s
left join Score sc on s.s_id = sc.s_id
group by s.s_id
having count(c_id) < (select count(c_id) from Course);

Spark-Sql版

val sql11: Dataset[Row] = student
  .join(score.groupBy("s_id").count().as("s1"), Seq("s_id"), "left")
  .where(s"s1.count <> ${course.count()} or s1.count is null ")
sql11.show()

12、查询至少有一门课与学号为"01"的同学所学相同的同学的信息

mysql版

select * from Student where s_id<>'01' and s_id not in
(select s_id from Score where c_id not in
(select c_id from Score where s_id='01'))

Spark-Sql版

val sql12: DataFrame = score
  .join(score
    .select("c_id")
    .where("s_id=1"), "c_id")
  .select("s_id")
  .distinct()
  .where("s_id <> 1")
  .join(student, "s_id")
sql12.show()

13、查询和"01"号的同学学习的课程完全相同的其他同学的信息

mysql版

select * from Student where s_id in
(select s_id from Score group by s_id having count(s_id)=
(select count(c_id) from Score where s_id = '01'))
and s_id not in
(select s_id from Score where c_id in
(select distinct c_id from Score where c_id not in
(select c_id from Score where s_id = '01')) group by s_id)
and s_id <> '01'

Spark-Sql版

// Q13: students (other than 01) taking all of student 01's courses
// (their overlap with 01's course set equals the size of that set).
// Fixed: a) filter on the materialized `count` column — `count(1)` inside
// where() is an aggregate call, which Spark rejects in a filter;
// b) compare s_id against the string literal '01' instead of a bare int.
// NOTE(review): like the original, this does not exclude students taking
// EXTRA courses beyond 01's set; the MySQL version above does.
val sql13: DataFrame = score
  .join(score.select("c_id").where("s_id = '01'"), "c_id")
  .groupBy("s_id")
  .count()
  .where(s"count = ${score.where("s_id = '01'").count} and s_id <> '01'")
  .join(student, "s_id")
sql13.show()

14、查询没学过"张三"老师讲授的任一门课程的学生姓名

mysql版

select s_name from Student where s_id not in
(select s_id from Score where c_id in 
(select c_id from Course where t_id in
(select t_id from Teacher where t_name = '张三')))

Spark-Sql版

val sql14: Dataset[Row] = student.join(score.join(course.join(teacher, "t_id")
  .where("t_name=\"张三\""), "c_id")
  .select("s_id", "t_name"), Seq("s_id"), "left_outer")
  .where("t_name is null")
    .select("s_name")
sql14.show()

15、查询两门及其以上不及格课程的同学的学号,姓名及其平均成绩

mysql版

select a.s_id,s_name,round(avg(s_score)) from 
Student a left join Score b on a.s_id = b.s_id
where a.s_id in
(select s_id from Score where s_score<60 group by s_id
having count(1)>=2) group by a.s_id

Spark-Sql版

val sql15: DataFrame = score.where(score("s_score") < 60)
  .groupBy("s_id")
  .count()
  .join(student, "s_id")
  .select("s_id", "s_name")
  .join(score.groupBy("s_id")
    .avg("s_score"), "s_id")
sql15.show()

16、检索"01"课程分数小于60,按分数降序排列的学生信息

mysql版

select s.*,s_score from Student s left join Score sc
on s.s_id=sc.s_id where c_id='01' and s_score<60
order by s_score desc

Spark-Sql版

val sql16: DataFrame = score.where(score("c_id").isin("01"))
  .where(score("s_score") < 60)
  .join(student, "s_id")
  .orderBy(score("s_score").desc)
  .drop("c_id")
sql16.show()

17、按平均成绩从高到低显示所有学生的所有课程的成绩以及平均成绩

mysql版

select s_name,a.s_id,
max(case c_id when '01' then s_score end) 语文, 
max(case c_id when '02' then s_score end) 数学, 
max(case c_id when '03' then s_score end) 英语, 
avg(s_score) from Score a join Student b
on a.s_id=b.s_id group by a.s_id order by avg(s_score) desc

Spark-Sql版

val sql17: DataFrame = score.groupBy("s_id")
  .avg("s_score")
  .orderBy(desc("avg(s_score)"))
  .join(score, "s_id")
sql17.show()

18.查询各科成绩最高分、最低分和平均分

  • 以如下形式显示:课程ID,课程name,最高分,最低分,平均分,及格率,中等率,优良率,优秀率
  • 及格为>=60,中等为:70-80,优良为:80-90,优秀为:>=90

mysql版

select a.c_id,c_name,max(s_score),min(s_score),round(avg(s_score),2),
round(100*(sum(case when a.s_score>=60 then 1 else 0 end)/sum(case when a.s_score then 1 else 0 end)),2) as 及格率,
round(100*(sum(case when a.s_score>=70 and a.s_score<=80 then 1 else 0 end)/sum(case when a.s_score then 1 else 0 end)),2) as 中等率,
round(100*(sum(case when a.s_score>=80 and a.s_score<=90 then 1 else 0 end)/sum(case when a.s_score then 1 else 0 end)),2) as 优良率,
round(100*(sum(case when a.s_score>=90 then 1 else 0 end)/sum(case when a.s_score then 1 else 0 end)),2) as 优秀率
from Score a left join Course b on a.c_id = b.c_id group by a.c_id,b.c_name

Spark-Sql版

val sql18: DataFrame = score.groupBy("c_id")
  .agg("s_score" -> "max", "s_score" -> "min", "s_score" -> "avg")
sql18.show()

19、按各科成绩进行排序,并显示排名

mysql版

(select * from (select t1.c_id,t1.s_score,
(select count(distinct t2.s_score) from Score t2 where t2.s_score>=t1.s_score and t2.c_id='01') rank
from Score t1 where t1.c_id='01' order by t1.s_score desc) t1)
union
(select * from (select t1.c_id,t1.s_score,
(select count(distinct t2.s_score) from Score t2 where t2.s_score>=t1.s_score and t2.c_id='02') rank
from Score t1 where t1.c_id='02' order by t1.s_score desc) t2)
union
(select * from (select t1.c_id,t1.s_score,
(select count(distinct t2.s_score) from Score t2 where t2.s_score>=t1.s_score and t2.c_id='03') rank
from Score t1 where t1.c_id='03' order by t1.s_score desc) t3)

Spark-Sql版

val sql19: DataFrame = score
  .join(student, "s_id")
  .selectExpr("*", "row_number() over(partition by c_id order by s_score desc)  rank")
sql19.show()

20、查询学生的总成绩并进行排名

mysql版

select s_id,sum(s_score) from Score group by s_id order by sum(s_score) desc

Spark-Sql版

// Q20: total score per student, ranked from highest to lowest.
// Fixed: order DESCENDING — the MySQL version orders by sum(s_score) desc,
// while the original orderBy defaulted to ascending.
val sql20: Dataset[Row] = score.groupBy("s_id")
  .sum("s_score")
  .orderBy(desc("sum(s_score)"))
sql20.show()

21、查询不同老师所教不同课程平均分从高到低显示

mysql版

select a.t_id,t_name,a.c_id,round(avg(s_score),2) as avg_score
from Course a left join Score b on a.c_id=b.c_id 
left join Teacher c on a.t_id=c.t_id
group by a.c_id,a.t_id,c.t_name order by avg_score desc

Spark-Sql版

val sql21: DataFrame = score.groupBy("c_id")
  .avg("s_score")
  .join(course.join(teacher, "t_id"), "c_id")
    .orderBy(desc("avg(s_score)"))
sql21.show()

22、查询所有课程的成绩第2名到第3名的学生信息及该课程成绩

mysql版

select d.*,c.排名,c.s_score,c.c_id from
(select a.s_id,a.s_score,a.c_id,@i:=@i+1 as 排名 from Score a,
(select @i:=0)s where a.c_id='01' order by a.s_score desc)c
left join Student d on c.s_id=d.s_id where 排名 between 2 and 3
union
select d.*,c.排名,c.s_score,c.c_id from
(select a.s_id,a.s_score,a.c_id,@j:=@j+1 as 排名 from Score a,
(select @j:=0)s where a.c_id='02' order by a.s_score desc)c
left join Student d on c.s_id=d.s_id where 排名 between 2 and 3
union
select d.*,c.排名,c.s_score,c.c_id from
(select a.s_id,a.s_score,a.c_id,@k:=@k+1 as 排名 from Score a,
(select @k:=0)s where a.c_id='03' order by a.s_score desc)c
left join Student d on c.s_id=d.s_id where 排名 between 2 and 3

Spark-Sql版

// Q22: per course, the students ranked 2nd and 3rd by HIGHEST score.
// Fixed: the window must order by s_score DESC — ascending picked the 2nd
// and 3rd WORST scores. Also filter with a SQL predicate instead of
// positional Row.get calls, and join Student to return the student info
// the question asks for (as the MySQL version does).
val sql22: Dataset[Row] = score
  .selectExpr("*", "row_number() over(partition by c_id order by s_score desc) as rank")
  .where("rank between 2 and 3")
  .join(student, "s_id")
sql22.show()

23、统计各科成绩各分数段人数:课程编号、课程名称、[100-85]、[85-70]、[70-60]、[0-60] 及所占百分比

mysql版

select distinct f.c_name,a.c_id,b.`85-100`,b.百分比,c.`70-85`,c.百分比,d.`60-70`,d.百分比,e.`0-60`,e.百分比 from Score a
left join (select c_id,SUM(case when s_score >85 and s_score <=100 then 1 else 0 end) as `85-100`,
ROUND(100*(SUM(case when s_score >85 and s_score <=100 then 1 else 0 end)/count(*)),2) as 百分比
from Score GROUP BY c_id)b on a.c_id=b.c_id
left join (select c_id,SUM(case when s_score >70 and s_score <=85 then 1 else 0 end) as `70-85`,
ROUND(100*(SUM(case when s_score >70 and s_score <=85 then 1 else 0 end)/count(*)),2) as 百分比
from Score GROUP BY c_id)c on a.c_id=c.c_id
left join (select c_id,SUM(case when s_score >60 and s_score <=70 then 1 else 0 end) as `60-70`,
ROUND(100*(SUM(case when s_score >60 and s_score <=70 then 1 else 0 end)/count(*)),2) as 百分比
from Score GROUP BY c_id)d on a.c_id=d.c_id
left join (select c_id,SUM(case when s_score >=0 and s_score <=60 then 1 else 0 end) as `0-60`,
ROUND(100*(SUM(case when s_score >=0 and s_score <=60 then 1 else 0 end)/count(*)),2) as 百分比
from Score GROUP BY c_id)e on a.c_id=e.c_id
left join Course f on a.c_id = f.c_id

Spark-Sql版

val rankDF: DataFrame = score.rdd.map(x => {
  if (x.get(2).asInstanceOf[Integer] < 60) (x.get(1).toString, 1)
  else if (x.get(2).asInstanceOf[Integer] < 70) (x.get(1).toString, 2)
  else if (x.get(2).asInstanceOf[Integer] < 85) (x.get(1).toString, 3)
  else (x.get(1).toString, 4)
}).toDF("c_id", "rank")
val sql23: DataFrame = rankDF.groupBy("c_id")
  .count().as("rnk1")
  .join(rankDF.groupBy("c_id", "rank")
    .count().as("rnk2"), "c_id")
  .withColumn("百分比", $"rnk2.count" / $"rnk1.count")
sql23.show()

24、查询学生平均成绩及其名次

mysql版

select a.s_id,
@i:=@i+1 as '不保留空缺排名',
@k:=(case when @avg_score=a.avg_s then @k else @i end) as '保留空缺排名',
@avg_score:=avg_s as '平均分'
from (select s_id,ROUND(AVG(s_score),2) as avg_s
from Score GROUP BY s_id ORDER BY avg_s DESC)a,
(select @avg_score:=0,@i:=0,@k:=0)b

Spark-Sql版

// Q24: each student's average score with its rank.
// Fixed: the original ordered the window by the string literal
// 'avg(s_score)' — a constant, so row_number() handed out arbitrary ranks.
// Backtick-quote the generated column name to order by the actual average.
val sql24: DataFrame = score.groupBy("s_id")
  .avg("s_score")
  .selectExpr("*", "row_number() over(order by `avg(s_score)` desc) as rank")
sql24.show()

25、查询各科成绩前三名的记录

mysql版

select a.s_id,a.c_id,a.s_score from Score a 
left join Score b on a.c_id = b.c_id and a.s_score<b.s_score
group by a.s_id,a.c_id,a.s_score having count(b.s_id)<3
order by a.c_id,a.s_score desc

Spark-Sql版

// Q25: top three records per course.
// Fixed: rank by s_score DESC — the original's ascending window returned
// the BOTTOM three of each course.
val sql25: Dataset[Row] = score
  .selectExpr("*", "row_number() over(partition by c_id order by s_score desc) as rank")
  .where("rank <= 3")
  .orderBy("c_id", "rank")
sql25.show()

26、查询每门课程被选修的学生数

mysql版

select c_id,count(s_id) from Score a group by c_id

Spark-Sql版

val sql26: DataFrame = score.groupBy("c_id")
  .agg("s_id" -> "count")
sql26.show()

27、查询出只有两门课程的全部学生的学号和姓名

mysql版

select s_id,s_name from Student where s_id in
(select s_id from Score group by s_id having count(c_id)=2)

Spark-Sql版

val sql27: DataFrame = score.groupBy("s_id")
  .count()
  .filter(x => x.get(1) == 2)
  .join(student, "s_id")
  .select("s_id", "s_name")
sql27.show()

28、查询男生、女生人数

mysql版

select s_sex,count(s_sex) as 人数  from Student group by s_sex

Spark-Sql版

val sql28: DataFrame = student.groupBy("s_sex").count()
sql28.show()

29、查询名字中含有"风"字的学生信息

mysql版

select * from Student where s_name like '%风%'

Spark-Sql版

val sql29: Dataset[Row] = student.where(student("s_name").like("%风%"))
sql29.show()

30、查询同名同性学生名单,并统计同名人数

mysql版

select a.s_name,a.s_sex,count(*) from Student a join 
Student b on a.s_id !=b.s_id
and a.s_name = b.s_name
and a.s_sex = b.s_sex
group by a.s_name,a.s_sex

Spark-Sql版

// Q30: students sharing BOTH name and sex, with the duplicate count.
// Fixed: group by (s_name, s_sex) — the original grouped by name only,
// so two same-named students of different sex would be counted together,
// unlike the MySQL version.
val sql30: Dataset[Row] = student.groupBy("s_name", "s_sex")
  .count().where("count > 1")
sql30.show()

31、查询1990年出生的学生名单

mysql版

select * from Student where s_birth like '1990%'

Spark-Sql版

val sql31: Dataset[Row] = student.where(student("s_birth").startsWith("1990"))
sql31.show()

32、查询每门课程的平均成绩,结果按平均成绩降序排列,平均成绩相同时,按课程编号升序排列

mysql版

-- Q32: average per course, highest average first, ties broken by course id.
-- Fixed: the original `order by avg desc,c_id,asc` has a stray comma before
-- `asc`, which is a syntax error; also renamed the alias so it does not
-- shadow the avg() function name.
select c_id, avg(s_score) as avg_score
from Score
group by c_id
order by avg_score desc, c_id asc;

Spark-Sql版

val sql32: Dataset[Row] = score.groupBy("c_id")
  .avg("s_score")
  .orderBy(desc("avg(s_score)"), asc("c_id"))
sql32.show()

33、查询平均成绩大于等于85的所有学生的学号、姓名和平均成绩

mysql版

-- Q33: students whose average score is >= 85.
-- Fixed: group by the Student primary key a.s_id — the original grouped by
-- b.s_id while selecting a.s_id and s_name, which breaks under
-- ONLY_FULL_GROUP_BY.
select a.s_id, s_name, avg(s_score) as avg_score
from Student a
join Score b on a.s_id = b.s_id
group by a.s_id
having avg_score >= 85;

Spark-Sql版

val sql33: DataFrame = score.groupBy("s_id")
  .avg("s_score")
  .where("avg(s_score)>=85")
  .join(student, "s_id")
  .select("s_id", "s_name", "avg(s_score)")
sql33.show()

34、查询课程名称为"数学",且分数低于60的学生姓名和分数

mysql版

select s_name,s_score from Student a join Score b
on a.s_id=b.s_id where s_score<60 and c_id=
(select c_id from Course where c_name='数学')

Spark-Sql版

val sql34: DataFrame = score.join(course, "c_id")
  .where("s_score < 60 and c_name=\"数学\"")
  .join(student, "s_id")
    .select("s_name","s_score")
sql34.show()

35、查询所有学生的课程及分数情况

mysql版

select s.s_id,s_name,
sum(case c_name when '语文' then s_score else 0 end) as '语文',
sum(case c_name when '数学' then s_score else 0 end) as '数学',
sum(case c_name when '英语' then s_score else 0 end) as '英语',
sum(s_score) as '总分'
from Student s left join Score sc on s.s_id=sc.s_id
left join Course c on sc.c_id=c.c_id
group by sc.s_id

Spark-Sql版

val sql35: DataFrame = score
  .join(student,"s_id")
  .join(course,"c_id")
sql35.show()

36、查询任何一门课程成绩在70分以上的学生姓名、课程名称和分数

mysql版

select s_name,c_name,s_score from Student st left join Score sc
on st.s_id= sc.s_id left join Course c on sc.c_id=c.c_id
where s_score>70

Spark-Sql版

val sql36: DataFrame = score.where("s_score>70")
  .join(student, "s_id")
  .join(course, "c_id")
  .select("s_name", "c_name", "s_score")
sql36.show()

37、查询学生不及格的课程

mysql版

select s_name,c_name,s_score from Student st join Score sc
on st.s_id=sc.s_id join Course c on sc.c_id=c.c_id
where s_score<60

Spark-Sql版

val sql37: DataFrame = student.join(score, "s_id")
  .where("s_score<60")
  .join(course, "c_id")
  .select("s_name", "c_name", "s_score")
sql37.show()

38、查询课程编号为01且课程成绩在80分以上的学生的学号和姓名

mysql版

select s.s_id,s_name,s_score from Student s join Score sc
on s.s_id=sc.s_id where c_id='01' and s_score>80

Spark-Sql版

// Q38: id and name of students scoring above 80 in course 01.
// Fixed: compare c_id with the string literal '01' — the column is varchar
// and `c_id=01` forced an implicit cast of every row.
val sql38: DataFrame = student.join(score, "s_id")
  .where("c_id = '01' and s_score > 80")
  .select("s_id", "s_name")
sql38.show()

39、求每门课程的学生人数

mysql版

select c_id,count(s_id) from Score group by c_id

Spark-Sql版

val sql39: DataFrame = score.groupBy("c_id").count()
sql39.show()

40、查询选修"张三"老师所授课程的学生中,成绩最高的学生信息及其成绩

mysql版

select s.*,s_score from Student s join Score sc
on s.s_id=sc.s_id where s_score in
(select max(s_score) from Score where c_id in
(select c_id from Course where t_id in
(select t_id from Teacher where t_name='张三')))
and c_id in (select c_id from Course where t_id in
(select t_id from Teacher where t_name='张三'))

Spark-Sql版

val sql40: DataFrame = score.join(course, "c_id")
  .join(teacher, "t_id")
  .where("t_name=\"张三\"")
  .orderBy(desc("s_score"))
  .limit(1)
  .join(student, "s_id")
  .select("s_id", "s_name", "s_birth", "s_sex", "s_score")
sql40.show()

41、查询不同课程成绩相同的学生的学生编号、课程编号、学生成绩

mysql版

-- Q41: one student having the same score in two different courses.
-- Fixed: a) join the two Score copies on the SAME student — the original
-- comma join had no s_id condition, so it compared scores across different
-- students; b) dropped the trailing `group by s_id`, which was both
-- ambiguous (s_id exists in a and b) and redundant next to DISTINCT.
select distinct b.s_id, a.c_id, b.c_id, b.s_score
from Score a
join Score b on a.s_id = b.s_id
  and a.c_id <> b.c_id
  and a.s_score = b.s_score;

Spark-Sql版

val sql41: DataFrame = student.join(score, "s_id")
  .groupBy("s_id", "s_score")
  .count()
  .where("count>1")
  .join(score.withColumnRenamed("s_score", "score"), "s_id")
  .drop("count")
  .filter(x => x.get(1) == x.get(3))
  .drop("score")
sql41.show()

42、查询每门功课成绩最好的前两名

mysql版

select a.s_id,a.c_id,a.s_score from Score a
where (select count(1) from Score b where b.c_id=a.c_id
and b.s_score>=a.s_score)<=2 order by a.c_id

Spark-Sql版

val sql42: Dataset[Row] = score.selectExpr("*", "row_number() over(partition by c_id order by s_score desc) as rank")
  .where("rank<=2")
sql42.show()

43、统计每门课程的学生选修人数(超过5人的课程才统计)

  • 要求输出课程号和选修人数,查询结果按人数降序排列,若人数相同,按课程号升序排列

mysql版

select c_id,count(s_id) from Score group by c_id
having count(s_id)>5 order by count(s_id) desc,c_id asc

Spark-Sql版

// Q43: per-course enrolment, keeping only courses with MORE than 5 students,
// ordered by count desc then course id asc.
// Fixed: the original used `>= 5` — the question (超过5人) and the MySQL
// version require strictly greater — and omitted the required ordering.
val sql43: Dataset[Row] = score.groupBy("c_id")
  .count()
  .where("count > 5")
  .orderBy(desc("count"), asc("c_id"))
sql43.show

44、检索至少选修两门课程的学生学号

mysql版

select s_id,count(c_id) from Score group by s_id
having count(c_id)>=2

Spark-Sql版

val sql44: Dataset[Row] = score.groupBy("s_id")
  .count()
  .where("count >= 2")
sql44.show()

45、查询选修了全部课程的学生信息

mysql版

select * from Student where s_id in
(select s_id from Score group by s_id
having count(c_id) = (select count(c_id) from Course))

Spark-Sql版

val sql45: DataFrame = score.groupBy("s_id")
  .count()
  .where(s"count = ${course.count()}")
  .join(student, "s_id")
  .drop("count")
sql45.show()

46、查询各学生的年龄

  • 按照出生日期来算,当前月日 < 出生年月的月日则,年龄减一

mysql版

select s_name,(date_format(now(),'%Y')-date_format(s_birth,'%Y') - 
(case when date_format(now(),'%m%d')>date_format(s_birth,'%m%d') then 0 else 1 end)) as age
from Student;

Spark-Sql版

//创建获取当前日期函数
def getNowTime(): String = {
  //实例化一个Date对象并且获取时间戳
  val time = new Date().getTime
  //设置时间格式
  val format = new SimpleDateFormat("yyyy-MM-dd")
  //将时间格式套用在获取的时间戳上
  format.format(time)
}
val sql46: DataFrame = student.rdd.map(
  x => {
    var age = getNowTime().substring(0, 4).toInt - x.get(2).toString.substring(0, 4).toInt
    if (getNowTime().substring(5, 7).toInt > x.get(2).toString.substring(5, 7).toInt) (x.get(0).toString, age)
    else if (getNowTime().substring(5, 7).toInt < x.get(2).toString.substring(5, 7).toInt) (x.get(0).toString, age - 1)
    else {
      if (getNowTime().substring(8, 10).toInt >= x.get(2).toString.substring(8, 10).toInt) (x.get(0).toString, age)
      else (x.get(0).toString, age - 1)
    }
  }).toDF("s_id", "age")
  .join(student, "s_id")
sql46.show()

47、查询本周过生日的学生

mysql版

select * from Student where
week(s_birth) = week(now())

Spark-Sql版

val sql47: Dataset[Row] = student
  .where(" unix_timestamp( cast( concat_ws('-',date_format(current_date(),'yyyy'),date_format(s_birth,'MM'),date_format(s_birth,'dd') ) as date ),'yyyy-MM-dd') between unix_timestamp(current_date()) and unix_timestamp(date_sub(next_day(current_date(),'MON'),1),'yyyy-MM-dd') ")
sql47.show()

48、查询下周过生日的学生

mysql版

-- Q48: students whose birthday falls in next week's calendar week.
-- Fixed: `week(now())+1` never matches in the year's last week (there is no
-- week 53/54); shift the date forward one week before taking the week number.
select * from Student where
week(s_birth) = week(date_add(now(), interval 1 week));

Spark-Sql版

val sql48: Dataset[Row] = student
  .where(" unix_timestamp( cast( concat_ws('-',date_format(current_date(),'yyyy'),date_format(s_birth,'MM'),date_format(s_birth,'dd') ) as date ),'yyyy-MM-dd') between unix_timestamp(date_sub(next_day(current_date(),'MON'),1),'yyyy-MM-dd') and unix_timestamp(date_add(next_day(current_date(),'MON'),6),'yyyy-MM-dd') ")
sql48.show()

49、查询本月过生日的学生

mysql版

select * from Student where
date_format(s_birth,'%m')=date_format(now(),'%m')

select * from Student where month(s_birth)=month(now())

Spark-Sql版

val sql49: Dataset[Row] = student.where("month(s_birth) = month( current_date() )")
sql49.show()

50、查询下月过生日的学生

mysql版

-- Q50: students whose birthday is next month.
-- Fixed: `month(now())+1` yields 13 in December and never matches;
-- either shift the date by one month, or wrap with mod().
select * from Student where
date_format(s_birth,'%m') = date_format(date_add(now(), interval 1 month),'%m');

select * from Student where month(s_birth) = mod(month(now()), 12) + 1;

Spark-Sql版

// Q50 (Spark): students whose birthday is next month.
// Fixed: `month(current_date()) + 1` is 13 in December and never matches;
// `% 12` wraps December around to January.
val sql50: Dataset[Row] = student.where("month(s_birth) = (month(current_date()) % 12) + 1")
sql50.show()

猜你喜欢

转载自blog.csdn.net/qq_42578036/article/details/109995198