SparkSQL: executing a SELECT query

package cn.itcast.spark.test

import java.sql.{Connection, Date, DriverManager, PreparedStatement}
import java.util.Properties

import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

// Spark SQL CRUD (insert/delete/update/query) against MySQL; this example demonstrates the query (SELECT) side
object test3 {
  // Database connection settings
  lazy val url = "jdbc:mysql://192.168.10.190:3306/bigdata"
  // Alternative URL with the credentials embedded (not used below)
  lazy val url2 = "jdbc:mysql://192.168.10.190:3306/bigdata?user=root&password=x5"
  lazy val username = "root"
  lazy val password = "x5"

  def main(args: Array[String]) {
    // JDBC handles that would be used for raw insert/update/delete statements (not exercised in this SELECT example)
    var conn: Connection = null
    var ps: PreparedStatement = null

    //val sparkConf = new SparkConf().setAppName("sparkSqlTest").setMaster("yarn-cluster").set("spark.app.id", "sqlTest")
    val sparkConf = new SparkConf().setAppName("SQLDemo").setMaster("local")
    // Create the SparkContext
    val sc = new SparkContext(sparkConf)
    // Create the SQLContext
    val sqlContext = new SQLContext(sc)
    // Import implicit conversions so DataFrame operations and Spark SQL built-in functions can be used
    import sqlContext.implicits._
    // Build the JDBC connection URL and properties
    val uri = url + "?user=" + username + "&password=" + password + "&useUnicode=true&characterEncoding=UTF-8"
    val prop = new Properties()
    // Note: when running on a cluster this property is required; without it a "MySQL driver not found" error is thrown
    prop.put("driver", "com.mysql.jdbc.Driver")
    // Load the MySQL tables as DataFrames
    val df_test1: DataFrame = sqlContext.read.jdbc(uri, "user_t", prop)
    val df_test2: DataFrame = sqlContext.read.jdbc(uri, "t_user2", prop)

    // Partitioned conditional read: each predicate in the array becomes the WHERE clause of one partition
    val predicates = Array[String]("id <= 2", "id >= 4 and id <= 5")
    val df_test3: DataFrame = sqlContext.read.jdbc(uri, "t_user2", predicates, prop)

    // Select the needed columns from the DataFrame and print each row; this is the actual query
    df_test2.select("id", "name", "age").collect()
      .foreach(row => {
        println("id  " + row(0) + " ,name  " + row(1) + ", age  " + row(2))
      })

    df_test3.select("id", "name", "age").collect()
      .foreach(row => {
        println("id  " + row(0) + " ,name  " + row(1) + ", age  " + row(2))
      })
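
    // Alternative sketch: the same query expressed as plain SQL by registering the
    // DataFrame as a temporary table (the temp table name "t_user2_tmp" is an arbitrary example)
    df_test2.registerTempTable("t_user2_tmp")
    sqlContext.sql("SELECT id, name, age FROM t_user2_tmp WHERE id <= 2").show()

    // Stop the SparkContext once the queries have finished
    sc.stop()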
  }
}
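
The header comment mentions insert, update, and delete as well as query, but the listing above only reads from MySQL. As a minimal sketch of the write side, placed inside main after the queries and reusing the uri and prop defined there (the target table name user_t_copy is an assumed example, not part of the original code), a DataFrame can be saved back to MySQL like this:

    // Sketch only: write query results back to MySQL; "user_t_copy" is an assumed target table
    import org.apache.spark.sql.SaveMode

    df_test2.select("id", "name", "age")
      .write
      .mode(SaveMode.Append)          // append rows rather than overwriting the table
      .jdbc(uri, "user_t_copy", prop) // reuses the uri and prop built in main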

Reposted from blog.csdn.net/qq_16563637/article/details/81538403