
import java.util.Properties

import org.apache.spark.sql.SparkSession

object ToSqlServer {

  // Row type for the parsed text data
  case class studeng(id: Int, value: String)

  def main(args: Array[String]): Unit = {
    // Point Spark at a local Hadoop installation (needed for winutils on Windows)
    System.setProperty("hadoop.home.dir", "E:\\Scala\\hadoop-2.6.5")

    val ss = SparkSession.builder().appName("test").master("local").getOrCreate()
    val sc = ss.sparkContext

    // Read the comma-separated text file and map each line to a studeng record
    val rdd1 = sc.textFile("./src/main/scala/data/outPut/sum22.txt")
    val rdd2 = rdd1.map(_.split(","))
    val rdd3 = rdd2.map(f => studeng(f(0).toInt, f(1)))

    // Convert the RDD of case classes to a DataFrame and register it as a temp view
    val sql = ss.sqlContext
    val df = sql.createDataFrame(rdd3)
    df.createOrReplaceTempView("ss")
    val df1 = sql.sql("select * from ss limit 10")

    // JDBC connection properties for SQL Server
    val prop = new Properties()
    prop.setProperty("user", "sa")
    prop.setProperty("password", "saljhy!@#")
    prop.setProperty("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver")

    // Write the result to the Sum table; the Microsoft JDBC driver expects
    // ";databaseName=..." in the URL rather than a "/Mall" path segment
    df1.write.mode("overwrite").jdbc("jdbc:sqlserver://192.168.50.32:1433;databaseName=Mall", "Sum", prop)
  }
}
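
Running this requires the Microsoft JDBC Driver for SQL Server (mssql-jdbc) on the classpath alongside Spark. A simple way to check that the write succeeded is to read the table back through the same connection settings. The sketch below is only an illustration and assumes the same URL, table name, and credentials as above; adjust them to your environment.

import java.util.Properties

import org.apache.spark.sql.SparkSession

object ReadBackCheck {
  def main(args: Array[String]): Unit = {
    val ss = SparkSession.builder().appName("readBack").master("local").getOrCreate()

    // Same connection settings as the write side (assumed; adjust as needed)
    val prop = new Properties()
    prop.setProperty("user", "sa")
    prop.setProperty("password", "saljhy!@#")
    prop.setProperty("driver", "com.microsoft.sqlserver.jdbc.SQLServerDriver")

    // Read the Sum table back into a DataFrame and print a few rows
    val df = ss.read.jdbc("jdbc:sqlserver://192.168.50.32:1433;databaseName=Mall", "Sum", prop)
    df.show(10)

    ss.stop()
  }
}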