hive笔记-使用JDBC操作hive

参考网址:https://cwiki.apache.org/confluence/display/Hive/HiveClient#HiveClient-JDBC

其实根据官网的wiki即可实现,连代码都不用自己敲,但是有几个地方需要注意。


package com.hihi.hive;

import java.sql.SQLException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.DriverManager;

public class Hive_JDBC {

    /**
     * HiveServer2 JDBC driver class.
     * NOTE: HiveServer2 uses {@code org.apache.hive.jdbc.HiveDriver}; the old
     * HiveServer1 driver ({@code org.apache.hadoop.hive.jdbc.HiveDriver}) will not work.
     */
    private static String driverName = "org.apache.hive.jdbc.HiveDriver";

    /**
     * Demo: connect to HiveServer2 over JDBC, (re)create a table, load a local
     * file into it and run a few queries, printing each statement and its result.
     *
     * @param args unused
     * @throws SQLException if any statement fails or the connection cannot be opened
     */
    public static void main(String[] args) throws SQLException {
        try {
            // Explicit registration is only required pre-JDBC 4; harmless otherwise.
            Class.forName(driverName);
        } catch (ClassNotFoundException e) {
            e.printStackTrace();
            System.exit(1);
        }

        // HiveServer2 must be running in the background, or the connection fails.
        // NOTE: the URL scheme differs from the HiveServer1 wiki example —
        // HiveServer2 uses jdbc:hive2, not jdbc:hive.
        // try-with-resources guarantees the connection and statement are closed
        // even when a statement throws (the original leaked both).
        try (Connection con = DriverManager.getConnection(
                     "jdbc:hive2://hadoop001:10000/default", "root", "");
             Statement stmt = con.createStatement()) {

            String tableName = "jdbc_test";

            // DDL must go through execute(), not executeQuery() (it returns no rows).
            // IF EXISTS keeps the first run from failing when the table is absent.
            String sql = "drop table if exists " + tableName;
            System.out.println(sql);
            stmt.execute(sql);

            sql = "create table " + tableName
                    + " (key int, value string) ROW FORMAT DELIMITED FIELDS TERMINATED BY \"\\t\"";
            System.out.println(sql);
            stmt.execute(sql);

            // show tables matching the table name
            sql = "show tables '" + tableName + "'";
            System.out.println("Running: " + sql);
            try (ResultSet res = stmt.executeQuery(sql)) {
                if (res.next()) {
                    System.out.println(res.getString(1));
                }
            }

            // describe table (column name + type per row)
            sql = "describe " + tableName;
            System.out.println("Running: " + sql);
            try (ResultSet res = stmt.executeQuery(sql)) {
                while (res.next()) {
                    System.out.println(res.getString(1) + "\t" + res.getString(2));
                }
            }

            // load data into table
            // NOTE: the file path must be local to the HiveServer2 host.
            // NOTE: /tmp/a.txt is a tab-separated file with two fields per line.
            // execute() returns false for statements without a result set, so its
            // return value does NOT indicate failure — failures throw SQLException.
            // (The original printed "failed!" on success because of this.)
            String filepath = "/tmp/a.txt";
            sql = "load data local inpath '" + filepath + "' into table " + tableName;
            System.out.println("Running: " + sql);
            stmt.execute(sql);

            // select * query
            sql = "select * from " + tableName;
            System.out.println("Running: " + sql);
            try (ResultSet res = stmt.executeQuery(sql)) {
                while (res.next()) {
                    System.out.println(res.getInt(1) + "\t" + res.getString(2));
                }
            }

            // regular hive (aggregate) query
            sql = "select count(1) from " + tableName;
            System.out.println("Running: " + sql);
            try (ResultSet res = stmt.executeQuery(sql)) {
                while (res.next()) {
                    System.out.println(res.getString(1));
                }
            }
        }
    }
}

pom.xml



<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>study-hadoop</groupId>
  <artifactId>hive</artifactId>
  <version>1.0</version>

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <hadoop.version>2.6.0-cdh5.7.0</hadoop.version>
    <hive.version>1.1.0-cdh5.7.0</hive.version>
  </properties>

  <repositories>
    <repository>
      <id>cloudera</id>
      <url>https://repository.cloudera.com/artifactory/cloudera-repos/</url>
    </repository>
  </repositories>

  <dependencies>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
      <version>${hadoop.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-jdbc</artifactId>
      <version>${hive.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hive</groupId>
      <artifactId>hive-exec</artifactId>
      <version>${hive.version}</version>
    </dependency>

    <dependency>
      <groupId>mysql</groupId>
      <artifactId>mysql-connector-java</artifactId>
      <version>5.1.45</version>
    </dependency>

    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.10</version>
      <scope>test</scope>
    </dependency>
  </dependencies>
</project>
【来自@若泽大数据】

你可能感兴趣的:(Hadoop,hive)