Maven dependencies (pom.xml):

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>3.3.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-mapreduce-client-core</artifactId>
    <version>3.3.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-metastore</artifactId>
    <version>3.1.2</version>
</dependency>
<dependency>
    <groupId>org.apache.iceberg</groupId>
    <artifactId>iceberg-core</artifactId>
    <version>0.13.1</version>
</dependency>
<dependency>
    <groupId>org.apache.iceberg</groupId>
    <artifactId>iceberg-hive-metastore</artifactId>
    <version>0.13.1</version>
</dependency>
Place the Hadoop configuration files core-site.xml and hdfs-site.xml in the project's src/main/resources directory.
Example: create, load, rename, and drop a table:
import org.apache.iceberg.hive.HiveCatalog
import org.apache.hadoop.conf.Configuration
import org.apache.iceberg.Table
import org.apache.iceberg.catalog.TableIdentifier
import org.apache.iceberg.Schema
import org.apache.iceberg.types.Types
import org.apache.iceberg.PartitionSpec

object flink_test {

  def main(args: Array[String]): Unit = {

    // ======= Initialize the Hive catalog =============
    val hiveCatalog: HiveCatalog = new HiveCatalog()
    hiveCatalog.setConf(new Configuration())

    val properties: java.util.HashMap[String, String] =
      new java.util.HashMap[String, String]()
    properties.put("warehouse", "hdfs://nnha/user/iceberg/warehouse")
    properties.put("uri", "thrift://hive1:9083")
    properties.put("clients", "2") // size of the Hive Metastore client connection pool

    hiveCatalog.initialize("hive_catalog", properties) // the first argument is the catalog name

    // ============= Create a table ==================
    val schema: Schema = new Schema(
      // a Schema built through the Java API must assign a unique ID to every field
      Types.NestedField.required(1, "user_name", Types.StringType.get()),
      Types.NestedField.required(2, "order_time", Types.TimestampType.withZone()),
      Types.NestedField.optional(3, "buy_products", Types.ListType.ofRequired(4, Types.StringType.get()))
    )

    val partitionSpec: PartitionSpec = PartitionSpec.builderFor(schema)
      // derive an int hour value from the timestamp field and use it as a partition field
      .hour("order_time")
      // use a table field directly as a partition field
      .identity("user_name")
      .build()

    // the arguments are the database name and the table name
    val tableName: TableIdentifier = TableIdentifier.of("iceberg_db", "java_hive_table")
    val table: Table = hiveCatalog.createTable(tableName, schema, partitionSpec)

    // ============= Load an existing table =========
    // val table: Table = hiveCatalog.loadTable(TableIdentifier)

    // ============= Rename a table =========
    // hiveCatalog.renameTable(TableIdentifier, TableIdentifier)

    // ============= Drop a table =========
    // true deletes the files under the metadata directory, but keeps the metadata directory itself
    // hiveCatalog.dropTable(identifier: TableIdentifier, purge: Boolean)
  }
}
To run it with the scala command line, you can use the following command:
[root@hive1 ~]# scala -classpath flink_dev-1.0-SNAPSHOT.jar flink_test
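The commented-out load/rename/drop lines above only show method signatures. Below is a minimal sketch of how those calls might look against the table created in the example; the catalog properties are repeated from above, while the object name and the target identifier java_hive_table_renamed are hypothetical, introduced only for illustration.

import org.apache.hadoop.conf.Configuration
import org.apache.iceberg.Table
import org.apache.iceberg.catalog.TableIdentifier
import org.apache.iceberg.hive.HiveCatalog

// Hypothetical object name; assumes iceberg_db.java_hive_table was created as shown above
object hive_catalog_ops_sketch {

  def main(args: Array[String]): Unit = {

    // re-initialize the catalog exactly as in the example above
    val hiveCatalog: HiveCatalog = new HiveCatalog()
    hiveCatalog.setConf(new Configuration())
    val properties = new java.util.HashMap[String, String]()
    properties.put("warehouse", "hdfs://nnha/user/iceberg/warehouse")
    properties.put("uri", "thrift://hive1:9083")
    hiveCatalog.initialize("hive_catalog", properties)

    val identifier = TableIdentifier.of("iceberg_db", "java_hive_table")

    // load the existing table and print its schema and partition spec
    val table: Table = hiveCatalog.loadTable(identifier)
    println(table.schema())
    println(table.spec())

    // rename the table within the same database (the target name is hypothetical)
    val renamed = TableIdentifier.of("iceberg_db", "java_hive_table_renamed")
    hiveCatalog.renameTable(identifier, renamed)

    // drop the renamed table; the second argument is the purge flag
    hiveCatalog.dropTable(renamed, true)
  }
}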
Maven dependencies (pom.xml):

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>3.3.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs-client</artifactId>
    <version>3.3.1</version>
</dependency>
<dependency>
    <groupId>org.apache.iceberg</groupId>
    <artifactId>iceberg-core</artifactId>
    <version>0.13.1</version>
</dependency>
Place the Hadoop configuration file hdfs-site.xml in the project's src/main/resources directory.
Example: create, load, rename, and drop a table:
import org.apache.hadoop.conf.Configuration
import org.apache.iceberg.catalog.TableIdentifier
import org.apache.iceberg.hadoop.HadoopCatalog
import org.apache.iceberg.types.Types
import org.apache.iceberg.{PartitionSpec, Schema, Table}

object flink_test {

  def main(args: Array[String]): Unit = {

    // ======= Initialize the Hadoop catalog =============
    val warehousePath: String = "hdfs://nnha/user/iceberg/warehouse"
    val hadoopCatalog: HadoopCatalog = new HadoopCatalog(new Configuration(), warehousePath)

    // ============= Create a table ==================
    val schema: Schema = new Schema(
      // a Schema built through the Java API must assign a unique ID to every field
      Types.NestedField.required(1, "user_name", Types.StringType.get()),
      Types.NestedField.required(2, "order_time", Types.TimestampType.withZone()),
      Types.NestedField.optional(3, "hobby", Types.ListType.ofRequired(4, Types.StringType.get()))
    )

    val partitionSpec: PartitionSpec = PartitionSpec.builderFor(schema)
      // derive an int hour value from the timestamp field and use it as a partition field
      .hour("order_time")
      // use a table field directly as a partition field
      .identity("user_name")
      .build()

    // the arguments are the database name and the table name
    val tableName: TableIdentifier = TableIdentifier.of("iceberg_db", "java_hadoop_table")
    val table: Table = hadoopCatalog.createTable(tableName, schema, partitionSpec)

    // ============= Load an existing table =========
    // val table: Table = hadoopCatalog.loadTable(TableIdentifier)

    // ============= Rename a table =========
    // hadoopCatalog.renameTable(TableIdentifier, TableIdentifier)

    // ============= Drop a table =========
    // hadoopCatalog.dropTable(identifier: TableIdentifier, purge: Boolean)
  }
}
To run it with the scala command line, you can use the following command:
[root@hive1 ~]# scala -classpath flink_dev-1.0-SNAPSHOT.jar flink_test
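As with the Hive catalog, the commented-out operations above can be sketched against the table created in this example. The warehouse path and the iceberg_db.java_hadoop_table identifier come from the code above; the object name and the listTables call are assumptions added only for illustration.

import org.apache.hadoop.conf.Configuration
import org.apache.iceberg.Table
import org.apache.iceberg.catalog.{Namespace, TableIdentifier}
import org.apache.iceberg.hadoop.HadoopCatalog

// Hypothetical object name; assumes iceberg_db.java_hadoop_table was created as shown above
object hadoop_catalog_ops_sketch {

  def main(args: Array[String]): Unit = {

    val warehousePath = "hdfs://nnha/user/iceberg/warehouse"
    val hadoopCatalog = new HadoopCatalog(new Configuration(), warehousePath)

    // list the tables in the iceberg_db namespace
    println(hadoopCatalog.listTables(Namespace.of("iceberg_db")))

    // load the existing table and print its schema and current snapshot
    val identifier = TableIdentifier.of("iceberg_db", "java_hadoop_table")
    val table: Table = hadoopCatalog.loadTable(identifier)
    println(table.schema())
    println(table.currentSnapshot())

    // drop the table; the second argument is the purge flag
    hadoopCatalog.dropTable(identifier, true)
  }
}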
Maven dependencies (pom.xml):

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>3.3.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs-client</artifactId>
    <version>3.3.1</version>
</dependency>
<dependency>
    <groupId>org.apache.iceberg</groupId>
    <artifactId>iceberg-core</artifactId>
    <version>0.13.1</version>
</dependency>
Place the Hadoop configuration file hdfs-site.xml in the project's src/main/resources directory.
Example: create, load, and drop a table:
import org.apache.hadoop.conf.Configuration
import org.apache.iceberg.hadoop.HadoopTables
import org.apache.iceberg.types.Types
import org.apache.iceberg.{PartitionSpec, Schema, Table}

object flink_test {

  def main(args: Array[String]): Unit = {

    // ======= Initialize Hadoop Tables =============
    val hadoopTables: HadoopTables = new HadoopTables(new Configuration())

    // ============= Create a table ==================
    val schema: Schema = new Schema(
      // a Schema built through the Java API must assign a unique ID to every field
      Types.NestedField.required(1, "user_name", Types.StringType.get()),
      Types.NestedField.required(2, "order_time", Types.TimestampType.withZone()),
      Types.NestedField.optional(3, "hobby", Types.ListType.ofRequired(4, Types.StringType.get()))
    )

    val partitionSpec: PartitionSpec = PartitionSpec.builderFor(schema)
      // derive an int hour value from the timestamp field and use it as a partition field
      .hour("order_time")
      // use a table field directly as a partition field
      .identity("user_name")
      .build()

    // a Hadoop table is addressed by its HDFS path rather than by a database and table name
    val warehouseTablePath: String = "hdfs://nnha/user/iceberg/warehouse/iceberg_db/java_hdfs_table"
    val table: Table = hadoopTables.create(schema, partitionSpec, warehouseTablePath)

    // ============= Load an existing table =========
    // val table: Table = hadoopTables.load(warehouseTablePath)

    // ============= Drop a table =========
    // hadoopTables.dropTable(warehouseTablePath: String, purge: Boolean)
  }
}
To run it with the scala command line, you can use the following command:
[root@hive1 ~]# scala -classpath flink_dev-1.0-SNAPSHOT.jar flink_test
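Because HadoopTables addresses a table purely by its HDFS path, the commented-out load and dropTable calls above take the path directly instead of a TableIdentifier. Below is a minimal sketch, assuming the table was created at the path used in the example; the object name is hypothetical.

import org.apache.hadoop.conf.Configuration
import org.apache.iceberg.Table
import org.apache.iceberg.hadoop.HadoopTables

// Hypothetical object name; assumes the table at warehouseTablePath was created as shown above
object hadoop_tables_ops_sketch {

  def main(args: Array[String]): Unit = {

    val hadoopTables = new HadoopTables(new Configuration())
    val warehouseTablePath = "hdfs://nnha/user/iceberg/warehouse/iceberg_db/java_hdfs_table"

    // load the existing table from its path and print its schema and location
    val table: Table = hadoopTables.load(warehouseTablePath)
    println(table.schema())
    println(table.location())

    // drop the table; the second argument is the purge flag
    hadoopTables.dropTable(warehouseTablePath, true)
  }
}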