su sdbadmin
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
CREATE USER 'metauser'@'%' IDENTIFIED BY 'metauser';
GRANT ALL ON *.* TO 'metauser'@'%';
CREATE DATABASE metastore CHARACTER SET 'latin1' COLLATE 'latin1_bin';
FLUSH PRIVILEGES;
quit;
cat > /opt/apache-hive-1.2.2-bin/conf/hive-site.xml << EOF
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>metauser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>metauser</value>
  </property>
  <property>
    <name>hive.test.authz.sstd.hs2.mode</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.enable.doAs</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.users.in.admin.role</name>
    <value>root</value>
  </property>
  <property>
    <name>hive.server2.thrift.port</name>
    <value>9073</value>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>CUSTOM</value>
  </property>
  <property>
    <name>hive.server2.custom.authentication.class</name>
    <value>com.sequoiadb.spark.sql.hive.SequoiadbAuth</value>
  </property>
  <property>
    <name>hive.security.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
  </property>
</configuration>
EOF
cp spark-authorizer-2.1.1.jar /opt/apache-hive-1.2.2-bin/auxlib
cp mysql-connector-java-5.1.7-bin.jar /opt/apache-hive-1.2.2-bin/auxlib
export HADOOP_HOME=/opt/hadoop-2.9.2
/opt/apache-hive-1.2.2-bin/bin/schematool -dbType mysql -initSchema
su sdbadmin
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
use metastore;
create table DBUSER (dbuser varchar(100), passwd char(50), primary key (dbuser));
insert into DBUSER(dbuser, passwd) values ('root', md5('admin'));
This pre-creates a root account with password 'admin' for the thrift server. To add more accounts later, run a similar INSERT statement, or script it as sketched below.
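If you prefer to script account creation, here is a minimal JDBC sketch; it assumes the mysql-connector jar from the steps above is on the classpath, and the account name 'sparkuser' and password 'sparkpass' are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

// Registers an extra thrift-server account in the DBUSER table created above.
public class AddThriftUser {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://127.0.0.1:3306/metastore", "metauser", "metauser");
             PreparedStatement ps = conn.prepareStatement(
                     "INSERT INTO DBUSER(dbuser, passwd) VALUES (?, md5(?))")) {
            ps.setString(1, "sparkuser"); // placeholder account name
            ps.setString(2, "sparkpass"); // placeholder password, hashed by MySQL's md5()
            ps.executeUpdate();
        }
    }
}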
-- Make every database subsequently created in the metastore owned by the
-- 'public' role, so SQL-standard authorization applies uniformly:
delimiter ||
create trigger dbs_trigger
before insert on DBS
for each row
begin
set new.OWNER_NAME="public";
set new.OWNER_TYPE="ROLE";
end ||
delimiter ;
cp spark-authorizer-2.1.1.jar /opt/spark/jars
cp mysql-connector-java-5.1.7-bin.jar /opt/spark/jars
cat > /opt/spark/conf/hive-site.xml << EOF
<configuration>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://localhost/metastore?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>metauser</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>metauser</value>
  </property>
  <property>
    <name>hive.security.authorization.createtable.owner.grants</name>
    <value>INSERT,SELECT</value>
  </property>
  <property>
    <name>hive.security.authorization.enabled</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.security.authorization.manager</name>
    <value>org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory</value>
  </property>
  <property>
    <name>hive.test.authz.sstd.hs2.mode</name>
    <value>true</value>
  </property>
  <property>
    <name>hive.server2.authentication</name>
    <value>CUSTOM</value>
  </property>
  <property>
    <name>hive.server2.custom.authentication.class</name>
    <value>com.sequoiadb.spark.sql.hive.SequoiadbAuth</value>
  </property>
</configuration>
EOF
Append the authorization extension to /opt/spark/conf/spark-defaults.conf:
spark.sql.extensions=org.apache.ranger.authorization.spark.authorizer.SequoiadbSparkSQLExtension
/opt/spark/sbin/start-all.sh
/opt/spark/sbin/start-thriftserver.sh
netstat -anp|grep 10000
./bin/beeline -u jdbc:hive2://localhost:10000 -n root -p admin
When a table is created in Spark SQL, the user who executes the CREATE TABLE statement receives INSERT and SELECT privileges on it. For any other user to access the table, the corresponding GRANT commands must be run against Hive's thrift server, as shown below.
${HIVE_HOME}/bin/hiveserver2 >${HIVE_HOME}/hive_thriftserver.log 2>&1 &
./bin/beeline -u jdbc:hive2://localhost:9073 -n root -p admin
set role admin;
grant SELECT on table test to user USERNAME;
grant INSERT on table test to user USERNAME;
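To confirm the grants took effect, a quick check through hive-jdbc against the Spark Thrift Server can be used. This is only a sketch, where "USERNAME" and "password" stand for an account previously added to DBUSER and granted above:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Connects to the Spark Thrift Server as the granted user and reads the test table.
public class CheckGrant {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:hive2://localhost:10000/default", "USERNAME", "password");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT * FROM test")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}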
var db=new Sdb("localhost",11810);
db.createDomain("scottdomain",["datagroup1","datagroup2","datagroup3"],{AutoSplit:true});
db.createCS("scott",{Domain:"scottdomain"});
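The same preparation can be done from Java. The following sketch assumes the createDomain and createCollectionSpace methods of the sequoiadb-driver dependency listed later in this article:

import org.bson.BasicBSONObject;
import org.bson.types.BasicBSONList;
import com.sequoiadb.base.Sequoiadb;

// Creates the domain over three data groups and a collection space bound to it,
// mirroring the SequoiaDB shell commands above.
public class CreateScott {
    public static void main(String[] args) {
        Sequoiadb db = new Sequoiadb("localhost", 11810, "", "");
        try {
            BasicBSONList groups = new BasicBSONList();
            groups.add("datagroup1");
            groups.add("datagroup2");
            groups.add("datagroup3");
            BasicBSONObject domainOptions = new BasicBSONObject();
            domainOptions.put("Groups", groups);
            domainOptions.put("AutoSplit", true);
            db.createDomain("scottdomain", domainOptions);
            db.createCollectionSpace("scott", new BasicBSONObject("Domain", "scottdomain"));
        } finally {
            db.disconnect();
        }
    }
}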
/opt/sequoiasql/mysql/bin/mysql -h 127.0.0.1 -P 3306 -u root
create database scott;
use scott;
create table emp(
empno int unsigned auto_increment primary key COMMENT 'employee number',
ename varchar(15) COMMENT 'employee name',
job varchar(10) COMMENT 'job title',
mgr int unsigned COMMENT 'employee number of the manager',
hiredate date COMMENT 'hire date',
sal decimal(7,2) COMMENT 'base salary',
comm decimal(7,2) COMMENT 'bonus',
deptno int unsigned COMMENT 'department number'
)ENGINE = sequoiadb COMMENT = "employee table, sequoiadb: { table_options: { ShardingKey: { 'empno': 1 }, ShardingType: 'hash', 'Compressed': true, 'CompressionType': 'lzw', 'AutoSplit': true, 'EnsureShardingIndex': false } }";
INSERT INTO emp VALUES (7369,'SMITH','CLERK',7902,'1980-12-17',800,NULL,20);
INSERT INTO emp VALUES (7499,'ALLEN','SALESMAN',7698,'1981-2-20',1600,300,30);
INSERT INTO emp VALUES (7521,'WARD','SALESMAN',7698,'1981-2-22',1250,500,30);
INSERT INTO emp VALUES (7566,'JONES','MANAGER',7839,'1981-4-2',2975,NULL,20);
INSERT INTO emp VALUES (7654,'MARTIN','SALESMAN',7698,'1981-9-28',1250,1400,30);
INSERT INTO emp VALUES (7698,'BLAKE','MANAGER',7839,'1981-5-1',2850,NULL,30);
INSERT INTO emp VALUES (7782,'CLARK','MANAGER',7839,'1981-6-9',2450,NULL,10);
INSERT INTO emp VALUES (7788,'SCOTT','ANALYST',7566,'1987-7-13',3000,NULL,20);
INSERT INTO emp VALUES (7839,'KING','PRESIDENT',NULL,'1981-11-17',5000,NULL,10);
INSERT INTO emp VALUES (7844,'TURNER','SALESMAN',7698,'1981-9-8',1500,100,30);
INSERT INTO emp VALUES (7876,'ADAMS','CLERK',7788,'1987-7-13',1100,NULL,20);
INSERT INTO emp VALUES (7900,'JAMES','CLERK',7698,'1981-12-3',950,NULL,30);
INSERT INTO emp VALUES (7902,'FORD','ANALYST',7566,'1981-12-3',3000,NULL,20);
INSERT INTO emp VALUES (7934,'MILLER','CLERK',7782,'1982-1-23',1300,NULL,10);
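To verify the data landed, a small JDBC check against the SequoiaSQL-MySQL instance can be run (a sketch; the instance listens on 3306 and root has no password, as in the steps above):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Counts the rows just inserted into scott.emp; 14 is expected.
public class CountEmp {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://127.0.0.1:3306/scott", "root", "");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT count(*) FROM emp")) {
            if (rs.next()) {
                System.out.println("emp rows: " + rs.getLong(1));
            }
        }
    }
}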
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>fastjson</artifactId>
    <version>1.2.58</version>
</dependency>
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid-spring-boot-starter</artifactId>
    <version>1.1.18</version>
</dependency>
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-core</artifactId>
    <version>${mybatis-plus.version}</version>
</dependency>
<dependency>
    <groupId>com.baomidou</groupId>
    <artifactId>mybatis-plus-extension</artifactId>
    <version>${mybatis-plus.version}</version>
</dependency>
<dependency>
    <groupId>commons-logging</groupId>
    <artifactId>commons-logging</artifactId>
    <version>1.1.3</version>
</dependency>
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-exec</artifactId>
    <version>1.2.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-metastore</artifactId>
    <version>1.2.1</version>
</dependency>
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpclient</artifactId>
    <version>4.5.2</version>
</dependency>
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpcore</artifactId>
    <version>4.4.4</version>
</dependency>
<dependency>
    <groupId>org.apache.thrift</groupId>
    <artifactId>libthrift</artifactId>
    <version>0.9.2</version>
</dependency>
<dependency>
    <groupId>log4j</groupId>
    <artifactId>log4j</artifactId>
    <version>1.2.17</version>
</dependency>
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-api</artifactId>
    <version>1.7.10</version>
</dependency>
<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-log4j12</artifactId>
    <version>1.7.10</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-hive-thriftserver_2.11</artifactId>
    <version>2.0.1</version>
    <scope>provided</scope>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-network-common_2.11</artifactId>
    <version>2.0.1</version>
</dependency>
<dependency>
    <groupId>com.sequoiadb</groupId>
    <artifactId>sequoiadb-driver</artifactId>
    <version>3.2.1</version>
</dependency>
<dependency>
    <groupId>com.sequoiadb</groupId>
    <artifactId>spark-sequoiadb_2.11</artifactId>
    <version>2.8.0</version>
</dependency>
<dependency>
    <groupId>com.sequoiadb</groupId>
    <artifactId>spark-sequoiadb-scala_2.11.2</artifactId>
    <version>1.12</version>
</dependency>
<dependency>
    <groupId>org.apache.spark</groupId>
    <artifactId>spark-sql_2.11</artifactId>
    <version>2.2.2</version>
</dependency>
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-jdbc</artifactId>
    <version>1.2.1</version>
</dependency>
<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-common</artifactId>
    <version>3.2.0</version>
</dependency>
server.port=8090
#datasource config
#connection pool type
spring.datasource.type=com.alibaba.druid.pool.DruidDataSource
#JDBC driver
spring.datasource.driver-class-name=org.apache.hive.jdbc.HiveDriver
#connection URL, user name and password
spring.datasource.url=jdbc:hive2://192.168.80.132:10000/default
spring.datasource.username=root
spring.datasource.password=admin
#number of connections created on initialization
spring.datasource.druid.initialSize=1
#minimum number of idle connections
spring.datasource.druid.minIdle=5
#maximum number of concurrent connections
spring.datasource.druid.maxActive=20
#maximum time to wait for a connection, in milliseconds
spring.datasource.druid.maxWait=60000
#interval between runs of the idle-connection evictor, in milliseconds
spring.datasource.druid.timeBetweenEvictionRunsMillis=60000
#minimum time a connection must stay idle in the pool before it may be evicted, in milliseconds
spring.datasource.druid.minEvictableIdleTimeMillis=300000
#SQL used to check that a connection is still valid; must be a query
spring.datasource.druid.validation-query=SELECT 1
#mybatis
#mybatis-plus.mapper-locations=classpath:mapper/*.xml
#mybatis-plus.configuration.cache-enabled=false
#location of the XML mapping files
mybatis.mapper-locations=classpath:mapper/*.xml
#package to scan for entity classes
mybatis.type-aliases-package=com.sdb.spark.demo.entity
#Spring MVC static resource mapping
spring.mvc.static-path-pattern=/static/**
#hot reload
spring.devtools.restart.enabled=true
spring.devtools.restart.additional-paths=src/main/java
spring.devtools.restart.exclude=WEB-INF/**
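Before wiring up the mappers, it helps to confirm that the Druid pool really reaches the Spark Thrift Server. A minimal startup check is sketched below (the class name ConnectionCheck is arbitrary):

import java.sql.Connection;
import javax.sql.DataSource;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

// Borrows one connection from the configured pool at startup and prints the
// JDBC URL, failing fast if the thrift server is unreachable.
@Component
public class ConnectionCheck implements CommandLineRunner {
    @Autowired
    private DataSource dataSource;

    @Override
    public void run(String... args) throws Exception {
        try (Connection conn = dataSource.getConnection()) {
            System.out.println("connected: " + conn.getMetaData().getURL());
        }
    }
}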
/**
 * Employee entity
 *
 * @author yousongxian
 * @date 2020-07-29
 */
public class Emp {
    private Integer empno;    // employee number
    private String ename;     // employee name
    private String job;       // job title
    private Integer mgr;      // employee number of the manager
    private String hiredate;  // hire date
    private Double sal;       // base salary
    private Double comm;      // bonus
    private Integer deptno;   // department number

    // getters and setters omitted

    @Override
    public String toString() {
        return "Emp{" +
                "empno=" + empno +
                ", ename='" + ename + '\'' +
                ", job='" + job + '\'' +
                ", mgr=" + mgr +
                ", hiredate='" + hiredate + '\'' +
                ", sal=" + sal +
                ", comm=" + comm +
                ", deptno=" + deptno +
                '}';
    }
}
<resultMap id="BaseResultMap" type="com.sdb.spark.demo.entity.Emp">
    <id column="empno" property="empno"/>
    <result column="ename" property="ename"/>
    <result column="job" property="job"/>
    <result column="mgr" property="mgr"/>
    <result column="hiredate" property="hiredate"/>
    <result column="sal" property="sal"/>
    <result column="comm" property="comm"/>
    <result column="deptno" property="deptno"/>
</resultMap>
<!-- ${tablename} is spliced into the SQL as raw text because a table name cannot
     be bound as a prepared-statement parameter; only pass trusted values here -->
<select id="selectAll" resultType="map" parameterType="string">
    select * from ${tablename}
</select>
<update id="createTableEmp">
CREATE TABLE emp
(
empno INT,
ename STRING,
job STRING,
mgr INT,
hiredate date,
sal decimal(7,2),
comm decimal(7,2),
deptno INT
)
USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
collectionspace 'scott',
collection 'emp'
)update>
<update id="createTableEmpSchema">
CREATE TABLE emp_schema USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
collectionspace 'scott',
collection 'emp'
)
update>
<update id="createTableAsSelect" parameterType="map">
CREATE TABLE ${tablename} USING com.sequoiadb.spark OPTIONS
(
host 'localhost:11810',
domain 'scottdomain',
collectionspace 'scott',
collection #{tablename},
shardingkey '{"_id":1}',
shadingtype 'hash',
autosplit true
)AS ${condition}
update>
The root element of EmpMapper.xml declares the namespace, which must be the fully qualified name of the mapper interface:
<mapper namespace="com.sdb.spark.demo.mapper.EmpMapper">
Note: each method name in the mapper interface must match the id of the corresponding statement defined in the XML mapping file.
public interface EmpMapper {
    List<Map<String, Object>> selectAll(String tablename); // query all rows
    int createTableEmp();                                  // create the emp table
    int createTableEmpSchema();                            // create emp_schema with an automatically generated schema
    int createTableAsSelect(Map<String, String> map);      // create emp_as_select from a query result
    int insertEmp(Emp emp);                                // insert a record into emp
}
The EmpService interface declares the same operations:
public interface EmpService {
    List<Map<String, Object>> selectAll(String tablename);
    int createTableEmp();
    int createTableEmpSchema();
    int createTableAsSelect(Map<String, String> map);
    int insertEmp(Emp emp);
}
@Service
public class EmpServiceImpl implements EmpService {
@Autowired
private EmpMapper empMapper;
@Override
public List<Map<String,Object>> selectAll(String tablename) {
return empMapper.selectAll(tablename);
}
@Override
public int createTableEmp() {
return empMapper.createTableEmp();
}
@Override
public int createTableEmpSchema() {
return empMapper.createTableEmpSchema();
}
@Override
public int createTableAsSelect(Map<String,String>map) {
return empMapper.createTableAsSelect(map);
}
@Override
public int insertEmp(Emp emp) {
return empMapper.insertEmp(emp);
}
}
@Autowired
private EmpService empService;

// JUnit requires test methods to return void, so the query result is printed
// rather than returned.
@Test
public void selectAll() {
    String tablename = "emp";
    List<Map<String, Object>> resultlist = empService.selectAll(tablename);
    for (Map<String, Object> map : resultlist) {
        for (Map.Entry<String, Object> m : map.entrySet()) {
            System.out.print(m.getKey() + "=" + m.getValue() + "\t");
        }
        System.out.println();
    }
}
@Test
public void createTable(){
empService.createTableEmp();
}
@Test
public void createTableEmpSchema(){
empService.createTableEmpSchema();
}
@Test
public void createTableAsSelect() {
    Map<String, String> map = new HashMap<String, String>();
    String tablename = "emp_as_select";
    String condition = "select empno,ename from emp";
    map.put("tablename", tablename);
    map.put("condition", condition);
    // check for null before calling equals() to avoid a NullPointerException
    if (map.get("tablename") == null || map.get("tablename").equals("")
            || map.get("condition") == null || map.get("condition").equals("")) {
        System.out.println("Please provide a valid table name and condition");
    } else {
        empService.createTableAsSelect(map);
    }
}
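insertEmp is declared in the mapper but not exercised by the tests above. A sketch of a matching test, assuming an insertEmp statement is defined in EmpMapper.xml and that Emp has the usual setters (omitted from the entity listing):

@Test
public void insertEmp() {
    Emp emp = new Emp();
    emp.setEmpno(7999);          // assumed setter; accessors were omitted above
    emp.setEname("TEST");
    emp.setJob("CLERK");
    emp.setMgr(7902);
    emp.setHiredate("1981-01-01");
    emp.setSal(1000.0);
    emp.setComm(null);
    emp.setDeptno(20);
    empService.insertEmp(emp);
}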
@SpringBootApplication(scanBasePackages = {"com.sdb.spark.demo.service.Impl"})
@MapperScan(basePackages = {"com.sdb.spark.demo.mapper"})
public class DemoApplication {
public static void main(String[] args) {
SpringApplication.run(DemoApplication.class, args);
}
}