Without further ado, let's get straight to it. First, the Maven dependencies (the exclusions keep hive-jdbc's transitive servlet and logging jars from clashing with Spring Boot):
<dependency>
    <groupId>org.apache.hive</groupId>
    <artifactId>hive-jdbc</artifactId>
    <version>1.1.0</version>
    <exclusions>
        <exclusion>
            <groupId>org.eclipse.jetty.aggregate</groupId>
            <artifactId>jetty-all</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.apache.hive</groupId>
            <artifactId>hive-shims</artifactId>
        </exclusion>
        <exclusion>
            <groupId>tomcat</groupId>
            <artifactId>jasper-compiler</artifactId>
        </exclusion>
        <exclusion>
            <groupId>tomcat</groupId>
            <artifactId>jasper-runtime</artifactId>
        </exclusion>
        <exclusion>
            <groupId>javax.servlet</groupId>
            <artifactId>servlet-api</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-slf4j-impl</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
        <exclusion>
            <groupId>tomcat</groupId>
            <artifactId>*</artifactId>
        </exclusion>
        <exclusion>
            <groupId>ch.qos.logback</groupId>
            <artifactId>logback-classic</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.eclipse.jetty.orbit</groupId>
            <artifactId>*</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.eclipse.jetty.aggregate</groupId>
            <artifactId>*</artifactId>
        </exclusion>
        <exclusion>
            <groupId>org.mortbay.jetty</groupId>
            <artifactId>*</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>org.springframework.data</groupId>
    <artifactId>spring-data-hadoop</artifactId>
    <version>2.4.0.RELEASE</version>
    <exclusions>
        <exclusion>
            <groupId>javax.servlet</groupId>
            <artifactId>*</artifactId>
        </exclusion>
    </exclusions>
</dependency>
<dependency>
    <groupId>com.alibaba</groupId>
    <artifactId>druid-spring-boot-starter</artifactId>
    <version>1.1.1</version>
    <exclusions>
        <exclusion>
            <groupId>org.apache.tomcat</groupId>
            <artifactId>tomcat-jdbc</artifactId>
        </exclusion>
    </exclusions>
</dependency>
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.After;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;

/**
 * @Description Multi-datasource switching aspect
 **/
@Aspect
@Order(-10) // run before transaction advice so the connection comes from the right pool
@Component
public class DataSourceAspect {

    // Binds the @TargetDataSource annotation on the intercepted method
    @Before("@annotation(targetDataSource)")
    public void changeDataSource(JoinPoint point, TargetDataSource targetDataSource) throws Throwable {
        String lookupKey = targetDataSource.value();
        System.out.println("DataSource's lookupKey >> " + lookupKey);
        DataSourceContextHolder.set(lookupKey);
    }

    // Clears the key after the method returns so the thread's next call is unaffected
    @After("@annotation(targetDataSource)")
    public void restoreDataSource(JoinPoint point, TargetDataSource targetDataSource) {
        DataSourceContextHolder.remove();
    }
}
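The @TargetDataSource annotation the aspect binds to is not shown above; a minimal definition consistent with how it is used here would be:

import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;

// Minimal sketch: marks a method with the lookup key ("mysql" / "hive")
// of the pool it should run against.
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface TargetDataSource {
    String value();
}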
/**
 * @Description Data-source context holder (per-thread lookup key)
 **/
public class DataSourceContextHolder {

    public static final String MYSQL = "mysql";
    public static final String HIVE = "hive";

    // Lookup key of the data source the current thread should use
    private static final ThreadLocal<String> local = new ThreadLocal<>();

    public static ThreadLocal<String> getLocal() {
        return local;
    }

    public static void set(String lookupKey) {
        local.set(lookupKey);
    }

    public static String get() {
        return local.get();
    }

    public static void remove() {
        local.remove();
    }
}
import org.springframework.beans.factory.InitializingBean;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;
import java.util.List;
import java.util.Properties;

/**
 * @Description Configuration class that binds the data-source list
 **/
@Component
@ConfigurationProperties(prefix = "spring.datasource")
public class MatchProperties implements InitializingBean {

    // Each entry under spring.datasource.source binds to one Properties object
    // (element type inferred from the java.util.Properties import above)
    private List<Properties> source;

    @Override
    public void afterPropertiesSet() throws Exception {
        System.out.println(this.source);
    }

    public List<Properties> getSource() {
        return source;
    }

    public void setSource(List<Properties> source) {
        this.source = source;
    }
}
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.DruidDataSourceFactory;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.SqlSessionTemplate;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;
import org.springframework.lang.Nullable;
import org.springframework.transaction.PlatformTransactionManager;
import javax.sql.DataSource;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * @Description Multi-datasource configuration class
 * @Author yanghanwei
 * @Mail [email protected]
 * @Date 2019/3/19 14:07
 * @Version v1
 **/
@Configuration
public class DataSourceConfiguration implements ApplicationContextAware, InitializingBean {

    private ApplicationContext context;

    @Nullable
    private Map<Object, Object> targetDataSources;

    // NOTE: reconstruction sketch. The wiring below is inferred from the imports
    // and the classes above, not the author's verbatim code.
    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        this.context = applicationContext;
    }

    @Override
    public void afterPropertiesSet() throws Exception {
        // Build one Druid pool per entry under spring.datasource.source, keyed by its "label"
        targetDataSources = new HashMap<>();
        for (Properties props : context.getBean(MatchProperties.class).getSource()) {
            targetDataSources.put(props.getProperty("label"), DruidDataSourceFactory.createDataSource(props));
        }
    }

    @Bean
    public DataSource dataSource() {
        // Route each connection request to the pool picked by DataSourceAspect; default to MySQL
        AbstractRoutingDataSource routing = new AbstractRoutingDataSource() {
            @Override
            protected Object determineCurrentLookupKey() {
                String key = DataSourceContextHolder.get();
                return key == null ? DataSourceContextHolder.MYSQL : key;
            }
        };
        routing.setTargetDataSources(targetDataSources);
        routing.setDefaultTargetDataSource(targetDataSources.get(DataSourceContextHolder.MYSQL));
        return routing;
    }

    @Bean
    public SqlSessionFactory sqlSessionFactory(@Qualifier("dataSource") DataSource dataSource) throws Exception {
        SqlSessionFactoryBean factoryBean = new SqlSessionFactoryBean();
        factoryBean.setDataSource(dataSource);
        return factoryBean.getObject();
    }
}
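The pools themselves are declared as a list under spring.datasource.source in application.yml: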
spring:
  datasource:
    source:
      - label: mysql
        type: com.alibaba.druid.pool.DruidDataSource
        driver-class-name: com.mysql.cj.jdbc.Driver
        url: jdbc:mysql://10.111.32.118:3306/geoc?useUnicode=true&characterEncoding=utf-8&useSSL=false&serverTimezone=Asia/Shanghai&autoReconnect=true
        username: root
        password: 123456
        # Supplementary connection-pool settings for this entry
        # initial, minimum and maximum pool size
        initialSize: 1
        minIdle: 3
        maxActive: 20
        # max wait (ms) when acquiring a connection
        maxWait: 60000
        # interval (ms) between checks for idle connections that should be closed
        timeBetweenEvictionRunsMillis: 60000
        # minimum time (ms) a connection stays in the pool before it may be evicted
        minEvictableIdleTimeMillis: 30000
        validationQuery: select 1
        testWhileIdle: true
        testOnBorrow: false
        testOnReturn: false
        # enable PSCache and set its size per connection
        poolPreparedStatements: true
        maxPoolPreparedStatementPerConnectionSize: 20
      - label: hive
        url: jdbc:hive2://h1:10000/test
        driver-class-name: org.apache.hive.jdbc.HiveDriver
        type: com.alibaba.druid.pool.DruidDataSource
        username: hadoop
        password: hadoop
        # Supplementary connection-pool settings for this entry
        # initial, minimum and maximum pool size
        initialSize: 1
        minIdle: 3
        maxActive: 20
        # max wait (ms) when acquiring a connection
        maxWait: 60000
        # interval (ms) between checks for idle connections that should be closed
        timeBetweenEvictionRunsMillis: 60000
        # minimum time (ms) a connection stays in the pool before it may be evicted
        minEvictableIdleTimeMillis: 30000
        validationQuery: select 1
        testWhileIdle: true
        testOnBorrow: false
        testOnReturn: false
        # enable PSCache and set its size per connection
        poolPreparedStatements: true
        maxPoolPreparedStatementPerConnectionSize: 20
To switch pools, just add the annotation to the corresponding service method:
@TargetDataSource("mysql")
@TargetDataSource("hive")
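For example, in a hypothetical service class (the class and method names here are illustrative, not from the original post):

import org.springframework.stereotype.Service;

@Service
public class ReportService {

    @TargetDataSource(DataSourceContextHolder.HIVE) // query runs against the Hive pool
    public void analyzeOnHive() {
        // ... run the heavy aggregation through the shared SqlSessionFactory/mapper
    }

    @TargetDataSource(DataSourceContextHolder.MYSQL) // write goes to the MySQL pool
    public void saveResult() {
        // ... persist the result back to MySQL
    }
}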
And that's a wrap!