Source article: Pooling Techniques (Part 1): How Does Druid Manage Database Connections?
This code snippet corresponds to step 1.2 of the flow: the actual execution of acquiring a connection.
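Before the listing, a minimal usage sketch of how application code ends up in this method. The JDBC URL, credentials, and pool settings below are illustrative assumptions, not values from the article; the point is that dataSource.getConnection() goes through Druid's public API and eventually calls the getConnectionInternal(maxWait) shown afterwards.
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.GetConnectionTimeoutException;
import java.sql.Connection;
import java.sql.SQLException;

public class DruidGetConnectionDemo {
    public static void main(String[] args) throws SQLException {
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setUrl("jdbc:mysql://localhost:3306/demo"); // illustrative URL
        dataSource.setUsername("demo");
        dataSource.setPassword("demo");
        dataSource.setMaxActive(8);           // upper bound checked against activeCount
        dataSource.setMaxWait(3000);          // maxWait (ms) handed down to getConnectionInternal
        dataSource.setMaxWaitThreadCount(16); // threshold on blocked business threads

        try (Connection conn = dataSource.getConnection()) {
            // getConnection() eventually reaches getConnectionInternal(maxWait) shown below
            System.out.println("got connection: " + conn);
        } catch (GetConnectionTimeoutException e) {
            // thrown when the pool stays empty for the whole maxWait window
            System.err.println("pool exhausted: " + e.getMessage());
        } finally {
            dataSource.close();
        }
    }
}
With that in mind, getConnectionInternal itself: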
private DruidPooledConnection getConnectionInternal(long maxWait) throws SQLException {
//availability checks
if (closed) {
connectErrorCountUpdater.incrementAndGet(this);
throw new DataSourceClosedException("dataSource already closed at " + new Date(closeTimeMillis));
}
if (!enable) {
connectErrorCountUpdater.incrementAndGet(this);
throw new DataSourceDisableException();
}
final long nanos = TimeUnit.MILLISECONDS.toNanos(maxWait); //maxWait converted to nanoseconds
final int maxWaitThreadCount = this.maxWaitThreadCount; //threshold: the maximum number of business threads allowed to block waiting for a connection
DruidConnectionHolder holder;
for (boolean createDirect = false;;) {
if (createDirect) { //this mode is not enabled, so createDirect stays false and this branch never runs; the body is omitted here for readability
//code omitted
}
try {
lock.lockInterruptibly(); //acquire the pool lock
} catch (InterruptedException e) {
connectErrorCountUpdater.incrementAndGet(this);
throw new SQLException("interrupt", e);
}
try {
if (maxWaitThreadCount > 0
&& notEmptyWaitThreadCount >= maxWaitThreadCount) { //if the number of business threads blocked waiting for a connection has reached the threshold, fail immediately
connectErrorCountUpdater.incrementAndGet(this);
throw new SQLException("maxWaitThreadCount " + maxWaitThreadCount + ", current wait Thread count "
+ lock.getQueueLength());
}
if (onFatalError
&& onFatalErrorMaxActive > 0
&& activeCount >= onFatalErrorMaxActive) {
connectErrorCountUpdater.incrementAndGet(this);
StringBuilder errorMsg = new StringBuilder();
errorMsg.append("onFatalError, activeCount ")
.append(activeCount)
.append(", onFatalErrorMaxActive ")
.append(onFatalErrorMaxActive);
if (lastFatalErrorTimeMillis > 0) {
errorMsg.append(", time '")
.append(StringUtils.formatDateTime19(
lastFatalErrorTimeMillis, TimeZone.getDefault()))
.append("'");
}
if (lastFatalErrorSql != null) {
errorMsg.append(", sql \n")
.append(lastFatalErrorSql);
}
throw new SQLException(
errorMsg.toString(), lastFatalError);
}
connectCount++; //increment the total connect count
if (createScheduler != null
&& poolingCount == 0
&& activeCount < maxActive
&& creatingCountUpdater.get(this) == 0
&& createScheduler instanceof ScheduledThreadPoolExecutor) {
ScheduledThreadPoolExecutor executor = (ScheduledThreadPoolExecutor) createScheduler;
if (executor.getQueue().size() > 0) {
createDirect = true; //the createScheduler async-creation mode is off by default (and this article does not cover it), so this line never executes, createDirect never becomes true, and the createDirect branch above is never triggered
continue;
}
}
if (maxWait > 0) {
holder = pollLast(nanos); //try to take a connection from the pool, waiting at most maxWait
} else {
holder = takeLast();
}
if (holder != null) {
activeCount++; //got a connection, so increment activeCount
if (activeCount > activePeak) {
activePeak = activeCount;
activePeakTime = System.currentTimeMillis();
}
}
} catch (InterruptedException e) {
connectErrorCountUpdater.incrementAndGet(this);
throw new SQLException(e.getMessage(), e);
} catch (SQLException e) {
connectErrorCountUpdater.incrementAndGet(this);
throw e;
} finally {
lock.unlock();
}
break;
}
if (holder == null) { //no connection was obtained: assemble the error details and throw
long waitNanos = waitNanosLocal.get();
StringBuilder buf = new StringBuilder(128);
buf.append("wait millis ")//
.append(waitNanos / (1000 * 1000))//
.append(", active ").append(activeCount)//
.append(", maxActive ").append(maxActive)//
.append(", creating ").append(creatingCount)//
;
if (creatingCount > 0 && createStartNanos > 0) {
long createElapseMillis = (System.nanoTime() - createStartNanos) / (1000 * 1000);
if (createElapseMillis > 0) {
buf.append(", createElapseMillis ").append(createElapseMillis);
}
}
if (createErrorCount > 0) {
buf.append(", createErrorCount ").append(createErrorCount);
}
List<JdbcSqlStatValue> sqlList = this.getDataSourceStat().getRuningSqlList();
for (int i = 0; i < sqlList.size(); ++i) {
if (i != 0) {
buf.append('\n');
} else {
buf.append(", ");
}
JdbcSqlStatValue sql = sqlList.get(i);
buf.append("runningSqlCount ").append(sql.getRunningCount());
buf.append(" : ");
buf.append(sql.getSql());
}
String errorMessage = buf.toString();
if (this.createError != null) {
throw new GetConnectionTimeoutException(errorMessage, createError);
} else {
throw new GetConnectionTimeoutException(errorMessage);
}
}
holder.incrementUseCount();
DruidPooledConnection poolalbeConnection = new DruidPooledConnection(holder); //wrap the holder into the pooled connection handed back to the caller
return poolalbeConnection; //return it
}
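That is the whole borrowing path: the actual waiting happens in pollLast (when maxWait > 0) or takeLast, both of which coordinate with the connection-creator thread through the pool's ReentrantLock and its two Conditions, empty and notEmpty. As a reference point for reading pollLast below, here is a stripped-down, self-contained sketch of that handshake; TinyPool is an invented name for illustration only and omits all of Druid's bookkeeping (wait-thread counters, fail-fast, statistics).
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

// Illustration only, not Druid code: the lock/condition handshake behind pollLast and the creator thread.
class TinyPool<T> {
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition notEmpty = lock.newCondition(); // signaled when an item is created or returned
    private final Condition empty = lock.newCondition();    // signaled when a borrower finds the pool empty
    private final Object[] items = new Object[8];
    private int poolingCount;

    // Borrower side: mirrors the shape of pollLast(nanos).
    @SuppressWarnings("unchecked")
    T pollLast(long timeoutMillis) throws InterruptedException {
        long estimate = TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
        lock.lock();
        try {
            while (poolingCount == 0) {
                empty.signal();                           // ask the creator side to produce an item
                if (estimate <= 0) {
                    return null;                          // timed out; the caller turns this into a timeout error
                }
                estimate = notEmpty.awaitNanos(estimate); // returns the remaining wait time
            }
            T last = (T) items[--poolingCount];           // LIFO: take the most recently returned item
            items[poolingCount] = null;
            return last;
        } finally {
            lock.unlock();
        }
    }

    // Creator/recycler side: mirrors putting a holder back and signaling notEmpty.
    void put(T item) {
        lock.lock();
        try {
            if (poolingCount == items.length) {
                throw new IllegalStateException("sketch pool is full");
            }
            items[poolingCount++] = item;
            notEmpty.signal();                            // wake one waiting borrower
        } finally {
            lock.unlock();
        }
    }
}
Druid's real pollLast adds the notEmptyWaitThreadCount accounting, the fail-fast escape hatch, and the waitNanos statistics on top of exactly this loop.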
//try to take a connection from the pool within the given timeout
private DruidConnectionHolder pollLast(long nanos) throws InterruptedException, SQLException {
long estimate = nanos;
for (;;) {
if (poolingCount == 0) { //no idle connections left in the pool, so main flow 3 needs to be told to create more
emptySignal(); // empty.signal(): wake main flow 3 to create a new connection
if (failFast && isFailContinuous()) { //if fail-fast is enabled and creation keeps failing, do not block the business thread; throw immediately
throw new DataSourceNotAvailableException(createError);
}
if (estimate <= 0) {
waitNanosLocal.set(nanos - estimate);
return null;
}
notEmptyWaitThreadCount++; //one more business thread is now blocked waiting for a connection
if (notEmptyWaitThreadCount > notEmptyWaitThreadPeak) {
notEmptyWaitThreadPeak = notEmptyWaitThreadCount;
}
try {
long startEstimate = estimate;
estimate = notEmpty.awaitNanos(estimate); // block (park) for up to estimate nanoseconds; when signaled early by a recycle or the creator thread, estimate is refreshed to the remaining wait time
notEmptyWaitCount++;
notEmptyWaitNanos += (startEstimate - estimate);
if (!enable) {
connectErrorCountUpdater.incrementAndGet(this);
throw new DataSourceDisableException();
}
} catch (InterruptedException ie) {
notEmpty.signal(); // interrupted while waiting, so pass the signal on to another blocked business thread
notEmptySignalCount++;
throw ie;
} finally {
notEmptyWaitThreadCount--;
}
if (poolingCount == 0) { //woke up but still failed to win a connection
if (estimate > 0) { //if the target wait time (maxWait) has not been used up yet, keep trying
continue;
}
waitNanosLocal.set(nanos - estimate);
return null;
}
}
decrementPoolingCount(); //poolingCount--
DruidConnectionHolder last = connections[poolingCount]; //take the last (most recently returned) connection
connections[poolingCount] = null; //the connection is now lent out, so clear its slot
long waitNanos = nanos - estimate; //record how long this acquisition waited; 0 when a connection was available immediately
last.setLastNotEmptyWaitNanos(waitNanos);
return last; //hand the holder back to getConnectionInternal
}
}
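Finally, a small probe (MaxWaitProbe and probe are invented names for illustration) that makes the maxWait accounting above observable from the caller's side: with every connection already borrowed and not returned, getConnection blocks while pollLast keeps re-awaiting notEmpty until the remaining estimate drops to zero or below, then returns null, and the "wait millis ..." message assembled in getConnectionInternal is thrown as a GetConnectionTimeoutException.
import com.alibaba.druid.pool.DruidDataSource;
import com.alibaba.druid.pool.GetConnectionTimeoutException;
import java.sql.Connection;
import java.sql.SQLException;

final class MaxWaitProbe {
    // Assumes a dataSource whose maxActive connections are all borrowed and not yet returned.
    static void probe(DruidDataSource dataSource) throws SQLException {
        long start = System.currentTimeMillis();
        try (Connection extra = dataSource.getConnection()) {
            System.out.println("unexpectedly got a connection: " + extra);
        } catch (GetConnectionTimeoutException e) {
            long blockedMillis = System.currentTimeMillis() - start;
            // blockedMillis is roughly maxWait: that is how long pollLast spent in notEmpty.awaitNanos
            // before giving up, and nanos - estimate is what ends up in the "wait millis ..." message.
            System.err.println("blocked ~" + blockedMillis + " ms: " + e.getMessage());
        }
    }
}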