I had originally planned to keep going with the Robot Framework series, but a Derby task suddenly landed on me. I had never even heard of this database before, so I went digging online, starting with what files a Derby database directory actually contains. There are plenty of client tools to choose from; since my Java background is weak I skipped Eclipse and NetBeans and, after asking around, settled on DbVisualizer Pro, and then set about configuring it for Derby. For such a lightweight tool it really is quite pleasant. The setup is roughly: with Derby already installed, open the tool (connecting to Derby works much the same in any client); first point the driver at the derby jar in Derby's lib directory, then pick a connection mode, embedded or server, and choose Embedded; select your Derby database, and the remaining fields (name, password and so on) can be filled in the same way as for any other database connection, or simply left blank. The parts that matter are the driver selection and the database selection.
OK, the environment is ready; from here you can start running SQL or just click around.
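For reference, the same embedded connection can be made from plain JDBC. This is only a minimal sketch: it assumes derby.jar is on the classpath and that the database lives at a hypothetical path d:/db1 (create=true builds it on first use).

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class EmbeddedDerbyDemo {
    public static void main(String[] args) throws Exception {
        // The embedded driver is auto-loaded from derby.jar (JDBC 4);
        // the URL simply points at the database directory.
        String url = "jdbc:derby:d:/db1;create=true";   // hypothetical path
        try (Connection conn = DriverManager.getConnection(url);
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery("SELECT TABLENAME FROM SYS.SYSTABLES")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
        // Shut the embedded engine down; a successful shutdown is reported
        // by Derby as an SQLException, so it is caught and ignored here.
        try {
            DriverManager.getConnection("jdbc:derby:;shutdown=true");
        } catch (SQLException expectedOnShutdown) {
        }
    }
}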
This task is not simple CRUD, which is why all the groundwork above: the goal is to clear out the data under the seg0 directory. A plain DELETE obviously will not do, since it only removes a table's rows, and seg0 is internal to Derby, so the only route is the API that Apache Derby itself provides, ideally called from Java. Java felt too heavyweight to me, so I went with Python; that work is still in progress. I am writing this down now, ahead of deployment, so I do not forget it.
db.lck file
log control file
log data files
seg0 system tables
seg0 file names
seg0 file contents
service.properties file
db.lck file
db.lck stores the database's UUID. How is that UUID generated?
When BasicUUIDFactory is initialized it fixes majorId, timemillis and currentValue; it also carries a few constants: MODULUS, MULTIPLIER, STEP and INITIAL_VALUE.
majorId is normally taken from the monitor environment's hash code, falling back to the process's free memory (Runtime.getRuntime().freeMemory()), and is masked down to 48 bits.
timemillis is the current timestamp.
currentValue starts out at INITIAL_VALUE (2551218188L).
BasicUUIDFactory creates a UUID as follows:
starting from currentValue, compute ((MULTIPLIER * currentValue) + STEP) % MODULUS to obtain the new currentValue;
if the new currentValue equals INITIAL_VALUE, increment majorId; if majorId has wrapped all the way around to 0, re-initialize timemillis and currentValue;
build a BasicUUID from majorId, timemillis and sequence; the next UUID starts from the new currentValue.
When the UUID string is produced:
'-' is used as the separator;
4 bytes (8 hex characters) are written from sequence, followed by a separator;
2 bytes (4 characters) are written from timemillis, followed by a separator;
another 2 bytes (4 characters) from timemillis, followed by a separator;
another 2 bytes (4 characters) from timemillis, followed by a separator;
6 bytes (12 characters) are written from majorId;
the result is a 36-character UUID string. A small stand-alone example of this layout follows the Derby source quoted below.
public final class BasicUUIDFactory
implements UUIDFactory
{
/*
** Fields of BasicUUIDFactory.
*/
private long majorId; // 48 bits only
private long timemillis;
public BasicUUIDFactory() {
Object env = Monitor.getMonitor().getEnvironment();
if (env != null) {
String s = env.toString();
if (s != null)
env = s;
majorId = ((long) env.hashCode());
} else {
majorId = Runtime.getRuntime().freeMemory();
}
majorId &= 0x0000ffffffffffffL;
resetCounters();
}
private static final long MODULUS = ( 1L << 32 );
private static final long MULTIPLIER = ( ( 1L << 14 ) + 1 );
private static final long STEP = ( ( 1L << 27 ) + 1 );
private static final long INITIAL_VALUE = ( 2551218188L );
private long currentValue;
public synchronized UUID createUUID()
{
long cv = currentValue = ( ( MULTIPLIER * currentValue ) + STEP ) % MODULUS;
if ( cv == INITIAL_VALUE ) { bumpMajor(); }
int sequence = (int) cv;
return new BasicUUID(majorId, timemillis, sequence);
}
private void bumpMajor() {
// 48 bits only
majorId = (majorId + 1L) & 0x0000ffffffffffffL;
if (majorId == 0L)
resetCounters();
}
private void resetCounters()
{
timemillis = System.currentTimeMillis();
currentValue = INITIAL_VALUE;
}
}
public BasicUUID(long majorId, long timemillis, int sequence)
{
this.majorId = majorId;
this.timemillis = timemillis;
this.sequence = sequence;
}
public String toString() {return stringWorkhorse( '-' );}
public String stringWorkhorse( char separator )
{
char[] data = new char[36];
writeMSB(data, 0, (long) sequence, 4);
int offset = 8;
if (separator != 0) data[offset++] = separator;
long ltimemillis = timemillis;
writeMSB(data, offset, (ltimemillis & 0x0000ffff00000000L) >>> 32, 2);
offset += 4;
if (separator != 0) data[offset++] = separator;
writeMSB(data, offset, (ltimemillis & 0x00000000ffff0000L) >>> 16, 2);
offset += 4;
if (separator != 0) data[offset++] = separator;
writeMSB(data, offset, (ltimemillis & 0x000000000000ffffL), 2);
offset += 4;
if (separator != 0) data[offset++] = separator;
writeMSB(data, offset, majorId, 6);
offset += 12;
return new String(data, 0, offset);
}
private static void writeMSB(char[] data, int offset, long value, int nbytes)
{
for (int i = nbytes - 1; i >= 0; i--)
{
long b = (value & (255L << (8 * i))) >>> (8 * i);
int c = (int) ((b & 0xf0) >> 4);
data[offset++] = (char) (c < 10 ? c + '0' : (c - 10) + 'a');
c = (int) (b & 0x0f);
data[offset++] = (char) (c < 10 ? c + '0' : (c - 10) + 'a');
}
}
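To make that layout concrete, here is a small stand-alone snippet that assembles the same 8-4-4-4-12 pattern with String.format. The majorId, timemillis and sequence values are made up for illustration; the snippet only mirrors the byte slicing of stringWorkhorse above and is not Derby code.

public class UuidLayoutDemo {
    public static void main(String[] args) {
        long majorId    = 0x0000a1b2c3d4e5f6L;      // arbitrary 48-bit value
        long timemillis = System.currentTimeMillis();
        int  sequence   = 0x3ad1f2c4;               // arbitrary counter value

        // sequence: 4 bytes -> 8 hex chars; timemillis: 3 x 2 bytes -> 4 chars each
        // (bits 47..32, 31..16, 15..0); majorId: 6 bytes -> 12 hex chars.
        String uuid = String.format("%08x-%04x-%04x-%04x-%012x",
                sequence,
                (timemillis >>> 32) & 0xffffL,
                (timemillis >>> 16) & 0xffffL,
                timemillis & 0xffffL,
                majorId);

        System.out.println(uuid);           // 36 characters including the 4 separators
        System.out.println(uuid.length());  // prints 36
    }
}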
log control file
A Derby database's log directory holds the log control file (log.ctrl), a mirror of that control file (logmirror.ctrl) and the actual log files; right after initialization there is only log1.dat.
The log control file is defined as 64 bytes, but only 48 bytes are actually written. A reader sketch for those 48 bytes follows the code below.
The mirror control file has exactly the same content as the control file.
boolean writeControlFile(StorageFile logControlFileName, long value)
throws IOException, StandardException
{
StorageRandomAccessFile logControlFile = null;
ByteArrayOutputStream baos = new ByteArrayOutputStream(64);
DataOutputStream daos = new DataOutputStream(baos);
daos.writeInt(fid);
daos.writeInt(OBSOLETE_LOG_VERSION_NUMBER);
daos.writeLong(value);
if (onDiskMajorVersion == 0) {
onDiskMajorVersion = jbmsVersion.getMajorVersion();
onDiskMinorVersion = jbmsVersion.getMinorVersion();
onDiskBeta = jbmsVersion.isBeta();
}
daos.writeInt(onDiskMajorVersion);
daos.writeInt(onDiskMinorVersion);
// For 2.0 beta we added the build number and the isBeta indication.
// (5 bytes from our first spare long)
daos.writeInt(jbmsVersion.getBuildNumberAsInt());
byte flags = 0;
if (onDiskBeta)
flags |= IS_BETA_FLAG;
if (logNotSynced || wasDBInDurabilityTestModeNoSync)
flags |= IS_DURABILITY_TESTMODE_NO_SYNC_FLAG;
daos.writeByte(flags);
//
// write some spare bytes after 2.0 we have 3 + 2(8) spare bytes.
long spare = 0;
daos.writeByte(0);
daos.writeByte(0);
daos.writeByte(0);
daos.writeLong(spare);
daos.flush();
// write the checksum for the control data written
checksum.reset();
checksum.update(baos.toByteArray(), 0, baos.size());
daos.writeLong(checksum.getValue());
daos.flush();
try
{
    checkCorrupt();

    try
    {
        logControlFile = privRandomAccessFile(logControlFileName, "rw");
    }
    catch (IOException ioe)
    {
        logControlFile = null;
        return false;
    }

    logControlFile.seek(0);
    logControlFile.write(baos.toByteArray());
    syncFile(logControlFile);
    logControlFile.close();

    // write the same data to the mirror control file
    try
    {
        logControlFile =
            privRandomAccessFile(getMirrorControlFileName(), "rw");
    }
    catch (IOException ioe)
    {
        logControlFile = null;
        return false;
    }

    logControlFile.seek(0);
    logControlFile.write(baos.toByteArray());
    syncFile(logControlFile);
}
finally
{
    if (logControlFile != null)
        logControlFile.close();
}
return true;
}
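To see what those 48 bytes contain, the following stand-alone reader walks them field by field in the same order writeControlFile writes them. The path db1/log/log.ctrl is only an assumption about a typical database directory, and the checksum is printed but not verified.

import java.io.DataInputStream;
import java.io.FileInputStream;

public class ReadLogControlFile {
    public static void main(String[] args) throws Exception {
        String path = args.length > 0 ? args[0] : "db1/log/log.ctrl"; // assumed location
        try (DataInputStream in = new DataInputStream(new FileInputStream(path))) {
            System.out.println("format id        : " + in.readInt());  // fid
            System.out.println("obsolete version : " + in.readInt());  // OBSOLETE_LOG_VERSION_NUMBER
            System.out.println("log instant      : " + in.readLong()); // value
            System.out.println("on-disk major    : " + in.readInt());
            System.out.println("on-disk minor    : " + in.readInt());
            System.out.println("build number     : " + in.readInt());
            System.out.println("flags            : " + in.readByte());
            in.skipBytes(3);                                           // 3 spare bytes
            in.readLong();                                             // spare long
            System.out.println("checksum         : " + in.readLong()); // 48 bytes in total
        }
    }
}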
log data files
First a 24-byte header is written (format id, obsolete version number, the log file number and the end instant of the previous log record), and the rest of the file is then padded with zeros out to 1 MB, the log switch interval (a reader sketch for this header follows the code below).
private boolean initLogFile(StorageRandomAccessFile newlog, long number,
long prevLogRecordEndInstant)
throws IOException, StandardException
{
newlog.seek(0);
newlog.writeInt(fid);
newlog.writeInt(OBSOLETE_LOG_VERSION_NUMBER); // for silly backwards compatibility reason
newlog.writeLong(number);
newlog.writeLong(prevLogRecordEndInstant);
syncFile(newlog);
return true;
}
private void preAllocateNewLogFile(StorageRandomAccessFile log) throws IOException, StandardException
{
int amountToWrite = logSwitchInterval - LOG_FILE_HEADER_SIZE ;
int bufferSize = logBufferSize * 2;
byte[] emptyBuffer = new byte[bufferSize];
int nWrites = amountToWrite/bufferSize;
int remainingBytes = amountToWrite % bufferSize;
try {
    while (nWrites-- > 0)
        log.write(emptyBuffer);
    if (remainingBytes != 0)
        log.write(emptyBuffer, 0, remainingBytes);

    // sync the file
    syncFile(log);
}
catch (IOException ie)
{
    // ignore I/O errors during preallocation; it is only a
    // performance optimization and the log works without it
}
}
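Similarly, the 24-byte header of a log data file can be read back like this; the path db1/log/log1.dat is again just an assumed location, and the file size should come out at roughly 1 MB because of the preallocation above.

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;

public class ReadLogFileHeader {
    public static void main(String[] args) throws Exception {
        File f = new File(args.length > 0 ? args[0] : "db1/log/log1.dat"); // assumed location
        try (DataInputStream in = new DataInputStream(new FileInputStream(f))) {
            System.out.println("format id            : " + in.readInt());  // fid
            System.out.println("obsolete version     : " + in.readInt());
            System.out.println("log file number      : " + in.readLong());
            System.out.println("previous end instant : " + in.readLong()); // 24 bytes in total
        }
        System.out.println("file size            : " + f.length() + " bytes");
    }
}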
seg0 system tables
It contains:
1 PropertyConglomerate table
4 core tables
18 non-core tables
The PropertyConglomerate table
public void boot(boolean create, Properties startParams)
    throws StandardException
{
    this.serviceProperties = startParams;

    boot_load_conglom_map();

    if (create)
    {
        conglom_nextid = 1;
    }

    rawstore = (RawStoreFactory) Monitor.bootServiceModule(
        create, this, RawStoreFactory.MODULE, serviceProperties);

    Monitor.bootServiceModule(
        create, this,
        org.apache.derby.iapi.reference.Module.PropertyFactory, startParams);

    // Create the in-memory conglomerate directory
    conglomCacheInit();

    // Read in the conglomerate directory from the conglom conglom
    // Create the conglom conglom from within a separate system xact
    RAMTransaction tc = (RAMTransaction) getAndNameTransaction(
        ContextService.getFactory().getCurrentContextManager(),
        AccessFactoryGlobals.USER_TRANS_NAME);

    int lock_mode = LockingPolicy.MODE_CONTAINER;

    system_default_locking_policy =
        tc.getRawStoreXact().newLockingPolicy(
            lock_mode,
            TransactionController.ISOLATION_SERIALIZABLE, true);

    tc.commit();

    // set up the property validation
    pf = (PropertyFactory) Monitor.findServiceModule(
        this, org.apache.derby.iapi.reference.Module.PropertyFactory);

    xactProperties = new PropertyConglomerate(tc, create, startParams, pf);

    rawstore.getRawStoreProperties(tc);
    bootLookupSystemLockLevel(tc);

    lock_mode =
        (getSystemLockLevel() == TransactionController.MODE_TABLE ?
            LockingPolicy.MODE_CONTAINER : LockingPolicy.MODE_RECORD);

    system_default_locking_policy =
        tc.getRawStoreXact().newLockingPolicy(
            lock_mode,
            TransactionController.ISOLATION_SERIALIZABLE, true);

    // set up the callback for the lock manager with initialization
    addPropertySetNotification(getLockFactory(), tc);

    // make sure user cannot change these properties
    addPropertySetNotification(this, tc);

    tc.commit();
    tc.destroy();
    tc = null;
}

PropertyConglomerate(
    TransactionController tc,
    boolean create,
    Properties serviceProperties,
    PropertyFactory pf)
    throws StandardException
{
    this.pf = pf;

    if (create)
    {
        DataValueDescriptor[] template = makeNewTemplate();

        Properties conglomProperties = new Properties();

        conglomProperties.put(
            Property.PAGE_SIZE_PARAMETER,
            RawStoreFactory.PAGE_SIZE_STRING);

        conglomProperties.put(
            RawStoreFactory.PAGE_RESERVED_SPACE_PARAMETER,
            RawStoreFactory.PAGE_RESERVED_ZERO_SPACE_STRING);

        propertiesConglomId =
            tc.createConglomerate(
                AccessFactoryGlobals.HEAP,
                template,
                null,
                (int[]) null, // use default collation for property conglom.
                conglomProperties,
                TransactionController.IS_DEFAULT);

        serviceProperties.put(
            Property.PROPERTIES_CONGLOM_ID,
            Long.toString(propertiesConglomId));
    }

    this.serviceProperties = serviceProperties;

    lf = ((RAMTransaction) tc).getAccessManager().getLockFactory();
    cachedLock = new CacheLock(this);

    PC_XenaVersion softwareVersion = new PC_XenaVersion();
    if (create)
        setProperty(
            tc, DataDictionary.PROPERTY_CONGLOMERATE_VERSION,
            softwareVersion, true);
    else
        softwareVersion.upgradeIfNeeded(tc, this, serviceProperties);

    getCachedDbProperties(tc);
}
The 4 core tables and the 18 non-core tables
Initialization comes first. The work is fairly simple, but the two groups are handled differently: the 4 core tables have their TabInfoImpl objects created and stored in the coreInfo array right away, while for the 18 non-core tables only the noncoreInfo array is allocated, without being filled.
Next, createDictionaryTables(startParams, bootingTC, ddg) creates the 4 core tables and the 18 non-core tables.
Because the non-core tables are not materialized at initialization time, getNonCoreTIByNumber is called at run time to build their TabInfoImpl objects on demand. A query that lists all of these catalogs from SQL follows the code below.
public void boot(boolean create, Properties startParams)
throws StandardException
{
softwareVersion = new DD_Version(this, DataDictionary.DD_VERSION_DERBY_10_6);
startupParameters = startParams;
uuidFactory = Monitor.getMonitor().getUUIDFactory();
engineType = Monitor.getEngineType( startParams );
dvf = langConnFactory.getDataValueFactory();
exFactory = (ExecutionFactory) Monitor.bootServiceModule(
create, this,
ExecutionFactory.MODULE,
startParams);
// initialize the arrays of core and noncore tables
initializeCatalogInfo();
// indicate that we are in the process of booting
booting = true;
// set only if child class hasn't overriden this already
if ( dataDescriptorGenerator == null )
{ dataDescriptorGenerator = new DataDescriptorGenerator( this ); }
}
public void initializeCatalogInfo()
throws StandardException
{
initializeCoreInfo();
initializeNoncoreInfo();
}
private void initializeCoreInfo()
throws StandardException
{
TabInfoImpl[] lcoreInfo = coreInfo = new TabInfoImpl[NUM_CORE];
UUIDFactory luuidFactory = uuidFactory;
lcoreInfo[SYSTABLES_CORE_NUM] =
new TabInfoImpl(new SYSTABLESRowFactory(luuidFactory, exFactory, dvf));
lcoreInfo[SYSCOLUMNS_CORE_NUM] =
new TabInfoImpl(new SYSCOLUMNSRowFactory(luuidFactory, exFactory, dvf));
lcoreInfo[SYSCONGLOMERATES_CORE_NUM] =
new TabInfoImpl(new SYSCONGLOMERATESRowFactory(luuidFactory, exFactory, dvf));
lcoreInfo[SYSSCHEMAS_CORE_NUM] =
new TabInfoImpl(new SYSSCHEMASRowFactory(luuidFactory, exFactory, dvf));
}
private void initializeNoncoreInfo()
throws StandardException
{
noncoreInfo = new TabInfoImpl[NUM_NONCORE];
}
protected void createDictionaryTables(Properties params, TransactionController tc,
DataDescriptorGenerator ddg)
throws StandardException
{
/*
** Create a new schema descriptor -- with no args
** creates the system schema descriptor in which
** all tables reside (SYS)
*/
systemSchemaDesc =
newSystemSchemaDesc(
SchemaDescriptor.STD_SYSTEM_SCHEMA_NAME,
SchemaDescriptor.SYSTEM_SCHEMA_UUID);
/* Create the core tables and generate the UUIDs for their
* heaps (before creating the indexes).
* RESOLVE - This loop will eventually drive all of the
* work for creating the core tables.
*/
for (int coreCtr = 0; coreCtr < NUM_CORE; coreCtr++)
{
TabInfoImpl ti = coreInfo[coreCtr];
Properties heapProperties = ti.getCreateHeapProperties();
ti.setHeapConglomerate(
createConglomerate(
ti.getTableName(),
tc,
ti.getCatalogRowFactory().makeEmptyRow(),
heapProperties
)
);
// bootstrap indexes on core tables before bootstrapping the tables themselves
if (coreInfo[coreCtr].getNumberOfIndexes() > 0)
{
bootStrapSystemIndexes(systemSchemaDesc, tc, ddg, ti);
}
}
// bootstrap the core tables into the data dictionary
for ( int ictr = 0; ictr < NUM_CORE; ictr++ )
{
/* RESOLVE - need to do something with COLUMNTYPE in following table creating code */
TabInfoImpl ti = coreInfo[ictr];
addSystemTableToDictionary(ti, systemSchemaDesc, tc, ddg);
}
for (int noncoreCtr = 0; noncoreCtr < NUM_NONCORE; noncoreCtr++)
{
int catalogNumber = noncoreCtr + NUM_CORE;
boolean isDummy = (catalogNumber == SYSDUMMY1_CATALOG_NUM);
TabInfoImpl ti = getNonCoreTIByNumber(catalogNumber);
makeCatalog(ti, isDummy ? sysIBMSchemaDesc : systemSchemaDesc, tc );
if (isDummy)
populateSYSDUMMY1(tc);
// Clear the table entry for this non-core table,
// to allow it to be garbage-collected. The idea
// is that a running database might never need to
// reference a non-core table after it was created.
clearNoncoreTable(noncoreCtr);
}
}
protected TabInfoImpl getNonCoreTIByNumber(int catalogNumber)
throws StandardException
{
int nonCoreNum = catalogNumber - NUM_CORE;
// Look up the TabInfoImpl in the array. This does not have to be
// synchronized, because getting a reference is atomic.
TabInfoImpl retval = noncoreInfo[nonCoreNum];
if (retval == null)
{
UUIDFactory luuidFactory = uuidFactory;
switch (catalogNumber)
{
case SYSCONSTRAINTS_CATALOG_NUM:
retval = new TabInfoImpl(new SYSCONSTRAINTSRowFactory(
luuidFactory, exFactory, dvf));
break;
case SYSKEYS_CATALOG_NUM:
retval = new TabInfoImpl(new SYSKEYSRowFactory(
luuidFactory, exFactory, dvf));
break;
case SYSPERMS_CATALOG_NUM:
retval = new TabInfoImpl(new SYSPERMSRowFactory(
luuidFactory, exFactory, dvf));
break;
}
initSystemIndexVariables(retval);
noncoreInfo[nonCoreNum] = retval;
}
return retval;
}
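From the SQL side, these catalogs and the conglomerate numbers behind them can be listed by joining SYS.SYSTABLES with SYS.SYSCONGLOMERATES. A minimal sketch, reusing the hypothetical embedded URL from the beginning:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ListSystemCatalogs {
    public static void main(String[] args) throws Exception {
        String url = "jdbc:derby:d:/db1";   // hypothetical path; the database must already exist
        try (Connection conn = DriverManager.getConnection(url);
             Statement st = conn.createStatement();
             // TABLETYPE = 'S' marks system catalogs; CONGLOMERATENUMBER is the
             // number that shows up in the seg0 file name (see the next section).
             ResultSet rs = st.executeQuery(
                 "SELECT t.TABLENAME, c.CONGLOMERATENUMBER, c.ISINDEX " +
                 "FROM SYS.SYSTABLES t JOIN SYS.SYSCONGLOMERATES c " +
                 "ON t.TABLEID = c.TABLEID " +
                 "WHERE t.TABLETYPE = 'S' ORDER BY c.CONGLOMERATENUMBER")) {
            while (rs.next()) {
                System.out.printf("%-30s %6d  index=%s%n",
                        rs.getString(1), rs.getLong(2), rs.getBoolean(3));
            }
        }
    }
}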
seg0 file names
A total of 70 files get written. What do they represent, and how are their names chosen?
First, the container files split into data and index: data has type 0 and an index has type 1. The kind is selected via the String implementation argument, and the type id comes from cfactory.getConglomerateFactoryId().
The file name is derived from getNextConglomId: segment is always 0, and conglomid normally climbs as 16, 32, 48, ...; for an index the factory id 1 ends up in the low bits, giving 17, 33, 49, and so on.
conglomid is rendered in hexadecimal and segment is normally 0, so the file for conglomid = 32 holds data and is named c20.dat, while conglomid = 33 is an index and is stored in c21.dat.
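Putting getNextConglomId and the path-building code quoted later in this section together, the id-to-file-name mapping can be reproduced in a few lines. This is only an illustration of the scheme, not a Derby API.

public class ConglomFileName {
    // Mirrors getNextConglomId: the factory id (0 = heap/data, 1 = index)
    // sits in the low 4 bits of the conglomerate id.
    static long conglomId(long counter, int factoryType) {
        return (counter << 4) | factoryType;
    }

    // Mirrors the GET_CONTAINER_PATH logic: "seg" + segment + "/c" + hex id + ".dat"
    static String fileName(int segment, long conglomId) {
        return "seg" + segment + "/c" + Long.toHexString(conglomId) + ".dat";
    }

    public static void main(String[] args) {
        System.out.println(fileName(0, conglomId(2, 0))); // seg0/c20.dat (data)
        System.out.println(fileName(0, conglomId(2, 1))); // seg0/c21.dat (index)
        System.out.println(fileName(0, conglomId(3, 0))); // seg0/c30.dat
    }
}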
protected void createDictionaryTables(Properties params, TransactionController tc,
DataDescriptorGenerator ddg)
throws StandardException
{
for (int coreCtr = 0; coreCtr < NUM_CORE; coreCtr++)
{
TabInfoImpl ti = coreInfo[coreCtr];
Properties heapProperties = ti.getCreateHeapProperties();
ti.setHeapConglomerate(
createConglomerate(
ti.getTableName(),
tc,
ti.getCatalogRowFactory().makeEmptyRow(),
heapProperties
)
);
// bootstrap indexes on core tables before bootstrapping the tables themselves
if (coreInfo[coreCtr].getNumberOfIndexes() > 0)
{
bootStrapSystemIndexes(systemSchemaDesc, tc, ddg, ti);
}
}
// bootstrap the core tables into the data dictionary
for ( int ictr = 0; ictr < NUM_CORE; ictr++ )
{
/* RESOLVE - need to do something with COLUMNTYPE in following table creating code */
TabInfoImpl ti = coreInfo[ictr];
addSystemTableToDictionary(ti, systemSchemaDesc, tc, ddg);
}
for (int noncoreCtr = 0; noncoreCtr < NUM_NONCORE; noncoreCtr++)
{
int catalogNumber = noncoreCtr + NUM_CORE;
boolean isDummy = (catalogNumber == SYSDUMMY1_CATALOG_NUM);
TabInfoImpl ti = getNonCoreTIByNumber(catalogNumber);
makeCatalog(ti, isDummy ? sysIBMSchemaDesc : systemSchemaDesc, tc );
if (isDummy)
populateSYSDUMMY1(tc);
clearNoncoreTable(noncoreCtr);
}
}
public long createConglomerate(
String implementation,
DataValueDescriptor[] template,
ColumnOrdering[] columnOrder,
int[] collationIds,
Properties properties,
int temporaryFlag)
throws StandardException
{
// Find the appropriate factory for the desired implementation.
MethodFactory mfactory;
mfactory = accessmanager.findMethodFactoryByImpl(implementation);
ConglomerateFactory cfactory = (ConglomerateFactory) mfactory;
int segment;
long conglomid;
if ((temporaryFlag & TransactionController.IS_TEMPORARY)
== TransactionController.IS_TEMPORARY)
{
segment = ContainerHandle.TEMPORARY_SEGMENT;
conglomid = ContainerHandle.DEFAULT_ASSIGN_ID;
}
else
{
segment = 0; // RESOLVE - only using segment 0
conglomid =
accessmanager.getNextConglomId(
cfactory.getConglomerateFactoryId());
}
// call the factory to actually create the conglomerate.
Conglomerate conglom =
cfactory.createConglomerate(
this, segment, conglomid, template,
columnOrder, collationIds, properties, temporaryFlag);
}
protected long getNextConglomId(int factory_type)
throws StandardException
{
long conglomid;
synchronized (conglom_cache)
{
if (conglom_nextid == 0)
{
// shift out the factory id and then add 1.
conglom_nextid = (rawstore.getMaxContainerId() >> 4) + 1;
}
conglomid = conglom_nextid++;
}
return((conglomid << 4) | factory_type);
}
public Object run() throws StandardException, IOException
{
    switch (actionCode)
    {
    case GET_FILE_NAME_ACTION:
        return privGetFileName(
            actionIdentity, actionStub, actionErrorOK, actionTryAlternatePath);

    case CREATE_CONTAINER_ACTION:
    {
        StorageFile file = privGetFileName(actionIdentity, false, false, false);

        dataFactory.writeInProgress();
        try
        {
            fileData = file.getRandomAccessFile("rw");
        }
        finally
        {
            dataFactory.writeFinished();
        }

        writeRAFHeader(
            fileData, true,
            (actionIdentity.getSegmentId() != ContainerHandle.TEMPORARY_SEGMENT));

        canUpdate = true;
        return null;
    } // end of case CREATE_CONTAINER_ACTION

    case REMOVE_FILE_ACTION:
        return privRemoveFile(actionFile) ? this : null;

    case OPEN_CONTAINER_ACTION:
    {
        boolean isStub = false; // is this a stub?

        StorageFile file = privGetFileName(actionIdentity, false, true, true);
        if (file == null)
            return null;

        if (!file.exists())
        {
            // file does not exist, maybe it has been stubbified
            file = privGetFileName(actionIdentity, true, true, true);
            if (!file.exists())
                return null;
            isStub = true;
        }

        canUpdate = false;
        try
        {
            if (!dataFactory.isReadOnly() && file.canWrite())
                canUpdate = true;
        }
        catch (SecurityException se)
        {
            // just means we can't write to it.
        }

        fileData = file.getRandomAccessFile(canUpdate ? "rw" : "r");
        readHeader(getEmbryonicPage(fileData, FIRST_ALLOC_PAGE_OFFSET));

        return this;
    }
    }

    return null;
}
public StorageFile getAlternateContainerPath(
ContainerKey containerId,
boolean stub)
{
return getContainerPath(
containerId, stub, GET_ALTERNATE_CONTAINER_PATH_ACTION);
}
public final Object run() throws IOException, StandardException
{
    switch (actionCode)
    {
    case BOOT_ACTION:
        readOnly = storageFactory.isReadOnlyDatabase();
        supportsRandomAccess = storageFactory.supportsRandomAccess();
        return null;

    case GET_CONTAINER_PATH_ACTION:
    case GET_ALTERNATE_CONTAINER_PATH_ACTION:
    {
        StringBuffer sb = new StringBuffer("seg");
        sb.append(containerId.getSegmentId());
        sb.append(storageFactory.getSeparator());
        if (actionCode == GET_CONTAINER_PATH_ACTION)
        {
            // normal container name: c<hex id>.dat ('d' prefix for a dropped stub)
            sb.append(stub ? 'd' : 'c');
            sb.append(Long.toHexString(containerId.getContainerId()));
            sb.append(".dat");
        }
        else
        {
            // alternate form with upper-case letters
            sb.append(stub ? 'D' : 'C');
            sb.append(Long.toHexString(containerId.getContainerId()));
            sb.append(".DAT");
        }
        return storageFactory.newStorageFile(sb.toString());
    }
    }

    return null;
}
seg0 file contents
Each file is initialized with a single page; a page is 4 KB.
After that initial 4 KB, some more data is written into the data and index files; this is done by createFinished().
Each file gets at least one more page, and some of the larger system catalogs reach 112 KB. All of this growth happens 4 KB at a time.
The initial data is written into each file as follows:
epage = new byte[pageSize]; allocates a 4096-byte array.
writeHeaderToArray(containerInfo); writes 80 bytes into containerInfo, covering everything from formatIdInteger and status through the checksum.
AllocPage.WriteContainerInfo(containerInfo, epage, create); stores the length of containerInfo, N (80), at epage[BORROWED_SPACE_OFFSET], then copies the 80 bytes of containerInfo into epage starting at BORROWED_SPACE_OFFSET + BORROWED_SPACE_LEN (offset 109).
In the end epage is non-zero only at byte 108 (the length byte) and in bytes 109 through 188 (the 80 bytes of containerInfo); every other byte is 0 (a reader sketch based on these offsets follows the code below).
private void writeRAFHeader(
    StorageRandomAccessFile file, boolean create, boolean syncFile)
    throws IOException, StandardException
{
    byte[] epage;
    if (create)
    {
        epage = new byte[pageSize];
    }
    else
    {
    }

    writeHeader(file, create, epage);

    if (syncFile)
    {
        dataFactory.writeInProgress();
        try
        {
            file.sync(false);
        }
        finally
        {
            dataFactory.writeFinished();
        }
    }
}

protected void writeHeader(StorageRandomAccessFile file, boolean create, byte[] epage)
    throws IOException, StandardException
{
    writeHeaderToArray(containerInfo);
    AllocPage.WriteContainerInfo(containerInfo, epage, create);
    // now epage has the containerInfo written inside it

    // force WAL - and check to see if database is corrupt or is frozen.
    dataFactory.flush(lastLogInstant);
    if (lastLogInstant != null)
        lastLogInstant = null;

    // write it out
    dataFactory.writeInProgress();
    try
    {
        writeAtOffset(file, epage, FIRST_ALLOC_PAGE_OFFSET);
    }
    finally
    {
        dataFactory.writeFinished();
    }
}

private void writeHeaderToArray(byte[] a) throws IOException
{
    ArrayOutputStream a_out = new ArrayOutputStream(a);
    FormatIdOutputStream outStream = new FormatIdOutputStream(a_out);

    int status = 0;
    if (getDroppedState())       status |= FILE_DROPPED;
    if (getCommittedDropState()) status |= FILE_COMMITTED_DROP;
    if (isReusableRecordId())    status |= FILE_REUSABLE_RECORDID;

    a_out.setPosition(0);
    a_out.setLimit(CONTAINER_INFO_SIZE);

    outStream.writeInt(formatIdInteger);
    outStream.writeInt(status);
    outStream.writeInt(pageSize);
    outStream.writeInt(spareSpace);
    outStream.writeInt(minimumRecordSize);
    outStream.writeShort(initialPages);
    outStream.writeShort(PreAllocSize);      // write spare1
    outStream.writeLong(firstAllocPageNumber);
    outStream.writeLong(firstAllocPageOffset);
    outStream.writeLong(containerVersion);
    outStream.writeLong(estimatedRowCount);
    outStream.writeLong(reusableRecordIdSequenceNumber);
    outStream.writeLong(0);                  // write spare3

    checksum.reset();
    checksum.update(a, 0, CONTAINER_INFO_SIZE - CHECKSUM_SIZE);
    outStream.writeLong(checksum.getValue());

    a_out.clearLimit();
}

public static void WriteContainerInfo(byte[] containerInfo, byte[] epage, boolean create)
    throws StandardException
{
    int N = (containerInfo == null) ? 0 : containerInfo.length;

    if (create)
    {
        epage[BORROWED_SPACE_OFFSET] = (byte) N;
    }
    else
    {
    }

    if (N != 0)
        System.arraycopy(containerInfo, 0,
                         epage, BORROWED_SPACE_OFFSET + BORROWED_SPACE_LEN, N);
}
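Based on the offsets described above (the length byte at 108 and the 80-byte container header starting at 109), the header of a seg0 container file can be decoded with a short stand-alone reader. The offsets and the sample path are assumptions taken from this walkthrough rather than official constants.

import java.io.DataInputStream;
import java.io.FileInputStream;

public class ReadContainerHeader {
    public static void main(String[] args) throws Exception {
        String path = args.length > 0 ? args[0] : "db1/seg0/c20.dat"; // hypothetical file
        try (DataInputStream in = new DataInputStream(new FileInputStream(path))) {
            in.skipBytes(108);                     // position of the length byte (see text above)
            int n = in.readUnsignedByte();         // expected to be 80 (CONTAINER_INFO_SIZE)
            System.out.println("containerInfo length : " + n);
            System.out.println("format id            : " + in.readInt());
            System.out.println("status               : " + in.readInt());
            System.out.println("page size            : " + in.readInt());
            System.out.println("spare space          : " + in.readInt());
            System.out.println("minimum record size  : " + in.readInt());
            System.out.println("initial pages        : " + in.readShort());
            System.out.println("prealloc size        : " + in.readShort());
            System.out.println("first alloc page no  : " + in.readLong());
            System.out.println("first alloc page off : " + in.readLong());
            System.out.println("container version    : " + in.readLong());
            System.out.println("estimated row count  : " + in.readLong());
            System.out.println("reusable recid seq   : " + in.readLong());
            in.readLong();                         // spare3
            System.out.println("checksum             : " + in.readLong());
        }
    }
}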
service.properties file
First, here is what the finished file looks like:
#D:\jhhWorks\eclipse_ws\testjava\db4
# ********************************************************************
# ***                Please do NOT edit this file.                 ***
# *** CHANGING THE CONTENT OF THIS FILE MAY CAUSE DATA CORRUPTION. ***
# ********************************************************************
#Sat Sep 22 21:11:22 CST 2012
SysschemasIndex2Identifier=225
SyscolumnsIdentifier=144
SysconglomeratesIndex1Identifier=49
SysconglomeratesIdentifier=32
SyscolumnsIndex2Identifier=177
SysschemasIndex1Identifier=209
SysconglomeratesIndex3Identifier=81
SystablesIndex2Identifier=129
SyscolumnsIndex1Identifier=161
derby.serviceProtocol=org.apache.derby.database.Database
SysschemasIdentifier=192
derby.storage.propertiesId=16
SysconglomeratesIndex2Identifier=65
derby.serviceLocale=zh_CN
SystablesIdentifier=96
SystablesIndex1Identifier=113
When bootService starts, the properties passed in are just the connection attributes: {user=sa, password=derby, create=true, drdaID={3}}.
A new Properties object is then created with those attributes as its defaults.
serviceProtocol and serviceLocale are added next, so the new object holds 2 entries of its own;
then serviceDirectory and serviceType are added, bringing it to 4;
then the following 3 runtime properties (constants defined on Property) are added, bringing it to 7:
public static final String CREATE_WITH_NO_LOG =PROPERTY_RUNTIME_PREFIX + "storage.createWithNoLog";
String PROPERTIES_CONGLOM_ID = "derby.storage.propertiesId";
String BOOT_DB_CLASSPATH = PROPERTY_RUNTIME_PREFIX + "database.classpath";
In BasicDatabase:
startParams.put(Property.CREATE_WITH_NO_LOG, "true");
In PropertyConglomerate:
serviceProperties.put(
Property.PROPERTIES_CONGLOM_ID,
Long.toString(propertiesConglomId));
In BasicDatabase:
startParams.put(Property.BOOT_DB_CLASSPATH, classpath);
After ts.bootModule returns there are 20 properties. Four of them carry the __rt runtime prefix, and those are stripped out by removeRuntimeProperties before service.properties is written.
Where do the others come from?
DataDictionaryImpl defines 13 constants, which are added once the 4 core system tables have been initialized.
In the end the remaining 16 properties are written to service.properties. In the code below, the ==== comments show the contents of the properties object observed at each point during boot.
private static final String CFG_SYSTABLES_ID                = "SystablesIdentifier";
private static final String CFG_SYSTABLES_INDEX1_ID         = "SystablesIndex1Identifier";
private static final String CFG_SYSTABLES_INDEX2_ID         = "SystablesIndex2Identifier";
private static final String CFG_SYSCOLUMNS_ID               = "SyscolumnsIdentifier";
private static final String CFG_SYSCOLUMNS_INDEX1_ID        = "SyscolumnsIndex1Identifier";
private static final String CFG_SYSCOLUMNS_INDEX2_ID        = "SyscolumnsIndex2Identifier";
private static final String CFG_SYSCONGLOMERATES_ID         = "SysconglomeratesIdentifier";
private static final String CFG_SYSCONGLOMERATES_INDEX1_ID  = "SysconglomeratesIndex1Identifier";
private static final String CFG_SYSCONGLOMERATES_INDEX2_ID  = "SysconglomeratesIndex2Identifier";
private static final String CFG_SYSCONGLOMERATES_INDEX3_ID  = "SysconglomeratesIndex3Identifier";
private static final String CFG_SYSSCHEMAS_ID               = "SysschemasIdentifier";
private static final String CFG_SYSSCHEMAS_INDEX1_ID        = "SysschemasIndex1Identifier";
private static final String CFG_SYSSCHEMAS_INDEX2_ID        = "SysschemasIndex2Identifier";

// Add the bootstrap information to the configuration
params.put(CFG_SYSTABLES_ID,
    Long.toString(coreInfo[SYSTABLES_CORE_NUM].getHeapConglomerate()));
..................................................................
params.put(CFG_SYSSCHEMAS_INDEX2_ID,
    Long.toString(coreInfo[SYSSCHEMAS_CORE_NUM].getIndexConglomerate(
        ((SYSSCHEMASRowFactory) coreInfo[SYSSCHEMAS_CORE_NUM]
            .getCatalogRowFactory()).SYSSCHEMAS_INDEX2_ID)));

protected Object bootService(PersistentService provider,
    String factoryInterface, String serviceName, Properties properties,
    boolean create) throws StandardException
{
    synchronized (this)
    {
        // ==== {user=sa, password=derby, create=true, drdaID={3}} ====
        Locale serviceLocale = null;
        if (create)
        {
            properties = new Properties(properties);
            serviceLocale = setLocale(properties);
            properties.put(Property.SERVICE_PROTOCOL, factoryInterface);
            // ==== {user=sa, password=derby, create=true, drdaID={3}}
            //      {derby.serviceProtocol=org.apache.derby.database.Database,
            //       derby.serviceLocale=zh_CN} ====
            serviceName = provider.createServiceRoot(serviceName,
                Boolean.valueOf(properties.getProperty(Property.DELETE_ON_CREATE)).booleanValue());
            serviceKey = ProtocolKey.create(factoryInterface, serviceName);
        }
        else if (properties != null)
        {
            String serverLocaleDescription =
                properties.getProperty(Property.SERVICE_LOCALE);
            if (serverLocaleDescription != null)
                serviceLocale = staticGetLocaleFromString(serverLocaleDescription);
        }

        ts = new TopService(this, serviceKey, provider, serviceLocale);
        services.addElement(ts);
    }

    if (properties != null)
    {
        // the root of the data
        properties.put(PersistentService.ROOT, serviceName);
        properties.put(PersistentService.TYPE, provider.getType());
    }
    // ==== {user=sa, password=derby, create=true, drdaID={3}}
    //      {derby.__rt.serviceType=directory,
    //       derby.serviceProtocol=org.apache.derby.database.Database,
    //       derby.serviceLocale=zh_CN,
    //       derby.__rt.serviceDirectory=D:\jhhWorks\eclipse_ws\testjava\db5} ====

    if (SanityManager.DEBUG && reportOn)
    {
        dumpProperties("Service Properties: " + serviceKey.toString(), properties);
    }

    // push a new context manager
    if (previousCM == null)
    {
        cm = contextService.newContextManager();
        contextService.setCurrentContextManager(cm);
    }
    sb = new ServiceBootContext(cm);

    UpdateServiceProperties usProperties;
    Properties serviceProperties;
    // ==== same contents as above; serviceProperties NULL, usProperties NULL ====

    instance = ts.bootModule(create, null, serviceKey, serviceProperties);
    // ==== {SysschemasIndex1Identifier=209, derby.__rt.database.classpath=,
    //       SysconglomeratesIdentifier=32, SysconglomeratesIndex3Identifier=81,
    //       SyscolumnsIdentifier=144, SysschemasIndex2Identifier=225,
    //       SystablesIndex1Identifier=113, derby.__rt.storage.createWithNoLog=true,
    //       SyscolumnsIndex1Identifier=161, SystablesIndex2Identifier=129,
    //       SyscolumnsIndex2Identifier=177, SysschemasIdentifier=192,
    //       derby.storage.propertiesId=16,
    //       derby.serviceProtocol=org.apache.derby.database.Database,
    //       SystablesIdentifier=96, SysconglomeratesIndex1Identifier=49,
    //       derby.__rt.serviceDirectory=D:\jhhWorks\eclipse_ws\testjava\db5,
    //       derby.__rt.serviceType=directory, derby.serviceLocale=zh_CN,
    //       SysconglomeratesIndex2Identifier=65} ====

    if (create || inRestore)
    {
        // remove all the in-memory properties
        provider.saveServiceProperties(serviceName,
            usProperties.getStorageFactory(),
            BaseMonitor.removeRuntimeProperties(properties), false);
        usProperties.setServiceBooted();
    }

    if (cm != previousCM)
        cm.cleanupOnError(StandardException.closeException());

    return instance;
}

public void saveServiceProperties(final String serviceName, StorageFactory sf,
    final Properties properties, final boolean replace)
    throws StandardException
{
    final WritableStorageFactory storageFactory = (WritableStorageFactory) sf;
    AccessController.doPrivileged(
        new PrivilegedExceptionAction()
        {
            public Object run() throws StandardException, IOException
            {
                StorageFile backupFile = null;
                StorageFile servicePropertiesFile =
                    storageFactory.newStorageFile(PersistentService.PROPERTIES_NAME);

                OutputStream os = servicePropertiesFile.getOutputStream();
                properties.store(os, serviceName +
                    MessageService.getTextMessage(MessageId.SERVICE_PROPERTIES_DONT_EDIT));
                storageFactory.sync(os, false);
                os.close();
                os = null;
                return null;
            }
        });
}

private void store0(BufferedWriter bw, String comments, boolean escUnicode)
    throws IOException
{
    if (comments != null) {
        writeComments(bw, comments);
    }
    bw.write("#" + new Date().toString());
    bw.newLine();
    synchronized (this) {
        for (Enumeration e = keys(); e.hasMoreElements();) {
            String key = (String) e.nextElement();
            String val = (String) get(key);
            key = saveConvert(key, true, escUnicode);
            /* No need to escape embedded and trailing spaces for value, hence
             * pass false to flag. */
            val = saveConvert(val, false, escUnicode);
            bw.write(key + "=" + val);
            bw.newLine();
        }
    }
    bw.flush();
}
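To check the result, the stored key/value pairs can simply be loaded back with java.util.Properties; a minimal sketch, assuming the hypothetical database path used earlier:

import java.io.FileInputStream;
import java.util.Properties;

public class DumpServiceProperties {
    public static void main(String[] args) throws Exception {
        String path = args.length > 0 ? args[0] : "d:/db1/service.properties"; // assumed location
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream(path)) {
            props.load(in);
        }
        // The file itself is written in hash order (see store0 above); sort for readability.
        props.stringPropertyNames().stream().sorted()
             .forEach(k -> System.out.println(k + " = " + props.getProperty(k)));
    }
}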