Hibernate Source Code Study (5): Creating the SessionFactory

Following on from part 4, the next step is to call Configuration's buildSessionFactory method to create a SessionFactory. The method looks like this:

public SessionFactory buildSessionFactory(ServiceRegistry serviceRegistry) throws HibernateException {
		LOG.debugf( "Preparing to build session factory with filters : %s", filterDefinitions );
		//register types, including those contributed by the database dialect
		buildTypeRegistrations( serviceRegistry );
		//second-pass compilation, which includes annotation processing
		secondPassCompile();
		if ( !metadataSourceQueue.isEmpty() ) {
			LOG.incompleteMappingMetadataCacheProcessing();
		}
		//validate that the configured mapped classes are consistent with the properties declared in the hbm mapping files
		validate();
		//verify the global configuration properties
		Environment.verifyProperties( properties );
		Properties copy = new Properties();
		copy.putAll( properties );
		//work on a copy of the properties and resolve any placeholders
		ConfigurationHelper.resolvePlaceHolders( copy );
		//build a Settings object from the resolved properties and the registered services
		Settings settings = buildSettings( copy, serviceRegistry );
		//create the SessionFactory implementation from the mappings, the registered services, the settings,
		//and an optional SessionFactoryObserver (used to monitor the factory's lifecycle)
		return new SessionFactoryImpl(
				this,
				mapping,
				serviceRegistry,
				settings,
				sessionFactoryObserver
			);
	}
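
For context, a typical bootstrap that ends up in this method might look roughly like the following. This is only a sketch: it assumes the Hibernate 4.3-style StandardServiceRegistryBuilder (earlier 4.x releases used ServiceRegistryBuilder), and the hibernate.cfg.xml read by configure() is assumed to exist.

import org.hibernate.SessionFactory;
import org.hibernate.boot.registry.StandardServiceRegistryBuilder;
import org.hibernate.cfg.Configuration;
import org.hibernate.service.ServiceRegistry;

public class BootstrapSketch {
	public static void main(String[] args) {
		// read hibernate.cfg.xml plus the mapping metadata it references
		Configuration configuration = new Configuration().configure();
		// build a ServiceRegistry from the same properties
		ServiceRegistry serviceRegistry = new StandardServiceRegistryBuilder()
				.applySettings( configuration.getProperties() )
				.build();
		// this call enters the buildSessionFactory( ServiceRegistry ) method shown above
		SessionFactory sessionFactory = configuration.buildSessionFactory( serviceRegistry );
		sessionFactory.close();
	}
}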

Next, let's look at the second-pass compilation method secondPassCompile() mentioned above:

protected void secondPassCompile () throws MappingException {
		LOG.trace( "Starting secondPassCompile() processing" );
		
		// TEMPORARY
		// Ensure the correct ClassLoader is used in commons-annotations.
		//remember the context ClassLoader currently attached to this thread, purely so it can be restored once processing is finished
		ClassLoader tccl = Thread.currentThread().getContextClassLoader();
		//Get the context ClassLoader from ClassLoaderHelper and set it on the current thread so commons-annotations
		//uses the intended loader. Looking at the actual implementation, it currently just returns the thread's own
		//context ClassLoader, but the ClassLoaderHelper comments note that the field can be replaced via custom
		//injection, and that Hibernate 5 will replace this mechanism entirely; see the ClassLoaderHelper source for details.
		Thread.currentThread().setContextClassLoader( ClassLoaderHelper.getContextClassLoader() );

		//process default values first: on the first pass the defaults declared in orm.xml need to be applied
		{
			if ( !isDefaultProcessed ) {
				//use global delimiters if orm.xml declare it
				Map defaults = reflectionManager.getDefaults();
				final Object isDelimited = defaults.get( "delimited-identifier" );
				if ( isDelimited != null && isDelimited == Boolean.TRUE ) {
					getProperties().put( Environment.GLOBALLY_QUOTED_IDENTIFIERS, "true" );
				}
				// Set default schema name if orm.xml declares it.
				final String schema = (String) defaults.get( "schema" );
				if ( StringHelper.isNotEmpty( schema ) ) {
					getProperties().put( Environment.DEFAULT_SCHEMA, schema );
				}
				// Set default catalog name if orm.xml declares it.
				final String catalog = (String) defaults.get( "catalog" );
				if ( StringHelper.isNotEmpty( catalog ) ) {
					getProperties().put( Environment.DEFAULT_CATALOG, catalog );
				}
				//bind annotation-level defaults
				AnnotationBinder.bindDefaults( createMappings() );
				isDefaultProcessed = true;
			}
		}

		// process metadata queue
		{
			metadataSourceQueue.syncAnnotatedClasses();
			metadataSourceQueue.processMetadata( determineMetadataSourcePrecedence() );
		}



		try {
			inSecondPass = true;
			processSecondPassesOfType( PkDrivenByDefaultMapsIdSecondPass.class );
			processSecondPassesOfType( SetSimpleValueTypeSecondPass.class );
			processSecondPassesOfType( CopyIdentifierComponentSecondPass.class );
			processFkSecondPassInOrder();
			processSecondPassesOfType( CreateKeySecondPass.class );
			processSecondPassesOfType( SecondaryTableSecondPass.class );

			originalSecondPassCompile();

			inSecondPass = false;
		}
		catch ( RecoverableException e ) {
			//the exception was not recoverable after all
			throw ( RuntimeException ) e.getCause();
		}

		// process cache queue: apply the queued cache concurrency strategies
		{
			for ( CacheHolder holder : caches ) {
				if ( holder.isClass ) {
					applyCacheConcurrencyStrategy( holder );
				}
				else {
					applyCollectionCacheConcurrencyStrategy( holder );
				}
			}
			caches.clear();
		}
		//process unique constraints
		for ( Map.Entry<Table, List<UniqueConstraintHolder>> tableListEntry : uniqueConstraintHoldersByTable.entrySet() ) {
			final Table table = tableListEntry.getKey();
			final List<UniqueConstraintHolder> uniqueConstraints = tableListEntry.getValue();
			for ( UniqueConstraintHolder holder : uniqueConstraints ) {
				buildUniqueKeyFromColumnNames( table, holder.getName(), holder.getColumns() );
			}
		}
		//restore the original context ClassLoader on the current thread
		Thread.currentThread().setContextClassLoader( tccl );
	}
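
The defaults block at the start of secondPassCompile() simply turns the persistence-unit defaults from orm.xml (delimited identifiers, default schema, default catalog) into ordinary configuration properties. Setting the same properties directly on the Configuration is equivalent; the sketch below uses the literal keys that Environment.GLOBALLY_QUOTED_IDENTIFIERS, Environment.DEFAULT_SCHEMA and Environment.DEFAULT_CATALOG resolve to, with placeholder schema/catalog names:

Configuration cfg = new Configuration();
// equivalent of <delimited-identifiers/> in orm.xml
cfg.setProperty( "hibernate.globally_quoted_identifiers", "true" );
// equivalent of the default <schema> element (placeholder value)
cfg.setProperty( "hibernate.default_schema", "my_schema" );
// equivalent of the default <catalog> element (placeholder value)
cfg.setProperty( "hibernate.default_catalog", "my_catalog" );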

Finally, here is the SessionFactoryImpl constructor used to create the session factory:

public SessionFactoryImpl(
			final Configuration cfg,
			Mapping mapping,
			ServiceRegistry serviceRegistry,
			Settings settings,
			SessionFactoryObserver observer) throws HibernateException {
			LOG.debug( "Building session factory" );
		//options for the session factory
		sessionFactoryOptions = new SessionFactoryOptions() {
			private EntityNotFoundDelegate entityNotFoundDelegate;

			@Override
			public Interceptor getInterceptor() {
				return cfg.getInterceptor();
			}

			@Override
			public EntityNotFoundDelegate getEntityNotFoundDelegate() {
				if ( entityNotFoundDelegate == null ) {
					if ( cfg.getEntityNotFoundDelegate() != null ) {
						entityNotFoundDelegate = cfg.getEntityNotFoundDelegate();
					}
					else {
						entityNotFoundDelegate = new EntityNotFoundDelegate() {
							public void handleEntityNotFound(String entityName, Serializable id) {
								throw new ObjectNotFoundException( id, entityName );
							}
						};
					}
				}
				return entityNotFoundDelegate;
			}
		};

		this.settings = settings;

		this.properties = new Properties();
		this.properties.putAll( cfg.getProperties() );

		this.serviceRegistry = serviceRegistry.getService( SessionFactoryServiceRegistryFactory.class ).buildServiceRegistry(
				this,
				cfg
		);
		//JDBC services
		this.jdbcServices = this.serviceRegistry.getService( JdbcServices.class );
		//database dialect
		this.dialect = this.jdbcServices.getDialect();
		//cache access service
		this.cacheAccess = this.serviceRegistry.getService( CacheImplementor.class );
		final RegionFactory regionFactory = cacheAccess.getRegionFactory();
		//SQL function registry: registers the custom functions from the configuration plus the functions of the
		//configured dialect into a Map keyed by function name, with the dialect's SQLFunction implementation as the value
		this.sqlFunctionRegistry = new SQLFunctionRegistry( getDialect(), cfg.getSqlFunctions() );
		//if an observer was passed in, add it to the current observer chain (SessionFactoryObserverChain)
		if ( observer != null ) {
			this.observer.addObserver( observer );
		}

		this.typeResolver = cfg.getTypeResolver().scope( this );
		this.typeHelper = new TypeLocatorImpl( typeResolver );
		//filter definitions
		this.filters = new HashMap<String, FilterDefinition>();
		this.filters.putAll( cfg.getFilterDefinitions() );

		LOG.debugf( "Session factory constructed with filter configurations : %s", filters );
		LOG.debugf( "Instantiating session factory with properties: %s", properties );

		//query plan cache
		this.queryPlanCache = new QueryPlanCache( this );

		// todo : everything above here consider implementing as standard SF service.  specifically: stats, caches, types, function-reg
		//local class defining a SessionFactoryObserver that keeps track of the registered Integrators;
		//an Integrator is an extension hook (used by Envers, Bean Validation, etc.) that participates in
		//session factory creation and teardown
		class IntegratorObserver implements SessionFactoryObserver {
			private ArrayList<Integrator> integrators = new ArrayList<Integrator>();

			@Override
			public void sessionFactoryCreated(SessionFactory factory) {
			}

			@Override
			//when the session factory is closed, call disintegrate() on every registered Integrator
			public void sessionFactoryClosed(SessionFactory factory) {
				for ( Integrator integrator : integrators ) {
					integrator.disintegrate( SessionFactoryImpl.this, SessionFactoryImpl.this.serviceRegistry );
				}
			}
		}
		//the observer that tracks registered Integrators
		final IntegratorObserver integratorObserver = new IntegratorObserver();
		//add the integrator observer to the current observer chain
		this.observer.addObserver( integratorObserver );
		//look up the IntegratorService and let every discovered Integrator hook into this session factory
		for ( Integrator integrator : serviceRegistry.getService( IntegratorService.class ).getIntegrators() ) {
			integrator.integrate( cfg, this, this.serviceRegistry );
			integratorObserver.integrators.add( integrator );
		}

		//Generators:
		//identifier generators
		identifierGenerators = new HashMap();
		//iterate over all class mappings in the configuration
		Iterator classes = cfg.getClassMappings();
		while ( classes.hasNext() ) {
			PersistentClass model = (PersistentClass) classes.next();
			//only non-inherited (root) mappings define their own identifier generator
			if ( !model.isInherited() ) {
				IdentifierGenerator generator = model.getIdentifier().createIdentifierGenerator(
						cfg.getIdentifierGeneratorFactory(),
						getDialect(),
				        settings.getDefaultCatalogName(),
				        settings.getDefaultSchemaName(),
				        (RootClass) model
				);
				//register the identifier generator for this persistent class under its entity name
				identifierGenerators.put( model.getEntityName(), generator );
			}
		}


		///////////////////////////////////////////////////////////////////////
		// Prepare persisters and link them up with their cache
		// region/access-strategy
		//cache region prefix from the settings
		final String cacheRegionPrefix = settings.getCacheRegionPrefix() == null ? "" : settings.getCacheRegionPrefix() + ".";
		//persister factory service
		final PersisterFactory persisterFactory = serviceRegistry.getService( PersisterFactory.class );
		//entity persisters
		entityPersisters = new HashMap();
		Map entityAccessStrategies = new HashMap();
		Map<String,ClassMetadata> classMeta = new HashMap<String,ClassMetadata>();
		//iterate over all class mappings again
		classes = cfg.getClassMappings();
		while ( classes.hasNext() ) {
			final PersistentClass model = (PersistentClass) classes.next();
			model.prepareTemporaryTables( mapping, getDialect() );
			final String cacheRegionName = cacheRegionPrefix + model.getRootClass().getCacheRegionName();
			// cache region is defined by the root-class in the hierarchy...
			EntityRegionAccessStrategy accessStrategy = ( EntityRegionAccessStrategy ) entityAccessStrategies.get( cacheRegionName );
			if ( accessStrategy == null && settings.isSecondLevelCacheEnabled() ) {
				final AccessType accessType = AccessType.fromExternalName( model.getCacheConcurrencyStrategy() );
				if ( accessType != null ) {
					LOG.tracef( "Building shared cache region for entity data [%s]", model.getEntityName() );
					EntityRegion entityRegion = regionFactory.buildEntityRegion( cacheRegionName, properties, CacheDataDescriptionImpl.decode( model ) );
					accessStrategy = entityRegion.buildAccessStrategy( accessType );
					entityAccessStrategies.put( cacheRegionName, accessStrategy );
					cacheAccess.addCacheRegion( cacheRegionName, entityRegion );
				}
			}
			
			NaturalIdRegionAccessStrategy naturalIdAccessStrategy = null;
			if ( model.hasNaturalId() && model.getNaturalIdCacheRegionName() != null ) {
				final String naturalIdCacheRegionName = cacheRegionPrefix + model.getNaturalIdCacheRegionName();
				naturalIdAccessStrategy = ( NaturalIdRegionAccessStrategy ) entityAccessStrategies.get( naturalIdCacheRegionName );
				
				if ( naturalIdAccessStrategy == null && settings.isSecondLevelCacheEnabled() ) {
					final CacheDataDescriptionImpl cacheDataDescription = CacheDataDescriptionImpl.decode( model );
					
					NaturalIdRegion naturalIdRegion = null;
					try {
						naturalIdRegion = regionFactory.buildNaturalIdRegion( naturalIdCacheRegionName, properties,
								cacheDataDescription );
					}
					catch ( UnsupportedOperationException e ) {
						LOG.warnf(
								"Shared cache region factory [%s] does not support natural id caching; " +
										"shared NaturalId caching will be disabled for not be enabled for %s",
								regionFactory.getClass().getName(),
								model.getEntityName()
						);
					}
					
					if (naturalIdRegion != null) {
						naturalIdAccessStrategy = naturalIdRegion.buildAccessStrategy( regionFactory.getDefaultAccessType() );
						entityAccessStrategies.put( naturalIdCacheRegionName, naturalIdAccessStrategy );
						cacheAccess.addCacheRegion(  naturalIdCacheRegionName, naturalIdRegion );
					}
				}
			}
			//create the entity persister from the mapping and the cache access strategies resolved above
			EntityPersister cp = persisterFactory.createEntityPersister(
					model,
					accessStrategy,
					naturalIdAccessStrategy,
					this,
					mapping
			);
			//add the persister to entityPersisters, keyed by entity name
			entityPersisters.put( model.getEntityName(), cp );
			classMeta.put( model.getEntityName(), cp.getClassMetadata() );
		}
		this.classMetadata = Collections.unmodifiableMap(classMeta);
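
		// Collection persisters: the same pattern as the entity loop above, applied to collection mappings;
		// this also records which entities participate in which collection roles.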

		Map<String,Set<String>> tmpEntityToCollectionRoleMap = new HashMap<String,Set<String>>();
		collectionPersisters = new HashMap<String,CollectionPersister>();
		Map<String,CollectionMetadata> tmpCollectionMetadata = new HashMap<String,CollectionMetadata>();
		Iterator collections = cfg.getCollectionMappings();
		while ( collections.hasNext() ) {
			Collection model = (Collection) collections.next();
			final String cacheRegionName = cacheRegionPrefix + model.getCacheRegionName();
			final AccessType accessType = AccessType.fromExternalName( model.getCacheConcurrencyStrategy() );
			CollectionRegionAccessStrategy accessStrategy = null;
			if ( accessType != null && settings.isSecondLevelCacheEnabled() ) {
				LOG.tracev( "Building shared cache region for collection data [{0}]", model.getRole() );
				CollectionRegion collectionRegion = regionFactory.buildCollectionRegion( cacheRegionName, properties, CacheDataDescriptionImpl
						.decode( model ) );
				accessStrategy = collectionRegion.buildAccessStrategy( accessType );
				entityAccessStrategies.put( cacheRegionName, accessStrategy );
				cacheAccess.addCacheRegion( cacheRegionName, collectionRegion );
			}
			CollectionPersister persister = persisterFactory.createCollectionPersister(
					cfg,
					model,
					accessStrategy,
					this
			) ;
			collectionPersisters.put( model.getRole(), persister );
			tmpCollectionMetadata.put( model.getRole(), persister.getCollectionMetadata() );
			Type indexType = persister.getIndexType();
			if ( indexType != null && indexType.isAssociationType() && !indexType.isAnyType() ) {
				String entityName = ( ( AssociationType ) indexType ).getAssociatedEntityName( this );
				Set roles = tmpEntityToCollectionRoleMap.get( entityName );
				if ( roles == null ) {
					roles = new HashSet();
					tmpEntityToCollectionRoleMap.put( entityName, roles );
				}
				roles.add( persister.getRole() );
			}
			Type elementType = persister.getElementType();
			if ( elementType.isAssociationType() && !elementType.isAnyType() ) {
				String entityName = ( ( AssociationType ) elementType ).getAssociatedEntityName( this );
				Set roles = tmpEntityToCollectionRoleMap.get( entityName );
				if ( roles == null ) {
					roles = new HashSet();
					tmpEntityToCollectionRoleMap.put( entityName, roles );
				}
				roles.add( persister.getRole() );
			}
		}
		collectionMetadata = Collections.unmodifiableMap( tmpCollectionMetadata );
		Iterator itr = tmpEntityToCollectionRoleMap.entrySet().iterator();
		while ( itr.hasNext() ) {
			final Map.Entry entry = ( Map.Entry ) itr.next();
			entry.setValue( Collections.unmodifiableSet( ( Set ) entry.getValue() ) );
		}
		collectionRolesByEntityParticipant = Collections.unmodifiableMap( tmpEntityToCollectionRoleMap );

		//Named Queries:
		namedQueries = new HashMap<String, NamedQueryDefinition>( cfg.getNamedQueries() );
		namedSqlQueries = new HashMap<String, NamedSQLQueryDefinition>( cfg.getNamedSQLQueries() );
		sqlResultSetMappings = new HashMap<String, ResultSetMappingDefinition>( cfg.getSqlResultSetMappings() );
		imports = new HashMap<String,String>( cfg.getImports() );

		// after *all* persisters and named queries are registered
		Iterator iter = entityPersisters.values().iterator();
		while ( iter.hasNext() ) {
			final EntityPersister persister = ( ( EntityPersister ) iter.next() );
			persister.postInstantiate();
			registerEntityNameResolvers( persister );

		}
		iter = collectionPersisters.values().iterator();
		while ( iter.hasNext() ) {
			final CollectionPersister persister = ( ( CollectionPersister ) iter.next() );
			persister.postInstantiate();
		}

		//JNDI + Serialization:
		//the session factory name from the settings (used for JNDI binding)
		name = settings.getSessionFactoryName();
		//generate a UUID for this factory
		try {
			uuid = (String) UUID_GENERATOR.generate(null, null);
		}
		catch (Exception e) {
			throw new AssertionFailure("Could not generate UUID");
		}
		//register this session factory with the SessionFactoryRegistry
		SessionFactoryRegistry.INSTANCE.addSessionFactory(
				uuid,
				name,
				settings.isSessionFactoryNameAlsoJndiName(),
				this,
				serviceRegistry.getService( JndiService.class )
		);

		LOG.debug( "Instantiated session factory" );

		settings.getMultiTableBulkIdStrategy().prepare(
				jdbcServices,
				buildLocalConnectionAccess(),
				cfg.createMappings(),
				cfg.buildMapping(),
				properties
		);
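		// Automatic schema tooling driven by the hibernate.hbm2ddl.auto setting (create / update / validate / create-drop).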


		if ( settings.isAutoCreateSchema() ) {
			new SchemaExport( serviceRegistry, cfg )
					.setImportSqlCommandExtractor( serviceRegistry.getService( ImportSqlCommandExtractor.class ) )
					.create( false, true );
		}
		if ( settings.isAutoUpdateSchema() ) {
			new SchemaUpdate( serviceRegistry, cfg ).execute( false, true );
		}
		if ( settings.isAutoValidateSchema() ) {
			new SchemaValidator( serviceRegistry, cfg ).validate();
		}
		if ( settings.isAutoDropSchema() ) {
			schemaExport = new SchemaExport( serviceRegistry, cfg )
					.setImportSqlCommandExtractor( serviceRegistry.getService( ImportSqlCommandExtractor.class ) );
		}
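		// Determine the CurrentSessionContext implementation ("thread", "jta", "managed" or a custom class) backing getCurrentSession().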

		currentSessionContext = buildCurrentSessionContext();

		//checking for named queries
		if ( settings.isNamedQueryStartupCheckingEnabled() ) {
			final Map<String,HibernateException> errors = checkNamedQueries();
			if ( ! errors.isEmpty() ) {
				StringBuilder failingQueries = new StringBuilder( "Errors in named queries: " );
				String sep = "";
				for ( Map.Entry<String,HibernateException> entry : errors.entrySet() ) {
					LOG.namedQueryError( entry.getKey(), entry.getValue() );
					failingQueries.append( sep ).append( entry.getKey() );
					sep = ", ";
				}
				throw new HibernateException( failingQueries.toString() );
			}
		}

		// this needs to happen after persisters are all ready to go...
		this.fetchProfiles = new HashMap();
		itr = cfg.iterateFetchProfiles();
		while ( itr.hasNext() ) {
			final org.hibernate.mapping.FetchProfile mappingProfile =
					( org.hibernate.mapping.FetchProfile ) itr.next();
			final FetchProfile fetchProfile = new FetchProfile( mappingProfile.getName() );
			for ( org.hibernate.mapping.FetchProfile.Fetch mappingFetch : mappingProfile.getFetches() ) {
				// resolve the persister owning the fetch
				final String entityName = getImportedClassName( mappingFetch.getEntity() );
				final EntityPersister owner = entityName == null
						? null
						: entityPersisters.get( entityName );
				if ( owner == null ) {
					throw new HibernateException(
							"Unable to resolve entity reference [" + mappingFetch.getEntity()
									+ "] in fetch profile [" + fetchProfile.getName() + "]"
					);
				}

				// validate the specified association fetch
				Type associationType = owner.getPropertyType( mappingFetch.getAssociation() );
				if ( associationType == null || !associationType.isAssociationType() ) {
					throw new HibernateException( "Fetch profile [" + fetchProfile.getName() + "] specified an invalid association" );
				}

				// resolve the style
				final Fetch.Style fetchStyle = Fetch.Style.parse( mappingFetch.getStyle() );

				// then construct the fetch instance...
				fetchProfile.addFetch( new Association( owner, mappingFetch.getAssociation() ), fetchStyle );
				((Loadable) owner).registerAffectingFetchProfile( fetchProfile.getName() );
			}
			fetchProfiles.put( fetchProfile.getName(), fetchProfile );
		}
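
		// Finally: resolve the custom entity-dirtiness strategy and tenant-identifier resolver, build the
		// transaction environment, and notify the observer chain that the factory has been created.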

		this.customEntityDirtinessStrategy = determineCustomEntityDirtinessStrategy();
		this.currentTenantIdentifierResolver = determineCurrentTenantIdentifierResolver( cfg.getCurrentTenantIdentifierResolver() );
		this.transactionEnvironment = new TransactionEnvironmentImpl( this );
		this.observer.sessionFactoryCreated( this );
	}
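
The observer handled near the top of the constructor is whatever was registered on the Configuration. As a rough illustration only (a sketch assuming Configuration.setSessionFactoryObserver() is used; the two callbacks mirror the SessionFactoryObserver interface seen in the constructor):

import org.hibernate.SessionFactory;
import org.hibernate.SessionFactoryObserver;
import org.hibernate.cfg.Configuration;

public class ObserverSketch {
	public static void main(String[] args) {
		Configuration cfg = new Configuration().configure();
		// this observer is the one later passed into the SessionFactoryImpl constructor
		// and added to its internal observer chain
		cfg.setSessionFactoryObserver( new SessionFactoryObserver() {
			@Override
			public void sessionFactoryCreated(SessionFactory factory) {
				System.out.println( "session factory created" );
			}

			@Override
			public void sessionFactoryClosed(SessionFactory factory) {
				System.out.println( "session factory closed" );
			}
		} );
		// building the factory (see the bootstrap sketch near the top of this post)
		// will then trigger sessionFactoryCreated()
	}
}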
