/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @version $Id: SolrCore.java 1305693 2012-03-27 00:43:47Z janhoy $
*/
public final class SolrCore implements SolrInfoMBean {
public static final String version="1.0";
public static Logger log = LoggerFactory.getLogger(SolrCore.class);
private String name;
private String logid; // used to show what name is set
private final CoreDescriptor coreDescriptor;
private final SolrConfig solrConfig;
private final SolrResourceLoader resourceLoader;
private final IndexSchema schema;
private final String dataDir;
private final UpdateHandler updateHandler;
private final long startTime;
private final RequestHandlers reqHandlers;
private final Map<String,SearchComponent> searchComponents;
private final Map<String,UpdateRequestProcessorChain> updateProcessorChains;
private final Map<String,SolrInfoMBean> infoRegistry;
private IndexDeletionPolicyWrapper solrDelPolicy;
private DirectoryFactory directoryFactory;
private IndexReaderFactory indexReaderFactory;
static int boolean_query_max_clause_count = Integer.MIN_VALUE;
// only change the BooleanQuery maxClauseCount once for ALL cores...
void booleanQueryMaxClauseCount() {
synchronized(SolrCore.class) {
if (boolean_query_max_clause_count == Integer.MIN_VALUE) {
boolean_query_max_clause_count = solrConfig.booleanQueryMaxClauseCount;
BooleanQuery.setMaxClauseCount(boolean_query_max_clause_count);
} else if (boolean_query_max_clause_count != solrConfig.booleanQueryMaxClauseCount ) {
log.debug("BooleanQuery.maxClauseCount= " +boolean_query_max_clause_count+ ", ignoring " +solrConfig.booleanQueryMaxClauseCount);
}
}
}
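// Illustrative note (not from the original source): the per-core value comes from the
// <maxBooleanClauses> setting in solrconfig.xml; since BooleanQuery.maxClauseCount is a
// JVM-wide static, only the first core loaded actually applies its setting, e.g.:
//
//   <!-- solrconfig.xml (hypothetical value) -->
//   <maxBooleanClauses>1024</maxBooleanClauses>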
/**
* The SolrResourceLoader used to load all resources for this core.
* @since solr 1.3
*/
public SolrResourceLoader getResourceLoader() {
return resourceLoader;
}
/**
* Gets the configuration resource name used by this core instance.
* @since solr 1.3
*/
public String getConfigResource() {
return solrConfig.getResourceName();
}
/**
* Gets the configuration resource name used by this core instance.
* @deprecated Use {@link #getConfigResource()} instead.
*/
@Deprecated
public String getConfigFile() {
return solrConfig.getResourceName();
}
/**
* Gets the configuration object used by this core instance.
*/
public SolrConfig getSolrConfig() {
return solrConfig;
}
/**
* Gets the schema resource name used by this core instance.
* @since solr 1.3
*/
public String getSchemaResource() {
return schema.getResourceName();
}
/**
* Gets the schema resource name used by this core instance.
* @deprecated Use {@link #getSchemaResource()} instead.
*/
@Deprecated
public String getSchemaFile() {
return schema.getResourceName();
}
/**
* Gets the schema object used by this core instance.
*/
public IndexSchema getSchema() {
return schema;
}
/**
* Returns the index directory as specified in index.properties. If index.properties exists in dataDir
* and it contains an "index" property that points to a valid directory within dataDir, that directory
* is returned; otherwise dataDir/index is returned. Only called when creating new IndexSearchers
* and IndexWriters. Use the getIndexDir() method to find the currently active index directory.
*
* @return the index directory as given in index.properties, or the default dataDir/index
*/
public String getNewIndexDir() {
String result = dataDir + "index/";
File propsFile = new File(dataDir + "index.properties");
if (propsFile.exists()) {
Properties p = new Properties();
InputStream is = null;
try {
is = new FileInputStream(propsFile);
p.load(is);
} catch (IOException e) {
/*no op*/
} finally {
IOUtils.closeQuietly(is);
}
String s = p.getProperty("index");
if (s != null && s.trim().length() > 0) {
File tmp = new File(dataDir + s);
if (tmp.exists() && tmp.isDirectory())
result = dataDir + s;
}
}
return result;
}
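// Illustrative sketch (not from the original source): an index.properties file in dataDir
// that would make getNewIndexDir() return dataDir + "index.20120327" (a hypothetical
// directory name), provided that directory exists under dataDir:
//
//   # ${dataDir}/index.properties
//   index=index.20120327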
public DirectoryFactory getDirectoryFactory() {
return directoryFactory;
}
public IndexReaderFactory getIndexReaderFactory() {
return indexReaderFactory;
}
/**
* Returns a Map of name vs SolrInfoMBean objects. The returned map is an instance of
* a ConcurrentHashMap and therefore no synchronization is needed for putting, removing
* or iterating over it.
*
* @return the Info Registry map which contains SolrInfoMBean objects keyed by name
* @since solr 1.3
*/
public Map<String, SolrInfoMBean> getInfoRegistry() {
return infoRegistry;
}
private void initDeletionPolicy() {
PluginInfo info = solrConfig.getPluginInfo(IndexDeletionPolicy.class.getName());
IndexDeletionPolicy delPolicy = null;
if(info != null){
delPolicy = createInstance(info.className,IndexDeletionPolicy.class,"Deletion Policy for SOLR");
if (delPolicy instanceof NamedListInitializedPlugin) {
((NamedListInitializedPlugin) delPolicy).init(info.initArgs);
}
} else {
delPolicy = new SolrDeletionPolicy();
}
solrDelPolicy = new IndexDeletionPolicyWrapper(delPolicy);
}
private void initListeners() {
final Class<SolrEventListener> clazz = SolrEventListener.class;
final String label = "Event Listener";
for (PluginInfo info : solrConfig.getPluginInfos(SolrEventListener.class.getName())) {
String event = info.attributes.get("event");
if("firstSearcher".equals(event) ){
SolrEventListener obj = createInitInstance(info,clazz,label,null);
firstSearcherListeners.add(obj);
log.info(logid + "Added SolrEventListener for firstSearcher: " + obj);
} else if("newSearcher".equals(event) ){
SolrEventListener obj = createInitInstance(info,clazz,label,null);
newSearcherListeners.add(obj);
log.info(logid + "Added SolrEventListener for newSearcher: " + obj);
}
}
}
final List<SolrEventListener> firstSearcherListeners = new ArrayList<SolrEventListener>();
final List<SolrEventListener> newSearcherListeners = new ArrayList<SolrEventListener>();
/**
* NOTE: this function is not thread safe. However, it is safe to call within the
* inform( SolrCore core ) function for SolrCoreAware classes.
* Outside inform, this could potentially throw a ConcurrentModificationException
*
* @see SolrCoreAware
*/
public void registerFirstSearcherListener( SolrEventListener listener )
{
firstSearcherListeners.add( listener );
}
/**
* NOTE: this function is not thread safe. However, it is safe to call within the
* inform( SolrCore core ) function for SolrCoreAware classes.
* Outside inform, this could potentially throw a ConcurrentModificationException
*
* @see SolrCoreAware
*/
public void registerNewSearcherListener( SolrEventListener listener )
{
newSearcherListeners.add( listener );
}
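// Usage sketch (illustrative): a SolrCoreAware component can safely register listeners
// from within inform(SolrCore); MyListener is a hypothetical SolrEventListener.
//
//   public void inform(SolrCore core) {
//     core.registerFirstSearcherListener(new MyListener());
//     core.registerNewSearcherListener(new MyListener());
//   }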
/**
* NOTE: this function is not thread safe. However, it is safe to call within the
* inform( SolrCore core ) function for SolrCoreAware classes.
* Outside inform, this could potentially throw a ConcurrentModificationException
*
* @see SolrCoreAware
*/
public QueryResponseWriter registerResponseWriter( String name, QueryResponseWriter responseWriter ){
return responseWriters.put(name, responseWriter);
}
// gets a non-caching searcher
public SolrIndexSearcher newSearcher(String name) throws IOException {
return newSearcher(name, false);
}
// gets a non-caching searcher
public SolrIndexSearcher newSearcher(String name, boolean readOnly) throws IOException {
return new SolrIndexSearcher(this, schema, name, directoryFactory.open(getIndexDir()), readOnly, false);
}
private void initDirectoryFactory() {
DirectoryFactory dirFactory;
PluginInfo info = solrConfig.getPluginInfo(DirectoryFactory.class.getName());
if (info != null) {
dirFactory = (DirectoryFactory) getResourceLoader().newInstance(info.className);
dirFactory.init(info.initArgs);
} else {
dirFactory = new StandardDirectoryFactory();
}
// And set it
directoryFactory = dirFactory;
}
void initIndex() {
try {
String indexDir = getNewIndexDir();
boolean indexExists = getDirectoryFactory().exists(indexDir);
boolean firstTime = dirs.add(new File(indexDir).getCanonicalPath());
boolean removeLocks = solrConfig.unlockOnStartup;
if (indexExists && firstTime) {
// to remove locks, the directory must already exist... so we create it
// if it didn't exist already...
Directory dir = SolrIndexWriter.getDirectory(indexDir, getDirectoryFactory(), solrConfig.indexConfig);
if (dir != null) {
if (IndexWriter.isLocked(dir)) {
if (removeLocks) {
log.warn(logid + "WARNING: Solr index directory '{}' is locked. Unlocking...", indexDir);
IndexWriter.unlock(dir);
} else {
log.error(logid + "Solr index directory '{}' is locked. Throwing exception", indexDir);
throw new LockObtainFailedException("Index locked for write for core " + name);
}
}
dir.close();
}
}
// Create the index if it doesn't exist.
if (!indexExists) {
log.warn(logid + "Solr index directory '" + new File(indexDir) + "' doesn't exist."
+ " Creating new index...");
SolrIndexWriter writer = new SolrIndexWriter("SolrCore.initIndex", indexDir, getDirectoryFactory(),
true, schema, solrConfig.indexConfig, solrDelPolicy);
writer.close();
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/** Creates an instance by trying a constructor that accepts a SolrCore before
* trying the default (no arg) constructor.
*@param className the instance class to create
*@param cast the class or interface that the instance should extend or implement
*@param msg a message helping compose the exception error if any occurs.
*@return the desired instance
*@throws SolrException if the object could not be instantiated
*/
private <T extends Object> T createInstance(String className, Class<T> cast, String msg) {
Class clazz = null;
if (msg == null) msg = "SolrCore Object";
try {
clazz = getResourceLoader().findClass(className);
if (cast != null && !cast.isAssignableFrom(clazz))
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,"Error Instantiating "+msg+", "+className+ " is not a " +cast.getName());
// Most classes do not have a constructor that takes a SolrCore argument; the recommended way to
// obtain the SolrCore is to implement SolrCoreAware. Looking up such a constructor directly would
// almost always throw a NoSuchMethodException, so iterate through the available constructors instead.
Constructor[] cons = clazz.getConstructors();
for (Constructor con : cons) {
Class[] types = con.getParameterTypes();
if(types.length == 1 && types[0] == SolrCore.class){
return (T)con.newInstance(this);
}
}
return (T) getResourceLoader().newInstance(className);//use the empty constructor
} catch (SolrException e) {
throw e;
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,"Error Instantiating "+msg+", "+className+ " failed to instantiate " +cast.getName(), e);
}
}
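// Usage sketch (illustrative): instantiate a configured plugin, preferring a constructor
// that takes this SolrCore; the class name below is hypothetical.
//
//   IndexDeletionPolicy policy = createInstance("com.example.MyDeletionPolicy",
//       IndexDeletionPolicy.class, "Deletion Policy for SOLR");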
public <T extends Object> T createInitInstance(PluginInfo info, Class<T> cast, String msg, String defClassName){
if(info == null) return null;
T o = createInstance(info.className == null ? defClassName : info.className,cast, msg);
if (o instanceof PluginInfoInitialized) {
((PluginInfoInitialized) o).init(info);
} else if (o instanceof NamedListInitializedPlugin) {
((NamedListInitializedPlugin) o).init(info.initArgs);
}
return o;
}
/**
* @return the last core initialized. If you are using multiple cores,
* this is not a function to use.
*
* @deprecated Use {@link CoreContainer#getCore(String)} instead.
*/
@Deprecated
public static SolrCore getSolrCore() {
synchronized( SolrCore.class ) {
if( instance == null ) {
try {
// sets 'instance' to the latest solr core
CoreContainer.Initializer init = new CoreContainer.Initializer();
instance = init.initialize().getCore("");
} catch(Exception xany) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
"error creating core", xany );
}
}
}
return instance;
}
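// Recommended replacement sketch (illustrative): look the core up by name through the
// CoreContainer and release it when done; "collection1" is a hypothetical core name.
//
//   SolrCore core = coreContainer.getCore("collection1");
//   try {
//     // ... use the core ...
//   } finally {
//     core.close();  // getCore() incremented the reference count
//   }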
/**
* Creates a new core and registers it in the list of cores.
* If a core with the same name already exists, it will be stopped and replaced by this one.
*@param dataDir the index directory
*@param config a solr config instance
*@param schema a solr schema instance
*
*@since solr 1.3
*/
public SolrCore(String name, String dataDir, SolrConfig config, IndexSchema schema, CoreDescriptor cd) {
coreDescriptor = cd;
this.setName( name );
resourceLoader = config.getResourceLoader();
if (dataDir == null){
dataDir = config.getDataDir();
if(dataDir == null) dataDir = cd.getDataDir();
}
// Processors initialized before the handlers
updateProcessorChains = loadUpdateProcessorChains();
reqHandlers = new RequestHandlers(this);
reqHandlers.initHandlersFromConfig( solrConfig );
// Handle things that should eventually go away
initDeprecatedSupport();
final CountDownLatch latch = new CountDownLatch(1);
try {
// cause the executor to stall so firstSearcher events won't fire
// until after inform() has been called for all components.
// searchExecutor must be single-threaded for this to work
searcherExecutor.submit(new Callable() {
public Object call() throws Exception {
latch.await();
return null;
}
});
// Open the searcher *before* the update handler so we don't end up opening
// one in the middle.
// With lockless commits in Lucene now, this probably shouldn't be an issue anymore
getSearcher(false,false,null);
// Finally tell anyone who wants to know
resourceLoader.inform( resourceLoader );
resourceLoader.inform( this ); // last call before the latch is released.
instance = this;
} catch (Throwable e) {
latch.countDown();//release the latch, otherwise we block trying to do the close. This should be fine, since counting down on a latch of 0 is still fine
//close down the searcher and any other resources, if it exists, as this is not recoverable
close();
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e, false);
} finally {
// allow firstSearcher events to fire and make sure it is released
latch.countDown();
}
infoRegistry.put("core", this);
// register any SolrInfoMBeans SolrResourceLoader initialized
//
// this must happen after the latch is released, because a JMX server impl may
// choose to block on registering until properties can be fetched from an MBean,
// and a SolrCoreAware MBean may have properties that depend on getting a Searcher
// from the core.
resourceLoader.inform(infoRegistry);
}
/**
* Load the request processors
*/
private Map<String, UpdateRequestProcessorChain> loadUpdateProcessorChains() {
Map<String, UpdateRequestProcessorChain> map = new HashMap<String, UpdateRequestProcessorChain>();
UpdateRequestProcessorChain def = initPlugins(map,UpdateRequestProcessorChain.class, UpdateRequestProcessorChain.class.getName());
if(def == null){
def = map.get(null);
}
if (def == null) {
// construct the default chain
UpdateRequestProcessorFactory[] factories = new UpdateRequestProcessorFactory[]{
new LogUpdateProcessorFactory(),
new RunUpdateProcessorFactory()
};
def = new UpdateRequestProcessorChain(factories, this);
}
map.put(null, def);
map.put("", def);
return map;
}
/**
* @return the update processor chain registered to the given name. Throws an exception if the chain is undefined
*/
public UpdateRequestProcessorChain getUpdateProcessingChain( final String name )
{
UpdateRequestProcessorChain chain = updateProcessorChains.get( name );
if( chain == null ) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
"unknown UpdateRequestProcessorChain: "+name );
}
return chain;
}
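// Usage sketch (illustrative): look up a chain by name and build a processor for a single
// request; passing null selects the default chain. req and rsp are hypothetical locals.
//
//   UpdateRequestProcessorChain chain = core.getUpdateProcessingChain("mychain");
//   UpdateRequestProcessor processor = chain.createProcessor(req, rsp);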
// this core current usage count
private final AtomicInteger refCount = new AtomicInteger(1);
final void open() {
refCount.incrementAndGet();
}
/**
* Close all resources allocated by the core if it is no longer in use...
* <ul>
*   <li>searcher</li>
*   <li>updateHandler</li>
*   <li>all CloseHooks will be notified</li>
*   <li>All MBeans will be unregistered from MBeanServer if JMX was enabled</li>
* </ul>
* <p>
* The behavior of this method is determined by the result of decrementing
* the core's reference count (a core is created with a reference count of 1)...
* </p>
* <ul>
*   <li>If the reference count is > 0, the usage count is decreased by 1 and no
*       resources are released.</li>
*   <li>If the reference count is == 0, the resources are released.</li>
*   <li>If the reference count is < 0, an error is logged and no further action
*       is taken.</li>
* </ul>
* @see #isClosed()
*/
public void close() {
int count = refCount.decrementAndGet();
if (count > 0) return; // close is called often, and only actually closes if nothing is using it.
if (count < 0) {
log.error("Too many close [count:{}] on {}. Please report this exception to [email protected]", count, this );
return;
}
log.info(logid+" CLOSING SolrCore " + this);
try {
infoRegistry.clear();
} catch (Exception e) {
SolrException.log(log, e);
}
try {
updateHandler.close();
} catch (Exception e) {
SolrException.log(log,e);
}
try {
searcherExecutor.shutdown();
if (!searcherExecutor.awaitTermination(60, TimeUnit.SECONDS)) {
log.error("Timeout waiting for searchExecutor to terminate");
}
} catch (Exception e) {
SolrException.log(log,e);
}
try {
// Since we waited for the searcherExecutor to shut down,
// there should be no more searchers warming in the background
// that we need to take care of.
//
// For the case that a searcher was registered *before* warming
// then the searchExecutor will throw an exception when getSearcher()
// tries to use it, and the exception handling code should close it.
closeSearcher();
} catch (Exception e) {
SolrException.log(log,e);
}
}
/** Current core usage count. */
public int getOpenCount() {
return refCount.get();
}
/** Whether this core is closed. */
public boolean isClosed() {
return refCount.get() <= 0;
}
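// Reference-counting sketch (illustrative): every open() must be balanced by a close();
// resources are only released once the count reaches zero.
//
//   core.open();        // count goes from 1 to 2
//   try {
//     // ... use the core ...
//   } finally {
//     core.close();     // count back to 1; nothing is released yet
//   }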
@Override
protected void finalize() throws Throwable {
try {
if (getOpenCount() != 0) {
log.error("REFCOUNT ERROR: unreferenced " + this + " (" + getName()
+ ") has a reference count of " + getOpenCount());
}
} finally {
super.finalize();
}
}
private Collection<CloseHook> closeHooks = null;
/**
* Add a close callback hook
*/
public void addCloseHook( CloseHook hook )
{
if( closeHooks == null ) {
closeHooks = new ArrayList<CloseHook>();
}
closeHooks.add( hook );
}
/**
* Returns a Request object based on the admin/pingQuery section
* of the Solr config file.
*
* @deprecated use {@link org.apache.solr.handler.PingRequestHandler} instead
*/
@Deprecated
public SolrQueryRequest getPingQueryRequest() {
return solrConfig.getPingQueryRequest(this);
}
////////////////////////////////////////////////////////////////////////////////
// Request Handler
////////////////////////////////////////////////////////////////////////////////
/**
* Get the request handler registered to a given name.
*
* This function is thread safe.
*/
public SolrRequestHandler getRequestHandler(String handlerName) {
return reqHandlers.get(handlerName);
}
/**
* Returns an unmodifiable Map containing the registered handlers of the specified type.
*/
public Map<String, SolrRequestHandler> getRequestHandlers(Class clazz) {
return reqHandlers.getAll(clazz);
}
/**
* Returns an unmodifiable Map containing the registered handlers
*/
public Map<String, SolrRequestHandler> getRequestHandlers() {
return reqHandlers.getRequestHandlers();
}
/**
* Get the SolrHighlighter
*/
@Deprecated
public SolrHighlighter getHighlighter() {
HighlightComponent hl = (HighlightComponent) searchComponents.get(HighlightComponent.COMPONENT_NAME);
return hl==null? null: hl.getHighlighter();
}
/**
* Registers a handler at the specified location. If one exists there, it will be replaced.
* To remove a handler, register null at its path.
*
* Once registered the handler can be accessed through:
* <pre>
*   http://${host}:${port}/${context}/${handlerName}
* or:
*   http://${host}:${port}/${context}/select?qt=${handlerName}
* </pre>
*
* Handlers must be initialized before getting registered. Registered
* handlers can immediately accept requests.
*
* This call is thread safe.
*
* @return the previous SolrRequestHandler registered to this name, or null if none.
*/
public SolrRequestHandler registerRequestHandler(String handlerName, SolrRequestHandler handler) {
return reqHandlers.register(handlerName,handler);
}
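// Usage sketch (illustrative): register an already-initialized handler at a path and look
// it up again; MyRequestHandler is a hypothetical SolrRequestHandler implementation.
//
//   core.registerRequestHandler("/mypath", new MyRequestHandler());
//   SolrRequestHandler handler = core.getRequestHandler("/mypath");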
/**
* Register the default search components
*/
private Map<String, SearchComponent> loadSearchComponents()
{
Map<String, SearchComponent> components = new HashMap<String, SearchComponent>();
initPlugins(components,SearchComponent.class);
for (Map.Entry<String, SearchComponent> e : components.entrySet()) {
SearchComponent c = e.getValue();
if (c instanceof HighlightComponent) {
HighlightComponent hl = (HighlightComponent) c;
if(!HighlightComponent.COMPONENT_NAME.equals(e.getKey())){
components.put(HighlightComponent.COMPONENT_NAME,hl);
}
break;
}
}
addIfNotPresent(components,HighlightComponent.COMPONENT_NAME,HighlightComponent.class);
addIfNotPresent(components,QueryComponent.COMPONENT_NAME,QueryComponent.class);
addIfNotPresent(components,FacetComponent.COMPONENT_NAME,FacetComponent.class);
addIfNotPresent(components,MoreLikeThisComponent.COMPONENT_NAME,MoreLikeThisComponent.class);
addIfNotPresent(components,StatsComponent.COMPONENT_NAME,StatsComponent.class);
addIfNotPresent(components,DebugComponent.COMPONENT_NAME,DebugComponent.class);
return components;
}
private <T> void addIfNotPresent(Map<String, T> registry, String name, Class<? extends T> c){
if(!registry.containsKey(name)){
T searchComp = (T) resourceLoader.newInstance(c.getName());
registry.put(name, searchComp);
if (searchComp instanceof SolrInfoMBean){
infoRegistry.put(((SolrInfoMBean)searchComp).getName(), (SolrInfoMBean)searchComp);
}
}
}
/**
* @return the Search Component registered to a given name. Throws an exception if the component is undefined
*/
public SearchComponent getSearchComponent( String name )
{
SearchComponent component = searchComponents.get( name );
if( component == null ) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
"Unknown Search Component: "+name );
}
return component;
}
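// Usage sketch (illustrative): fetch one of the default components by its well-known name.
//
//   SearchComponent queryComponent = core.getSearchComponent(QueryComponent.COMPONENT_NAME);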
/**
* Accessor for all the Search Components
* @return An unmodifiable Map of Search Components
*/
public Map<String, SearchComponent> getSearchComponents() {
return Collections.unmodifiableMap(searchComponents);
}
/**
* RequestHandlers need access to the updateHandler so they can all talk to the
* same RAM indexer.
*/
public UpdateHandler getUpdateHandler() {
return updateHandler;
}
////////////////////////////////////////////////////////////////////////////////
// Searcher Control
////////////////////////////////////////////////////////////////////////////////
// The current searcher used to service queries.
// Don't access this directly!!!! use getSearcher() to
// get it (and it will increment the ref count at the same time).
// This reference is protected by searcherLock.
private RefCounted<SolrIndexSearcher> _searcher;
// All of the open searchers. Don't access this directly.
// protected by synchronizing on searcherLock.
private final LinkedList<RefCounted<SolrIndexSearcher>> _searchers = new LinkedList<RefCounted<SolrIndexSearcher>>();
final ExecutorService searcherExecutor = Executors.newSingleThreadExecutor();
private int onDeckSearchers; // number of searchers preparing
private Object searcherLock = new Object(); // the sync object for the searcher
private final int maxWarmingSearchers; // max number of on-deck searchers allowed
/**
* Return a registered {@link RefCounted}<{@link SolrIndexSearcher}> with
* the reference count incremented. It must be decremented when no longer needed.
* This method should not be called from SolrCoreAware.inform() since it can result
* in a deadlock if useColdSearcher==false.
* If handling a normal request, the searcher should be obtained from
* {@link org.apache.solr.request.SolrQueryRequest#getSearcher()} instead.
*/
public RefCounted<SolrIndexSearcher> getSearcher() {
try {
return getSearcher(false,true,null);
} catch (IOException e) {
SolrException.log(log,null,e);
return null;
}
}
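// Usage sketch (illustrative): the returned holder already has its reference count
// incremented, so the caller must decrement it when finished.
//
//   RefCounted<SolrIndexSearcher> holder = core.getSearcher();
//   try {
//     SolrIndexSearcher searcher = holder.get();
//     // ... run queries against 'searcher' ...
//   } finally {
//     holder.decref();
//   }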
/**
* Return the newest {@link RefCounted}<{@link SolrIndexSearcher}> with
* the reference count incremented. It must be decremented when no longer needed.
* If no searcher is currently open, then if openNew==true a new searcher will be opened,
* or null is returned if openNew==false.
*/
public RefCounted<SolrIndexSearcher> getNewestSearcher(boolean openNew) {
synchronized (searcherLock) {
if (_searchers.isEmpty()) {
if (!openNew) return null;
// Not currently implemented since simply calling getSearcher during inform()
// can result in a deadlock. Right now, solr always opens a searcher first
// before calling inform() anyway, so this should never happen.
throw new UnsupportedOperationException();
}
RefCounted<SolrIndexSearcher> newest = _searchers.getLast();
newest.incref();
return newest;
}
}
/**
* Get a {@link SolrIndexSearcher} or start the process of creating a new one.
* <p>
* The registered searcher is the default searcher used to service queries.
* A searcher will normally be registered after all of the warming
* and event handlers (newSearcher or firstSearcher events) have run.
* In the case where there is no registered searcher, the newly created searcher will
* be registered before running the event handlers (a slow searcher is better than no searcher).
* </p>
* <p>
* These searchers contain read-only IndexReaders. To access a non read-only IndexReader,
* see newSearcher(String name, boolean readOnly).
* </p>
* <p>
* If forceNew==true then
* a new searcher will be opened and registered regardless of whether there is already
* a registered searcher or other searchers in the process of being created.
* </p>
* <p>
* If forceNew==false then:
* <ul>
*   <li>If a searcher is already registered, that searcher will be returned</li>
*   <li>If no searcher is currently registered, but at least one is in the process of being created, then
*       this call will block until the first searcher is registered</li>
*   <li>If no searcher is currently registered, and no searchers are in the process of being registered, a new
*       searcher will be created.</li>
* </ul>
* </p>
* <p>
* If returnSearcher==true then a {@link RefCounted}<{@link SolrIndexSearcher}> will be returned with
* the reference count incremented. It must be decremented when no longer needed.
* </p>
* <p>
* If waitSearcher!=null and a new {@link SolrIndexSearcher} was created,
* then it is filled in with a Future that will return after the searcher is registered. The Future may be set to
* null in which case the SolrIndexSearcher created has already been registered at the time
* this method returned.
* </p>
* @param forceNew if true, force the open of a new index searcher regardless if there is already one open.
* @param returnSearcher if true, returns a {@link SolrIndexSearcher} holder with the refcount already incremented.
* @param waitSearcher if non-null, will be filled in with a {@link Future} that will return after the new searcher is registered.
* @throws IOException
*/
public RefCounted<SolrIndexSearcher> getSearcher(boolean forceNew, boolean returnSearcher, final Future[] waitSearcher) throws IOException {
// it may take some time to open an index.... we may need to make
// sure that two threads aren't trying to open one at the same time
// if it isn't necessary.
synchronized (searcherLock) {
// see if we can return the current searcher
if (_searcher!=null && !forceNew) {
if (returnSearcher) {
_searcher.incref();
return _searcher;
} else {
return null;
}
}
// check to see if we can wait for someone else's searcher to be set
if (onDeckSearchers>0 && !forceNew && _searcher==null) {
try {
searcherLock.wait();
} catch (InterruptedException e) {
log.info(SolrException.toStr(e));
}
}
// check again: see if we can return right now
if (_searcher!=null && !forceNew) {
if (returnSearcher) {
_searcher.incref();
return _searcher;
} else {
return null;
}
}
// At this point, we know we need to open a new searcher...
// first: increment count to signal other threads that we are
// opening a new searcher.
onDeckSearchers++;
if (onDeckSearchers < 1) {
// should never happen... just a sanity check
log.error(logid+"ERROR!!! onDeckSearchers is " + onDeckSearchers);
onDeckSearchers=1; // reset
} else if (onDeckSearchers > maxWarmingSearchers) {
onDeckSearchers--;
String msg="Error opening new searcher. exceeded limit of maxWarmingSearchers="+maxWarmingSearchers + ", try again later.";
log.warn(logid+""+ msg);
// HTTP 503==service unavailable, or 409==Conflict
throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,msg,true);
} else if (onDeckSearchers > 1) {
log.info(logid+"PERFORMANCE WARNING: Overlapping onDeckSearchers=" + onDeckSearchers);
}
}
// open the index synchronously
// if this fails, we need to decrement onDeckSearchers again.
SolrIndexSearcher tmp;
RefCounted<SolrIndexSearcher> newestSearcher = null;
try {
newestSearcher = getNewestSearcher(false);
String newIndexDir = getNewIndexDir();
File indexDirFile = new File(getIndexDir()).getCanonicalFile();
File newIndexDirFile = new File(newIndexDir).getCanonicalFile();
if (newestSearcher != null && solrConfig.reopenReaders
&& indexDirFile.equals(newIndexDirFile)) {
IndexReader currentReader = newestSearcher.get().getIndexReader();
IndexReader newReader = IndexReader.openIfChanged(currentReader);
if (newReader == null) {
currentReader.incRef();
newReader = currentReader;
}
tmp = new SolrIndexSearcher(this, schema, "main", newReader, true, true);
} else {
IndexReader reader = getIndexReaderFactory().newReader(getDirectoryFactory().open(newIndexDir), true);
tmp = new SolrIndexSearcher(this, schema, "main", reader, true, true);
}
} catch (Throwable th) {
synchronized(searcherLock) {
onDeckSearchers--;
// notify another waiter to continue... it may succeed
// and wake any others.
searcherLock.notify();
}
// need to close the searcher here??? we shouldn't have to.
throw new RuntimeException(th);
} finally {
if (newestSearcher != null) {
newestSearcher.decref();
}
}
final SolrIndexSearcher newSearcher=tmp;
RefCounted<SolrIndexSearcher> currSearcherHolder=null;
final RefCounted<SolrIndexSearcher> newSearchHolder=newHolder(newSearcher);
if (returnSearcher) newSearchHolder.incref();
// a signal to decrement onDeckSearchers if something goes wrong.
final boolean[] decrementOnDeckCount=new boolean[1];
decrementOnDeckCount[0]=true;
try {
boolean alreadyRegistered = false;
synchronized (searcherLock) {
if (_searcher == null) {
// if there isn't a current searcher then we may
// want to register this one before warming is complete instead of waiting.
if (solrConfig.useColdSearcher) {
registerSearcher(newSearchHolder);
decrementOnDeckCount[0]=false;
alreadyRegistered=true;
}
} else {
// get a reference to the current searcher for purposes of autowarming.
currSearcherHolder=_searcher;
currSearcherHolder.incref();
}
}
final SolrIndexSearcher currSearcher = currSearcherHolder==null ? null : currSearcherHolder.get();
//
// Note! If we registered the new searcher (but didn't increment its
// reference count because returnSearcher==false), it's possible for
// someone else to register another searcher, and thus cause newSearcher
// to close while we are warming.
//
// Should we protect against that by incrementing the reference count?
// Maybe we should just let it fail? After all, if returnSearcher==false
// and newSearcher has been de-registered, what's the point of continuing?
//
Future future=null;
// warm the new searcher based on the current searcher.
// should this go before the other event handlers or after?
if (currSearcher != null) {
future = searcherExecutor.submit(
new Callable() {
public Object call() throws Exception {
try {
newSearcher.warm(currSearcher);
} catch (Throwable e) {
SolrException.logOnce(log,null,e);
}
return null;
}
}
);
}
// WARNING: this code assumes a single threaded executor (that all tasks
// queued will finish first).
final RefCounted currSearcherHolderF = currSearcherHolder;
if (!alreadyRegistered) {
future = searcherExecutor.submit(
new Callable() {
public Object call() throws Exception {
try {
// signal that we no longer need to decrement
// the count *before* registering the searcher since
// registerSearcher will decrement even if it errors.
decrementOnDeckCount[0]=false;
registerSearcher(newSearchHolder);
} catch (Throwable e) {
SolrException.logOnce(log,null,e);
} finally {
// we are all done with the old searcher we used
// for warming...
if (currSearcherHolderF!=null) currSearcherHolderF.decref();
}
return null;
}
}
);
}
if (waitSearcher != null) {
waitSearcher[0] = future;
}
// Return the searcher as the warming tasks run in parallel
// callers may wait on the waitSearcher future returned.
return returnSearcher ? newSearchHolder : null;
} catch (Exception e) {
SolrException.logOnce(log, null, e);
if (currSearcherHolder != null) currSearcherHolder.decref();
synchronized (searcherLock) {
if (decrementOnDeckCount[0]) {
onDeckSearchers--;
}
if (onDeckSearchers < 0) {
// sanity check... should never happen
log.error(logid+"ERROR!!! onDeckSearchers after decrement=" + onDeckSearchers);
onDeckSearchers=0; // try and recover
}
// if we failed, we need to wake up at least one waiter to continue the process
searcherLock.notify();
}
// since the indexreader was already opened, assume we can continue on
// even though we got an exception.
return returnSearcher ? newSearchHolder : null;
}
}
private RefCounted<SolrIndexSearcher> newHolder(SolrIndexSearcher newSearcher) {
RefCounted<SolrIndexSearcher> holder = new RefCounted<SolrIndexSearcher>(newSearcher) {
@Override
public void close() {
try {
synchronized(searcherLock) {
// it's possible for someone to get a reference via the _searchers queue
// and increment the refcount while RefCounted.close() is being called.
// we check the refcount again to see if this has happened and abort the close.
// This relies on the RefCounted class allowing close() to be called every
// time the counter hits zero.
if (refcount.get() > 0) return;
_searchers.remove(this);
}
resource.close();
} catch (IOException e) {
log.error("Error closing searcher:" + SolrException.toStr(e));
}
}
};
holder.incref(); // set ref count to 1 to account for this._searcher
return holder;
}
// Take control of newSearcherHolder (which should have a reference count of at
// least 1 already). If the caller wishes to use the newSearcherHolder directly
// after registering it, then they should increment the reference count *before*
// calling this method.
//
// onDeckSearchers will also be decremented (it should have been incremented
// as a result of opening a new searcher).
private void registerSearcher(RefCounted<SolrIndexSearcher> newSearcherHolder) throws IOException {
synchronized (searcherLock) {
try {
if (_searcher != null) {
_searcher.decref(); // dec refcount for this._searcher
_searcher=null;
}
/***
// a searcher may have been warming asynchronously while the core was being closed.
// if this happens, just close the searcher.
if (isClosed()) {
// NOTE: this should not happen now - see close() for details.
// *BUT* if we left it enabled, this could still happen before
// close() stopped the executor - so disable this test for now.
log.error("Ignoring searcher register on closed core:" + newSearcher);
_searcher.decref();
}
***/
_searcher = newSearcherHolder;
SolrIndexSearcher newSearcher = newSearcherHolder.get();
newSearcher.register(); // register subitems (caches) with the info registry
log.info(logid + "Registered new searcher " + newSearcher);
} catch (Throwable e) {
log(e);
} finally {
// wake up anyone waiting for a searcher
// even in the face of errors.
onDeckSearchers--;
searcherLock.notifyAll();
}
}
}
public void closeSearcher() {
log.info(logid+"Closing main searcher on request.");
synchronized (searcherLock) {
if (_searcher != null) {
_searcher.decref(); // dec refcount for this._searcher
_searcher=null; // isClosed() does check this
infoRegistry.remove("currentSearcher");
}
}
}