The relevant classes are:
1. LRUCache: a least-recently-used cache. It maintains a maximum capacity M internally and never holds more than M elements; when the (M+1)-th element is inserted, the element that has gone unused the longest (the least recently used one) is evicted.
2. ExpireCache: a time-based expiration cache. A single expiration time T is managed internally; once an element has been in the cache longer than T after insertion, it is removed.
3. AccessExpireCache: a time-based expiration cache. Unlike ExpireCache, an element's expiration timer restarts whenever the element is accessed, instead of counting only from the time of insertion (see the sketch after this list).
4. UniqueExpireCache: a time-based expiration cache. Unlike ExpireCache, every element carries its own individual expiration time.
5. UniqueAccessExpireCache: a time-based expiration cache. Unlike AccessExpireCache, every element carries its own individual expiration time.
6. ExpireLRUCache: a combination of time-based expiration and the LRU policy. An element becomes invalid as soon as either expiration condition is triggered.
7. AccessExpireLRUCache: a combination of time-based expiration and the LRU policy. Compared with ExpireLRUCache, an element's expiration timer restarts whenever the element is accessed, instead of counting only from the time of insertion.
8. UniqueExpireLRUCache: a combination of time-based expiration and the LRU policy. Compared with ExpireLRUCache, every element carries its own individual expiration time.
9. UniqueAccessExpireLRUCache: a combination of time-based expiration and the LRU policy. Compared with UniqueExpireLRUCache, an element's expiration timer restarts whenever the element is accessed, instead of counting only from the time of insertion.
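To make the difference between the expiration variants concrete, here is a minimal usage sketch of AccessExpireCache. It assumes the constructor's expiration argument is given in milliseconds (as in the POCO documentation); the concrete values (1000 ms, the sleep durations) are only illustrative.

#include "Poco/AccessExpireCache.h"
#include "Poco/Bugcheck.h"
#include "Poco/Thread.h"
#include <string>

int main()
{
    // Each entry expires 1000 ms after its *last access*, not after insertion.
    Poco::AccessExpireCache<int, std::string> cache(1000);

    cache.add(1, std::string("value"));
    Poco::Thread::sleep(800);

    // get() counts as an access, so the 1000 ms clock restarts here.
    Poco::SharedPtr<std::string> ptr = cache.get(1);
    poco_assert (!ptr.isNull());

    Poco::Thread::sleep(800);
    poco_assert (cache.has(1));    // only ~800 ms since the last access

    Poco::Thread::sleep(1200);
    poco_assert (!cache.has(1));   // >1000 ms without access: entry expired
    return 0;
}

With a plain ExpireCache the entry would already have expired before the second check, because its clock starts at insertion and is never reset.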
template <class TKey, class TValue, class TStrategy, class TMutex = FastMutex, class TEventMutex = FastMutex>
class AbstractCache
    /// An AbstractCache is the interface of all caches.
{
public:
    FIFOEvent<const KeyValueArgs<TKey, TValue >, TEventMutex > Add;
    FIFOEvent<const KeyValueArgs<TKey, TValue >, TEventMutex > Update;
    FIFOEvent<const TKey, TEventMutex>                          Remove;
    FIFOEvent<const TKey, TEventMutex>                          Get;
    FIFOEvent<const EventArgs, TEventMutex>                     Clear;

    typedef std::map<TKey, SharedPtr<TValue > > DataHolder;
    typedef typename DataHolder::iterator       Iterator;
    typedef typename DataHolder::const_iterator ConstIterator;
    typedef std::set<TKey>                      KeySet;

    AbstractCache()
    {
        initialize();
    }

    AbstractCache(const TStrategy& strat): _strategy(strat)
    {
        initialize();
    }

    virtual ~AbstractCache()
    {
        uninitialize();
    }

    // ...........

protected:
    mutable FIFOEvent<ValidArgs<TKey> > IsValid;
    mutable FIFOEvent<KeySet>           Replace;

    void initialize()
        /// Sets up event registration.
    {
        Add     += Delegate<TStrategy, const KeyValueArgs<TKey, TValue> >(&_strategy, &TStrategy::onAdd);
        Update  += Delegate<TStrategy, const KeyValueArgs<TKey, TValue> >(&_strategy, &TStrategy::onUpdate);
        Remove  += Delegate<TStrategy, const TKey>(&_strategy, &TStrategy::onRemove);
        Get     += Delegate<TStrategy, const TKey>(&_strategy, &TStrategy::onGet);
        Clear   += Delegate<TStrategy, const EventArgs>(&_strategy, &TStrategy::onClear);
        IsValid += Delegate<TStrategy, ValidArgs<TKey> >(&_strategy, &TStrategy::onIsValid);
        Replace += Delegate<TStrategy, KeySet>(&_strategy, &TStrategy::onReplace);
    }

    void uninitialize()
        /// Reverts event registration.
    {
        Add     -= Delegate<TStrategy, const KeyValueArgs<TKey, TValue> >(&_strategy, &TStrategy::onAdd);
        Update  -= Delegate<TStrategy, const KeyValueArgs<TKey, TValue> >(&_strategy, &TStrategy::onUpdate);
        Remove  -= Delegate<TStrategy, const TKey>(&_strategy, &TStrategy::onRemove);
        Get     -= Delegate<TStrategy, const TKey>(&_strategy, &TStrategy::onGet);
        Clear   -= Delegate<TStrategy, const EventArgs>(&_strategy, &TStrategy::onClear);
        IsValid -= Delegate<TStrategy, ValidArgs<TKey> >(&_strategy, &TStrategy::onIsValid);
        Replace -= Delegate<TStrategy, KeySet>(&_strategy, &TStrategy::onReplace);
    }

    void doAdd(const TKey& key, const TValue& val)
        /// Adds the key value pair to the cache.
        /// If for the key already an entry exists, it will be overwritten.
    {
        Iterator it = _data.find(key);
        doRemove(it);

        KeyValueArgs<TKey, TValue> args(key, val);
        Add.notify(this, args);
        _data.insert(std::make_pair(key, SharedPtr<TValue>(new TValue(val))));

        doReplace();
    }

    void doAdd(const TKey& key, SharedPtr<TValue>& val)
        /// Adds the key value pair to the cache.
        /// If for the key already an entry exists, it will be overwritten.
    {
        Iterator it = _data.find(key);
        doRemove(it);

        KeyValueArgs<TKey, TValue> args(key, *val);
        Add.notify(this, args);
        _data.insert(std::make_pair(key, val));

        doReplace();
    }

    void doUpdate(const TKey& key, const TValue& val)
        /// Adds the key value pair to the cache.
        /// If for the key already an entry exists, it will be overwritten.
    {
        KeyValueArgs<TKey, TValue> args(key, val);
        Iterator it = _data.find(key);
        if (it == _data.end())
        {
            Add.notify(this, args);
            _data.insert(std::make_pair(key, SharedPtr<TValue>(new TValue(val))));
        }
        else
        {
            Update.notify(this, args);
            it->second = SharedPtr<TValue>(new TValue(val));
        }

        doReplace();
    }

    void doUpdate(const TKey& key, SharedPtr<TValue>& val)
        /// Adds the key value pair to the cache.
        /// If for the key already an entry exists, it will be overwritten.
    {
        KeyValueArgs<TKey, TValue> args(key, *val);
        Iterator it = _data.find(key);
        if (it == _data.end())
        {
            Add.notify(this, args);
            _data.insert(std::make_pair(key, val));
        }
        else
        {
            Update.notify(this, args);
            it->second = val;
        }

        doReplace();
    }

    void doRemove(Iterator it)
        /// Removes an entry from the cache. If the entry is not found
        /// the remove is ignored.
    {
        if (it != _data.end())
        {
            Remove.notify(this, it->first);
            _data.erase(it);
        }
    }

    bool doHas(const TKey& key) const
        /// Returns true if the cache contains a value for the key.
    {
        // ask the strategy if the key is valid
        ConstIterator it = _data.find(key);
        bool result = false;
        if (it != _data.end())
        {
            ValidArgs<TKey> args(key);
            IsValid.notify(this, args);
            result = args.isValid();
        }
        return result;
    }

    SharedPtr<TValue> doGet(const TKey& key)
        /// Returns a SharedPtr of the cache entry, returns 0 if for
        /// the key no value was found.
    {
        Iterator it = _data.find(key);
        SharedPtr<TValue> result;
        if (it != _data.end())
        {
            // inform all strategies that a read-access to an element happens
            Get.notify(this, key);
            // ask all strategies if the key is valid
            ValidArgs<TKey> args(key);
            IsValid.notify(this, args);
            if (!args.isValid())
            {
                doRemove(it);
            }
            else
            {
                result = it->second;
            }
        }
        return result;
    }

    void doClear()
    {
        static EventArgs _emptyArgs;
        Clear.notify(this, _emptyArgs);
        _data.clear();
    }

    void doReplace()
    {
        std::set<TKey> delMe;
        Replace.notify(this, delMe);
        // delMe contains the to be removed elements
        typename std::set<TKey>::const_iterator it    = delMe.begin();
        typename std::set<TKey>::const_iterator endIt = delMe.end();
        for (; it != endIt; ++it)
        {
            Iterator itH = _data.find(*it);
            doRemove(itH);
        }
    }

    TStrategy _strategy;
    mutable DataHolder _data;
    mutable TMutex _mutex;

private:
    // ....
};
The cached entries themselves are stored in the member

    mutable std::map<TKey, SharedPtr<TValue > > _data;

In addition, AbstractCache defines a strategy object,

    TStrategy _strategy;

and in AbstractCache's initialize() function the strategy is registered with the events listed above, so the cache operations are delegated to the TStrategy object. A typical operation looks like this (doAdd as the example):
void doAdd(const TKey& key, SharedPtr<TValue>& val)
    /// Adds the key value pair to the cache.
    /// If for the key already an entry exists, it will be overwritten.
{
    Iterator it = _data.find(key);
    doRemove(it);

    KeyValueArgs<TKey, TValue> args(key, *val);
    Add.notify(this, args);
    _data.insert(std::make_pair(key, val));

    doReplace();
}
The Replace operation can be triggered by the Add, Update, and Get operations. This is because the cache is not an active object (see POCO C++库学习和分析 -- 线程 (四)): it does not mark elements as expired on its own, so expiration has to be triggered from the outside, i.e. by the caller.
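Putting the two halves together, a strategy is simply an object whose handlers match the delegates registered in initialize(). The skeleton below is a hand-written sketch (the class name MyStrategy and the comments are mine, not POCO source); it only shows the handler signatures the events expect.

#include "Poco/EventArgs.h"
#include "Poco/KeyValueArgs.h"
#include "Poco/ValidArgs.h"
#include <set>

// Illustrative skeleton of the handler interface a TStrategy must provide
// so that the delegates registered in AbstractCache::initialize() can bind to it.
template <class TKey, class TValue>
class MyStrategy
{
public:
    void onAdd(const void* pSender, const Poco::KeyValueArgs<TKey, TValue>& args)
    {
        // record the new key (e.g. remember its insertion time or list position)
    }

    void onUpdate(const void* pSender, const Poco::KeyValueArgs<TKey, TValue>& args)
    {
        // typically treated like a fresh insertion of the key
    }

    void onRemove(const void* pSender, const TKey& key)
    {
        // forget any bookkeeping kept for this key
    }

    void onGet(const void* pSender, const TKey& key)
    {
        // react to a read access (LRU moves the key to the front here)
    }

    void onClear(const void* pSender, const Poco::EventArgs& args)
    {
        // drop all bookkeeping
    }

    void onIsValid(const void* pSender, Poco::ValidArgs<TKey>& args)
    {
        // call args.invalidate() if the key has expired
    }

    void onReplace(const void* pSender, std::set<TKey>& elemsToRemove)
    {
        // fill elemsToRemove with keys the cache should evict;
        // the strategy never erases cache entries itself
    }
};

In POCO itself the concrete strategies derive from AbstractStrategy<TKey, TValue>, which declares this set of handlers.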
Another noteworthy point about the cache classes is that they store SharedPtr<TValue> rather than TValue itself. This design is for thread safety: the replace operation may be triggered from multiple threads, so an entry a caller is still using can be evicted at any time. The solution is for get() to return either a SharedPtr<TValue> or a copy of TValue, and compared with copying, the SharedPtr approach is considerably cheaper.
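A minimal sketch of what this buys the caller (the cache type, key/value types and timeout are arbitrary choices for illustration): even if the entry is dropped right after get(), the cache only releases its own reference, so the SharedPtr held by the caller keeps the value alive.

#include "Poco/Bugcheck.h"
#include "Poco/ExpireCache.h"
#include <string>

int main()
{
    // 500 ms expiration; values are handed out as SharedPtr<std::string>.
    Poco::ExpireCache<int, std::string> cache(500);
    cache.add(1, std::string("payload"));

    Poco::SharedPtr<std::string> p = cache.get(1);

    // Even if the entry expires or is cleared while the caller still works
    // with p, the value is only destroyed when the last SharedPtr goes away,
    // so dereferencing p remains safe.
    cache.clear();
    poco_assert (*p == "payload");
    return 0;
}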
The strategy classes are responsible for ordering the keys of the <key, value> pairs stored in _data. Each strategy keeps its own container of keys: in LRUStrategy it is a std::list<TKey>, while in ExpireStrategy, UniqueAccessExpireStrategy and UniqueExpireStrategy it is a std::multimap<Timestamp, TKey>.
For the LRU strategy this design makes sense to me. Every access moves the key back to the front of the list. To locate a key in the list quickly, an additional std::map<TKey, Iterator> container is maintained: whenever a key is inserted into the list, the iterator to its position is stored in the map. This turns locating a key from an O(n) traversal of the list into an O(log n) map lookup, because the list no longer has to be scanned. The relevant code follows (a simplified sketch of this bookkeeping appears after it):
void onReplace(const void*, std::set<TKey>& elemsToRemove)
{
    // Note: replace only informs the cache which elements
    // it would like to remove!
    // it does not remove them on its own!
    std::size_t curSize = _keyIndex.size();

    if (curSize < _size)
    {
        return;
    }

    std::size_t diff = curSize - _size;
    Iterator it = --_keys.end(); //--keys can never be invoked on an empty list due to the minSize==1 requirement of LRU
    std::size_t i = 0;

    while (i++ < diff)
    {
        elemsToRemove.insert(*it);
        if (it != _keys.begin())
        {
            --it;
        }
    }
}
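For reference, the bookkeeping described above can be sketched as follows. This is a hand-written simplification, not the POCO LRUStrategy source; the member names _keys and _keyIndex merely mirror those used in the snippet above.

#include <iostream>
#include <list>
#include <map>
#include <string>

// Minimal illustration of the LRU bookkeeping: a list ordered by recency
// plus a map from key to list iterator for O(log n) lookup.
template <class TKey>
class LruIndex
{
public:
    void onAdd(const TKey& key)
    {
        _keys.push_front(key);            // newest element goes to the front
        _keyIndex[key] = _keys.begin();   // remember where it lives in the list
    }

    void onGet(const TKey& key)
    {
        typename KeyIndex::iterator it = _keyIndex.find(key);
        if (it != _keyIndex.end())
        {
            // splice moves the node to the front without invalidating iterators
            _keys.splice(_keys.begin(), _keys, it->second);
            it->second = _keys.begin();
        }
    }

    const std::list<TKey>& order() const { return _keys; }

private:
    typedef std::map<TKey, typename std::list<TKey>::iterator> KeyIndex;

    std::list<TKey> _keys;
    KeyIndex        _keyIndex;
};

int main()
{
    LruIndex<std::string> idx;
    idx.onAdd("a");
    idx.onAdd("b");
    idx.onAdd("c");   // order: c b a
    idx.onGet("a");   // order: a c b
    for (std::list<std::string>::const_iterator it = idx.order().begin(); it != idx.order().end(); ++it)
        std::cout << *it << ' ';
    std::cout << std::endl;   // prints: a c b
    return 0;
}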
For the time-based strategies, onAdd records each key's insertion timestamp in the multimap and keeps a key-to-iterator map so the entry can be found again later:

void onAdd(const void*, const KeyValueArgs <TKey, TValue>& args)
{
    Timestamp now;
    IndexIterator it = _keyIndex.insert(typename TimeIndex::value_type(now, args.key()));
    std::pair<Iterator, bool> stat = _keys.insert(typename Keys::value_type(args.key(), it));
    if (!stat.second)
    {
        _keyIndex.erase(stat.first->second);
        stat.first->second = it;
    }
}
void onReplace(const void*, std::set<TKey>& elemsToRemove)
{
    // Note: replace only informs the cache which elements
    // it would like to remove!
    // it does not remove them on its own!
    IndexIterator it = _keyIndex.begin();
    while (it != _keyIndex.end() && it->first.isElapsed(_expireTime))
    {
        elemsToRemove.insert(it->second);
        ++it;
    }
}

Since the multimap is ordered by timestamp, onReplace walks it from the oldest entry and stops at the first element that has not yet expired; in the worst case this is a traversal of the whole multimap, i.e. O(n).
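The "access resets the timer" variants build on the same two containers. The sketch below is my own illustration (not the POCO AccessExpireStrategy source) of how an onGet handler can re-stamp an entry: the old timestamp entry is removed from the multimap and the key is re-inserted keyed by the current time.

#include "Poco/Timestamp.h"
#include <map>
#include <string>

// Illustrative re-stamping logic for an access-expire strategy (sketch only).
template <class TKey>
class AccessExpireIndex
{
public:
    explicit AccessExpireIndex(Poco::Timestamp::TimeDiff expireMicroseconds):
        _expireTime(expireMicroseconds)
    {
    }

    void onAdd(const TKey& key)
    {
        Poco::Timestamp now;
        IndexIterator it = _keyIndex.insert(typename TimeIndex::value_type(now, key));
        _keys[key] = it;
    }

    void onGet(const TKey& key)
    {
        typename Keys::iterator it = _keys.find(key);
        if (it != _keys.end())
        {
            _keyIndex.erase(it->second);   // drop the old timestamp entry
            Poco::Timestamp now;           // re-insert keyed by "now":
            it->second = _keyIndex.insert(typename TimeIndex::value_type(now, key));
        }                                  // the expiration clock restarts
    }

    bool expired(const TKey& key) const
    {
        typename Keys::const_iterator it = _keys.find(key);
        return it != _keys.end() && it->second->first.isElapsed(_expireTime);
    }

private:
    typedef std::multimap<Poco::Timestamp, TKey> TimeIndex;
    typedef typename TimeIndex::iterator         IndexIterator;
    typedef std::map<TKey, IndexIterator>        Keys;

    TimeIndex _keyIndex;
    Keys      _keys;
    Poco::Timestamp::TimeDiff _expireTime;
};

The Unique* variants follow the same idea, except that each entry carries its own timeout instead of a shared _expireTime.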
To wrap up, here is a complete LRUCache usage example:

#include "Poco/LRUCache.h"
#include <string>

int main()
{
    Poco::LRUCache<int, std::string> myCache(3);
    myCache.add(1, "Lousy"); // |-1-| -> first elem is the most popular one
    Poco::SharedPtr<std::string> ptrElem = myCache.get(1); // |-1-|
    myCache.add(2, "Morning"); // |-2-1-|
    myCache.add(3, "USA"); // |-3-2-1-|
    // now get rid of the most unpopular entry: "Lousy"
    myCache.add(4, "Good"); // |-4-3-2-|
    poco_assert (*ptrElem == "Lousy"); // content of ptrElem is still valid
    ptrElem = myCache.get(2); // |-2-4-3-|
    // replace the morning entry with evening
    myCache.add(2, "Evening"); // 2 Events: Remove followed by Add
}