I have used MemoryCache in projects for a long time and it has always felt fairly simple. I had read its source code before: it mainly delegates data storage to MemoryCacheStore, which is thread-safe internally. MemoryCacheStore keeps its entries in a Hashtable; if an entry for a key already exists, the old entry is removed and then the new one is added.
Let's look at the implementation of MemoryCache:
public class MemoryCache : ObjectCache, IEnumerable, IDisposable {
    private static readonly TimeSpan OneYear = new TimeSpan(365, 0, 0, 0);
    private static object s_initLock = new object();
    private static MemoryCache s_defaultCache;
    private static CacheEntryRemovedCallback s_sentinelRemovedCallback = new CacheEntryRemovedCallback(SentinelEntry.OnCacheEntryRemovedCallback);
    private GCHandleRef<MemoryCacheStore>[] _storeRefs;
    private int _storeCount;
    private int _disposed;
    private MemoryCacheStatistics _stats;
    private string _name;
    private PerfCounters _perfCounters;
    private bool _configLess;
    EventHandler _onAppDomainUnload;
    UnhandledExceptionEventHandler _onUnhandledException;

    private MemoryCache() {
        _name = "Default";
        Init(null);
    }

    public MemoryCache(string name, NameValueCollection config = null) {
        if (name == null) {
            throw new ArgumentNullException("name");
        }
        if (name == String.Empty) {
            throw new ArgumentException(R.Empty_string_invalid, "name");
        }
        if (String.Equals(name, "default", StringComparison.OrdinalIgnoreCase)) {
            throw new ArgumentException(R.Default_is_reserved, "name");
        }
        _name = name;
        Init(config);
    }

    private void Init(NameValueCollection config) {
        _storeCount = Environment.ProcessorCount;
        _storeRefs = new GCHandleRef<MemoryCacheStore>[_storeCount];
        InitDisposableMembers(config);
    }

    public override object Get(string key, string regionName = null) {
        return GetInternal(key, regionName);
    }

    private object GetInternal(string key, string regionName) {
        if (regionName != null) {
            throw new NotSupportedException(R.RegionName_not_supported);
        }
        if (key == null) {
            throw new ArgumentNullException("key");
        }
        MemoryCacheEntry entry = GetEntry(key);
        return (entry != null) ? entry.Value : null;
    }

    internal MemoryCacheEntry GetEntry(String key) {
        if (IsDisposed) {
            return null;
        }
        MemoryCacheKey cacheKey = new MemoryCacheKey(key);
        MemoryCacheStore store = GetStore(cacheKey);
        return store.Get(cacheKey);
    }

    public override void Set(string key, object value, CacheItemPolicy policy, string regionName = null) {
        if (regionName != null) {
            throw new NotSupportedException(R.RegionName_not_supported);
        }
        if (key == null) {
            throw new ArgumentNullException("key");
        }
        DateTimeOffset absExp = ObjectCache.InfiniteAbsoluteExpiration;
        TimeSpan slidingExp = ObjectCache.NoSlidingExpiration;
        CacheItemPriority priority = CacheItemPriority.Default;
        Collection<ChangeMonitor> changeMonitors = null;
        CacheEntryRemovedCallback removedCallback = null;
        if (policy != null) {
            ValidatePolicy(policy);
            if (policy.UpdateCallback != null) {
                Set(key, value, policy.ChangeMonitors, policy.AbsoluteExpiration, policy.SlidingExpiration, policy.UpdateCallback);
                return;
            }
            absExp = policy.AbsoluteExpiration;
            slidingExp = policy.SlidingExpiration;
            priority = policy.Priority;
            changeMonitors = policy.ChangeMonitors;
            removedCallback = policy.RemovedCallback;
        }
        if (IsDisposed) {
            if (changeMonitors != null) {
                foreach (ChangeMonitor monitor in changeMonitors) {
                    if (monitor != null) {
                        monitor.Dispose();
                    }
                }
            }
            return;
        }
        MemoryCacheKey cacheKey = new MemoryCacheKey(key);
        MemoryCacheStore store = GetStore(cacheKey);
        store.Set(cacheKey, new MemoryCacheEntry(key, value, absExp, slidingExp, priority, changeMonitors, removedCallback, this));
    }

    internal MemoryCacheStore GetStore(MemoryCacheKey cacheKey) {
        // Dev10 865907: Math.Abs throws OverflowException for Int32.MinValue
        int hashCode = cacheKey.Hash;
        if (hashCode < 0) {
            hashCode = (hashCode == Int32.MinValue) ? 0 : -hashCode;
        }
        int idx = hashCode % _storeCount;
        return _storeRefs[idx].Target;
    }

    private void InitDisposableMembers(NameValueCollection config) {
        bool dispose = true;
        try {
            try {
                _perfCounters = new PerfCounters(_name);
            }
            catch {
                // ignore exceptions from perf counters
            }
            for (int i = 0; i < _storeCount; i++) {
                _storeRefs[i] = new GCHandleRef<MemoryCacheStore>(new MemoryCacheStore(this, _perfCounters));
            }
            _stats = new MemoryCacheStatistics(this, config);
            AppDomain appDomain = Thread.GetDomain();
            EventHandler onAppDomainUnload = new EventHandler(OnAppDomainUnload);
            appDomain.DomainUnload += onAppDomainUnload;
            _onAppDomainUnload = onAppDomainUnload;
            UnhandledExceptionEventHandler onUnhandledException = new UnhandledExceptionEventHandler(OnUnhandledException);
            appDomain.UnhandledException += onUnhandledException;
            _onUnhandledException = onUnhandledException;
            dispose = false;
        }
        finally {
            if (dispose) {
                Dispose();
            }
        }
    }

    private void OnAppDomainUnload(Object unusedObject, EventArgs unusedEventArgs) {
        Dispose();
    }

    private void OnUnhandledException(Object sender, UnhandledExceptionEventArgs eventArgs) {
        // if the CLR is terminating, dispose the cache.
        // This will dispose the perf counters (see Dev10 680819).
        if (eventArgs.IsTerminating) {
            Dispose();
        }
    }

    public void Dispose() {
        if (Interlocked.Exchange(ref _disposed, 1) == 0) {
            // unhook domain events
            DisposeSafeCritical();
            // stats must be disposed prior to disposing the stores.
            if (_stats != null) {
                _stats.Dispose();
            }
            if (_storeRefs != null) {
                foreach (var storeRef in _storeRefs) {
                    if (storeRef != null) {
                        storeRef.Dispose();
                    }
                }
            }
            if (_perfCounters != null) {
                _perfCounters.Dispose();
            }
            GC.SuppressFinalize(this);
        }
    }
}
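Before diving into the stores, a quick usage sketch of the public API helps frame what the code above does; the cache key and value here are purely illustrative:

using System;
using System.Runtime.Caching;

class CacheDemo
{
    static void Main()
    {
        // MemoryCache.Default is the process-wide default instance;
        // a named instance can be created with new MemoryCache("myCache").
        ObjectCache cache = MemoryCache.Default;

        var policy = new CacheItemPolicy
        {
            // The entry expires 10 minutes after being added.
            AbsoluteExpiration = DateTimeOffset.Now.AddMinutes(10)
        };

        cache.Set("user:42", "Gavin", policy);

        // Get returns null when the key is missing or the entry has expired.
        var value = cache.Get("user:42") as string;
        Console.WriteLine(value ?? "(not found)");
    }
}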
The implementation of MemoryCacheStore:
internal sealed class MemoryCacheStore : IDisposable {
    const int INSERT_BLOCK_WAIT = 10000;
    const int MAX_COUNT = Int32.MaxValue / 2;

    private Hashtable _entries;
    private Object _entriesLock;
    private CacheExpires _expires;
    private CacheUsage _usage;
    private int _disposed;
    private ManualResetEvent _insertBlock;
    private volatile bool _useInsertBlock;
    private MemoryCache _cache;
    private PerfCounters _perfCounters;

    internal MemoryCacheStore(MemoryCache cache, PerfCounters perfCounters) {
        _cache = cache;
        _perfCounters = perfCounters;
        _entries = new Hashtable(new MemoryCacheEqualityComparer());
        _entriesLock = new Object();
        _expires = new CacheExpires(this);
        _usage = new CacheUsage(this);
        InitDisposableMembers();
    }

    internal MemoryCacheEntry Get(MemoryCacheKey key) {
        MemoryCacheEntry entry = _entries[key] as MemoryCacheEntry;
        // has it expired?
        if (entry != null && entry.UtcAbsExp <= DateTime.UtcNow) {
            Remove(key, entry, CacheEntryRemovedReason.Expired);
            entry = null;
        }
        // update outside of lock
        UpdateExpAndUsage(entry);
        return entry;
    }

    internal void Set(MemoryCacheKey key, MemoryCacheEntry entry) {
        if (_useInsertBlock && entry.HasUsage()) {
            WaitInsertBlock();
        }
        MemoryCacheEntry existingEntry = null;
        bool added = false;
        lock (_entriesLock) {
            if (_disposed == 0) {
                existingEntry = _entries[key] as MemoryCacheEntry;
                if (existingEntry != null) {
                    existingEntry.State = EntryState.RemovingFromCache;
                }
                entry.State = EntryState.AddingToCache;
                added = true;
                _entries[key] = entry;
            }
        }
        CacheEntryRemovedReason reason = CacheEntryRemovedReason.Removed;
        if (existingEntry != null) {
            if (existingEntry.UtcAbsExp <= DateTime.UtcNow) {
                reason = CacheEntryRemovedReason.Expired;
            }
            RemoveFromCache(existingEntry, reason, delayRelease:true);
        }
        if (added) {
            AddToCache(entry);
        }
        // Dev10 861163: Call Release after the new entry has been completely added so
        // that the CacheItemRemovedCallback can take a dependency on the newly inserted item.
        if (existingEntry != null) {
            existingEntry.Release(_cache, reason);
        }
    }

    private void AddToCache(MemoryCacheEntry entry) {
        // add outside of lock
        if (entry != null) {
            if (entry.HasExpiration()) {
                _expires.Add(entry);
            }
            if (entry.HasUsage()
                && (!entry.HasExpiration() || entry.UtcAbsExp - DateTime.UtcNow >= CacheUsage.MIN_LIFETIME_FOR_USAGE)) {
                _usage.Add(entry);
            }
            entry.State = EntryState.AddedToCache;
            entry.CallNotifyOnChanged();
            if (_perfCounters != null) {
                _perfCounters.Increment(PerfCounterName.Entries);
                _perfCounters.Increment(PerfCounterName.Turnover);
            }
        }
    }

    internal void UpdateExpAndUsage(MemoryCacheEntry entry, bool updatePerfCounters = true) {
        if (entry != null) {
            if (entry.InUsage() || entry.SlidingExp > TimeSpan.Zero) {
                DateTime utcNow = DateTime.UtcNow;
                entry.UpdateSlidingExp(utcNow, _expires);
                entry.UpdateUsage(utcNow, _usage);
            }
            // DevDiv #67021: If this entry has an update sentinel, the sliding expiration is actually associated
            // with that sentinel, not with this entry. We need to update the sentinel's sliding expiration to
            // keep the sentinel from expiring, which in turn would force a removal of this entry from the cache.
            entry.UpdateSlidingExpForUpdateSentinel();
            if (updatePerfCounters && _perfCounters != null) {
                _perfCounters.Increment(PerfCounterName.Hits);
                _perfCounters.Increment(PerfCounterName.HitRatio);
                _perfCounters.Increment(PerfCounterName.HitRatioBase);
            }
        }
        else {
            if (updatePerfCounters && _perfCounters != null) {
                _perfCounters.Increment(PerfCounterName.Misses);
                _perfCounters.Increment(PerfCounterName.HitRatioBase);
            }
        }
    }
}
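This Set method is also what backs the "remove the old entry, then add the new one" behavior mentioned at the beginning. A small sketch against the public API illustrates it; judging from the code above, the displaced entry's RemovedCallback should fire with reason Removed (not verified output, just what the source implies):

using System;
using System.Runtime.Caching;

class OverwriteDemo
{
    static void Main()
    {
        var cache = new MemoryCache("demo");   // "demo" is just an illustrative name

        var policy = new CacheItemPolicy
        {
            // Fires when the first entry is displaced by the second Set.
            RemovedCallback = args =>
                Console.WriteLine(args.CacheItem.Key + " removed, reason: " + args.RemovedReason)
        };

        cache.Set("k", "old value", policy);
        cache.Set("k", "new value", new CacheItemPolicy());

        // Per MemoryCacheStore.Set above, the reason is Removed because the old
        // entry was replaced rather than expired.
        Console.WriteLine(cache.Get("k"));     // "new value"
    }
}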
As you can see, the implementations of MemoryCache and MemoryCacheStore are both quite easy to follow. Take a web application as an example: all the cached data lives in the MemoryCacheStore's Hashtable. But how do different requests share that MemoryCacheStore data? Normally we would do this with a static variable; a static variable belongs to the process, and the threads inside the process share it. MemoryCache, however, does not hold its stores in static variables; instead it holds them through GCHandle, wrapped in the GCHandleRef class:
internal class GCHandleRef<T> : IDisposable where T : class, IDisposable {
    GCHandle _handle;
    T _t;

    [SecuritySafeCritical]
    [PermissionSet(SecurityAction.Assert, Unrestricted = true)]
    public GCHandleRef(T t) {
        _handle = GCHandle.Alloc(t);
    }

    public T Target {
        [SecuritySafeCritical]
        [PermissionSet(SecurityAction.Assert, Unrestricted = true)]
        get {
            try {
                T t = (T)_handle.Target;
                if (t != null) {
                    return t;
                }
            }
            catch (InvalidOperationException) {
                // use the normal reference instead of throwing an exception when _handle is already freed
            }
            return _t;
        }
    }

    [SecuritySafeCritical]
    [PermissionSet(SecurityAction.Assert, Unrestricted = true)]
    public void Dispose() {
        Target.Dispose();
        // Safe to call Dispose more than once but not thread-safe
        if (_handle.IsAllocated) {
            // We must free the GC handle to avoid leaks.
            // However after _handle is freed we no longer have access to its Target
            // which will cause AVs and various race conditions under stress.
            // We revert to using normal references after disposing the GC handle
            _t = (T)_handle.Target;
            _handle.Free();
        }
    }
}
The MemoryCache constructor calls the Init method, which sizes the _storeRefs array to the processor count (_storeCount = Environment.ProcessorCount; _storeRefs = new GCHandleRef<MemoryCacheStore>[_storeCount];). InitDisposableMembers then fills each slot with a GCHandleRef wrapping a new MemoryCacheStore, so each MemoryCache instance holds one store per processor, and GetStore picks one of them by the key's hash.
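To make the per-processor partitioning concrete, here is a small self-contained analogue of the Init/GetStore logic; PartitionedCache is a made-up type for illustration, not anything in the framework:

using System;
using System.Collections;

// One Hashtable-backed "store" per processor, selected by the key's hash,
// mirroring the idea behind MemoryCache's _storeRefs / GetStore.
class PartitionedCache
{
    private readonly Hashtable[] _stores;

    public PartitionedCache()
    {
        _stores = new Hashtable[Environment.ProcessorCount];
        for (int i = 0; i < _stores.Length; i++)
        {
            // Synchronized wrapper gives us a thread-safe Hashtable per partition.
            _stores[i] = Hashtable.Synchronized(new Hashtable());
        }
    }

    private Hashtable GetStore(string key)
    {
        int hash = key.GetHashCode();
        // Same trick as the source: Math.Abs(int.MinValue) would overflow.
        if (hash < 0)
        {
            hash = (hash == int.MinValue) ? 0 : -hash;
        }
        return _stores[hash % _stores.Length];
    }

    public void Set(string key, object value)
    {
        GetStore(key)[key] = value;
    }

    public object Get(string key)
    {
        return GetStore(key)[key];
    }
}

Spreading keys across several stores reduces contention, because two threads working on different keys usually lock different partitions.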
When we write managed C# code, memory addresses and garbage collection are not something we normally care about; the CLR handles all of that behind the scenes. But sometimes we want to manipulate memory directly, the way we would in C, or we need to call into unmanaged code. In those cases we have to protect the memory address and keep the garbage collector from reclaiming it, because once the CLR reclaims the memory, any unmanaged code that later accesses that address touches invalid memory and the program crashes.
There are three main ways to work with memory directly in C#:
1. The GCHandle class provides a way to access a managed object from unmanaged memory. Let's look at it through some code:
// A managed block of memory (an ordinary managed array)
Int16[] Mangement_Mem = new Int16[4] { 4, 3, 2, 1 };

// Allocating a Normal GCHandle for Mangement_Mem protects the object from being
// garbage collected. Its address in memory may still change, but no matter how the
// object moves, the integer representation of the handle (gch) stays the same, so it
// can be passed to unmanaged functions. When the GCHandle is no longer needed it must
// be released with Free(); only after that can the GC reclaim the object.
// In terms of lifetime, this plays a role similar to GC.KeepAlive(Mangement_Mem).
GCHandle gch = GCHandle.Alloc(Mangement_Mem, GCHandleType.Normal);

// The handle representation can be converted to an IntPtr and back into a GCHandle:
IntPtr Ptr_Mem = GCHandle.ToIntPtr(gch);
GCHandle handle = GCHandle.FromIntPtr(Ptr_Mem);

// Target returns the actual object the handle represents.
Int16[] array = (Int16[])handle.Target;

// Release the handle so the object becomes eligible for collection again.
gch.Free();
Besides GCHandleType.Normal, the second parameter of GCHandle.Alloc can also be GCHandleType.Pinned. Normal does not fix the object's address; it only guarantees the memory will not be collected by the GC. Pinned, on the other hand, fixes the address in place: after pinning, the garbage collector is prevented from moving the object in memory.
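A minimal sketch of the Pinned variant; the unmanaged call is only simulated here by reading back through the pinned address with Marshal.ReadInt16:

using System;
using System.Runtime.InteropServices;

class PinnedDemo
{
    static void Main()
    {
        Int16[] data = new Int16[4] { 4, 3, 2, 1 };

        // Pinned: the object can neither be collected nor moved by the GC,
        // so its address can safely be handed to unmanaged code.
        GCHandle pinned = GCHandle.Alloc(data, GCHandleType.Pinned);
        try
        {
            // AddrOfPinnedObject is only valid for pinned handles.
            IntPtr address = pinned.AddrOfPinnedObject();

            // In real code this address would be passed to a native function;
            // here we just read the first element back through the pointer.
            short first = Marshal.ReadInt16(address);
            Console.WriteLine(first);   // 4
        }
        finally
        {
            // Always free the handle, otherwise the array stays pinned forever.
            pinned.Free();
        }
    }
}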
2. Marshal
C# provides a set of methods for allocating unmanaged memory, copying unmanaged memory blocks, and converting managed types to unmanaged types, along with other miscellaneous helpers used when interacting with unmanaged code; all of this is exposed through the Marshal class. (Only C++/CLI has the managed/unmanaged distinction; pure C++ has no such concept, and in Java everything can be considered managed.)
Marshal can also convert between structs and byte sequences; there is plenty of material on this online.
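As a quick illustration of allocating and touching unmanaged memory through Marshal, a sketch like the following works; the buffer size and value are arbitrary:

using System;
using System.Runtime.InteropServices;

class MarshalDemo
{
    static void Main()
    {
        // Allocate 4 bytes of unmanaged memory (not tracked by the GC).
        IntPtr buffer = Marshal.AllocHGlobal(sizeof(int));
        try
        {
            // Write and read an Int32 through the unmanaged pointer.
            Marshal.WriteInt32(buffer, 12345);
            int value = Marshal.ReadInt32(buffer);
            Console.WriteLine(value);   // 12345

            // Copy the unmanaged bytes into a managed array.
            byte[] managed = new byte[sizeof(int)];
            Marshal.Copy(buffer, managed, 0, managed.Length);
        }
        finally
        {
            // Unmanaged memory must be released explicitly.
            Marshal.FreeHGlobal(buffer);
        }
    }
}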
3. Pinning an address with the fixed keyword. The resource we allocate is pinned by this keyword, so the CLR's garbage collector will not touch or move the memory we are protecting while it is in use.
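The larger sample below actually uses GCHandle and Marshal; for the fixed statement itself, a minimal sketch looks like this (unsafe code must be enabled for the project, and the array contents are arbitrary):

using System;

class FixedDemo
{
    // Requires compiling with /unsafe (or <AllowUnsafeBlocks>true</AllowUnsafeBlocks>).
    static unsafe void Main()
    {
        int[] numbers = { 10, 20, 30 };

        // Within the fixed block the GC will not move 'numbers',
        // so the raw pointer stays valid.
        fixed (int* p = numbers)
        {
            for (int i = 0; i < numbers.Length; i++)
            {
                Console.WriteLine(*(p + i));
            }
        }
        // Once the block ends, the array is movable again.
    }
}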
using System;
using System.Runtime.InteropServices;

// Sequential layout is required so that Marshal.SizeOf / StructureToPtr
// can compute a fixed layout for this class.
[StructLayout(LayoutKind.Sequential)]
class StudentInfo
{
    public string Name { set; get; }
}

class Program
{
    private const int OptionsMask = 0xFFFF;

    static void Main(string[] args)
    {
        var a = new StudentInfo { Name = "Gavin" };
        var b = ObjectToByte(a);
        var c = ByteToObject(ref a, b);
        Console.ReadKey();
    }

    /// <summary>
    /// Converts a struct/object into a byte array.
    /// </summary>
    public static byte[] ObjectToByte<T>(T obj)
    {
        // Get the marshaled size of the type
        int size = Marshal.SizeOf(obj);
        // Create the byte array
        byte[] bytes = new byte[size];
        // Allocate an unmanaged block of the same size
        IntPtr structPtr = Marshal.AllocHGlobal(size);
        // Copy the object into the unmanaged block
        Marshal.StructureToPtr(obj, structPtr, false);
        // Copy the unmanaged block into the byte array
        Marshal.Copy(structPtr, bytes, 0, size);
        // Free the unmanaged block
        Marshal.FreeHGlobal(structPtr);
        // Return the byte array
        return bytes;
    }

    /// <summary>
    /// Converts a byte array back into a struct/object.
    /// </summary>
    /// <param name="_struct">Receives the reconstructed object.</param>
    /// <param name="buffer">The source bytes.</param>
    /// <returns>The number of bytes consumed, 0 for an empty buffer, -1 on error.</returns>
    public static int ByteToObject<T>(ref T _struct, byte[] buffer)
    {
        try
        {
            if ((buffer != null) && (buffer.Length > 0))
            {
                // Pin the buffer so its address is stable while we read from it
                GCHandle pinned = GCHandle.Alloc(buffer, GCHandleType.Pinned);
                try
                {
                    _struct = (T)Marshal.PtrToStructure(pinned.AddrOfPinnedObject(), typeof(T));
                    return buffer.Length;
                }
                finally
                {
                    pinned.Free();
                }
            }
            else
                return 0;
        }
        catch
        {
            return -1;
        }
    }
}