Implementing a Multi-Threaded LRU Cache

In an earlier post I wrote an LRU cache that only works in single-threaded scenarios. There are many ways to build a cache for multi-threaded use: some rely on a TTL (time-to-live) eviction policy, others on LRU eviction, and there are several options for how the eviction itself is carried out. ConcurrentHashMap lends itself well to a TTL policy: the usual approach is to start a separate daemon thread that periodically removes expired keys from the ConcurrentHashMap (a sketch of this idea follows below). An LRU policy, on the other hand, needs an auxiliary doubly linked list and therefore extra synchronization. Since I already implemented a single-threaded LRU cache before this post (https://blog.csdn.net/qq_32459653/article/details/82766468), I decided not to build on the existing ConcurrentHashMap structure.
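
For reference, here is a minimal sketch of that TTL approach. It is only an illustration: the class name TtlCache, the sweep interval, and the Entry wrapper are invented for this example and are not part of the code in this post.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Illustrative TTL cache built on ConcurrentHashMap: a daemon thread
 * periodically sweeps expired keys, and reads drop expired entries lazily.
 */
public class TtlCache<K, V> {

    /* value wrapper that remembers when the entry expires */
    private static class Entry<V> {
        final V value;
        final long expireAt;

        Entry(V value, long ttlMillis) {
            this.value = value;
            this.expireAt = System.currentTimeMillis() + ttlMillis;
        }
    }

    private final ConcurrentHashMap<K, Entry<V>> map = new ConcurrentHashMap<K, Entry<V>>();

    public TtlCache(long sweepIntervalMillis) {
        ScheduledExecutorService sweeper = Executors.newSingleThreadScheduledExecutor(r -> {
            Thread t = new Thread(r, "ttl-cache-sweeper");
            t.setDaemon(true); // daemon thread, so it never keeps the JVM alive
            return t;
        });
        sweeper.scheduleAtFixedRate(
                () -> map.entrySet().removeIf(e -> e.getValue().expireAt <= System.currentTimeMillis()),
                sweepIntervalMillis, sweepIntervalMillis, TimeUnit.MILLISECONDS);
    }

    public void set(K key, V value, long ttlMillis) {
        map.put(key, new Entry<V>(value, ttlMillis));
    }

    public V get(K key) {
        Entry<V> entry = map.get(key);
        if (entry == null || entry.expireAt <= System.currentTimeMillis()) {
            map.remove(key); // lazily discard an expired entry on read
            return null;
        }
        return entry.value;
    }
}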

Modeled on the structure of the JDK 1.7 ConcurrentHashMap, the following is my own synchronized LRU cache. The implementation is as follows.

package Inter.other;


/**
 * Generic cache interface.
 * Created by lin on 2018/9/19.
 */
public interface Cache<K, V> {

    V get(K key);

    void set(K key, V value);

    void clear();

    int size();

    /**
     * Removes the least recently used entry.
     * This method exists specifically for SegmentCache, which evicts through it.
     */
    void removeLast();
}

 

 

package Inter.other;


import java.util.HashMap;
import java.util.Map;

/**
 * Concrete LRU cache implementation.
 * Created by lin on 2018/9/16.
 * All basic operations run in O(1) time.
 */
public class LRUCache<K, V> implements Cache<K, V> {

    // private KeyGenerationStrategy keyGenerationStrategy;
    /* default capacity */
    private static final int DEFAULT_CAPACITY = 8;

    /* maximum number of entries the cache may hold */
    private volatile int capacity;
    /* number of entries currently in the cache */
    private volatile int size;
    /* map for O(1) lookup of a node by key */
    private volatile Map<K, Node<K, V>> map = new HashMap<K, Node<K, V>>();
    /* doubly linked list for O(1) insertion and removal; head and tail are sentinel nodes */
    private volatile Node<K, V> head;
    private volatile Node<K, V> tail;

    /**
     * Initializes the cache.
     *
     * @param capacity maximum number of entries; falls back to DEFAULT_CAPACITY if not positive
     */
    public LRUCache(int capacity) {
        if (capacity <= 0) {
            capacity = DEFAULT_CAPACITY;
        }
        this.capacity = capacity;
        this.head = new Node<K, V>(null, null, null);
        this.tail = new Node<K, V>(head, null, null);
        head.next = tail;
    }

    public LRUCache() {
        this(DEFAULT_CAPACITY);
    }

    /**
     * Returns the value cached under the given key, or null if absent.
     * A hit moves the node to the front of the list (most recently used position).
     *
     * @param key the key to look up
     * @return the cached value, or null
     */
    public V get(K key) {
        Node<K, V> node = map.get(key);
        if (node == null) {
            return null;
        } else {
            moveToFirst(node);
            return node.value;
        }
    }

    /**
     * Puts a key/value pair into the cache.
     * Note: re-setting an existing key is not handled here; the old node would remain in the list.
     *
     * @param key   the key the value is stored under
     * @param value the value to store
     */
    public void set(K key, V value) {

        Node<K, V> node = new Node<K, V>(value, key);
        if (size < capacity) {
            // capacity not reached yet: simply link the new node right after the head sentinel
            node.prev = head;
            node.next = head.next;
            head.next.prev = node;
            head.next = node;
            map.put(node.key, node);
            size++;
        } else {
            // capacity reached: evict the node in front of the tail sentinel (least recently used)
            Node<K, V> delNode = tail.prev;
            map.remove(delNode.key);    // must happen before the node is unlinked
            delNode.prev.next = tail;
            tail.prev = delNode.prev;
            delNode.next = null;
            delNode.prev = null;
            // insert the new node at the front (most recently used position)
            node.prev = head;
            node.next = head.next;
            head.next.prev = node;
            head.next = node;
            map.put(node.key, node);
        }
    }


    /* empties the cache */
    public void clear() {
        map.clear();
        this.head = new Node<K, V>(null, null, null);
        this.tail = new Node<K, V>(head, null, null);
        head.next = tail;
        size = 0;
    }


    public int size() {
        return this.size;
    }

    public void removeLast() {
        if (size() == 0) {
            return;
        }
        Node<K, V> delNode = tail.prev;
        map.remove(delNode.key);    // remove from the map before the node is unlinked
        Node<K, V> node = delNode.prev;
        tail.prev = node;
        node.next = tail;
        delNode.next = null;
        delNode.prev = null;
        size--;
    }

    /**
     * Moves a node to the front of the list when it is accessed.
     *
     * @param node the node that was just read
     */
    private void moveToFirst(Node<K, V> node) {
        //validationIsSwap();
        if (node == head.next) {
            return;
        }
        Node<K, V> nodePrev = node.prev;
        Node<K, V> nodeNext = node.next;
        Node<K, V> beMoved = head.next; // current first node
        // link the accessed node right after the head sentinel
        head.next = node;
        node.prev = head;
        node.next = beMoved;
        beMoved.prev = node;
        // close the gap left at the node's old position
        nodePrev.next = nodeNext;
        nodeNext.prev = nodePrev;
    }

    /**
     * Checks whether a move is possible; if size is less than or equal to 1 there is nothing to do.
     *
     * private void validationIsSwap() {
     *     if (size <= 1) {
     *         throw new IllegalArgumentException("cache capacity is not greater than 1, this operation is not possible");
     *     }
     * }
     */
    public static void main(String[] args) {
        LRUCache<String, Integer> lruCache = new LRUCache<String, Integer>(20);
        KeyGenerationStrategy<String, Integer> keyGenerationStrategy = new SimpleKeyGenerationStrategy<String, Integer>();
        String key1 = keyGenerationStrategy.generationKey(1);
        String key2 = keyGenerationStrategy.generationKey(2);
        String key3 = keyGenerationStrategy.generationKey(3);
        lruCache.set(key1, 1);
        lruCache.set(key2, 2);
        lruCache.set(key3, 3);
        System.out.println(lruCache.get(key1) + "");
        System.out.println(lruCache.get(key2) + "");
        System.out.println(lruCache.get(key3) + "");
        System.out.println(lruCache.get(key1) + "");
        // lruCache.swapAndFirst(node2);
        Node<String, Integer> head = lruCache.head;
        // first node
        head = head.next;
        System.out.println(head);
        // second node
        head = head.next;
        System.out.println(head);
        // third node
        head = head.next;
        System.out.println(head);
        // lruCache.set(node1);
    }

    private Node<K, V> getHead() {
        return this.head;
    }
}

package Inter.other;

/**
 * Doubly linked list node.
 * Created by lin on 2018/9/16.
 */
public class Node<K, V> {
    final V value;
    final K key; // the key this node is stored under
    volatile Node<K, V> next;
    volatile Node<K, V> prev;

    public Node(V value, K key) {
        this.value = value;
        this.key = key;
    }

    public Node(Node<K, V> prev, Node<K, V> next, V value) {
        this.prev = prev;
        this.next = next;
        this.value = value;
        this.key = null; // sentinel nodes carry no key
    }

    public K getKey() {
        return this.key;
    }

    @Override
    public String toString() {
        return "prev:" + prev.value + " current:" + this.value + " next:" + next.value;
    }
}

 

 

package Inter.other;

/**
 * Key generation strategy interface.
 * Created by lin on 2018/9/19.
 */
public interface KeyGenerationStrategy<K, V> {

    K generationKey(V value);

}

 

package Inter.other;

/**
 * Simple key generation: the key is the value's string representation.
 * Created by lin on 2018/9/19.
 */
public class SimpleKeyGenerationStrategy<K, V> implements KeyGenerationStrategy<K, V> {

    @SuppressWarnings("unchecked")
    public K generationKey(V value) {
        return (K) value.toString();
    }
}

Up to this point the code is almost identical to the single-threaded LRU implementation; the only differences are that some fields are now marked volatile or final, and a removeLast method has been added.
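
To make the behavior concrete, here is a small single-threaded usage sketch of the LRUCache above; the capacity of 2 and the keys are chosen purely for illustration.

LRUCache<String, Integer> cache = new LRUCache<String, Integer>(2);
cache.set("a", 1);
cache.set("b", 2);
cache.get("a");                      // "a" becomes the most recently used entry
cache.set("c", 3);                   // capacity reached, the least recently used entry ("b") is evicted
System.out.println(cache.get("b"));  // null
System.out.println(cache.get("a"));  // 1
System.out.println(cache.get("c"));  // 3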

Next, we mimic the JDK 1.7 ConcurrentHashMap design to build a thread-safe cache that supports a high degree of concurrency.

 

package Inter.other;

import lombok.Getter;
import lombok.Setter;

import java.io.Serializable;
import java.util.Random;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Created by lin on 2018/9/20.
 */
public abstract class SegmentCache<K, V> implements Cache<K, V> {

    protected CacheFactory cacheFactory;

    protected final Random random; // used to pick a random segment when evicting
    /* number of segments; any positive value is allowed, it does not have to be a power of two */
    @Getter
    private final int segmentCount;
    /* number of entries currently stored across all segments */
    @Getter
    protected final AtomicInteger size = new AtomicInteger(0);
    // overall cache capacity; it can be changed, but doing so is not recommended
    @Setter
    protected volatile int capacity;

    /**
     * Segmented locking: every segment owns its own ReadWriteLock,
     * and that lock must be acquired before the segment's cache is touched.
     */
    protected final Segment<K, V>[] caches;


    private static final int DEFAULT_SEGMENTCOUNT = Runtime.getRuntime().availableProcessors();


    /**
     * Initializes a SegmentCache. Because entries may not be distributed evenly
     * across segments, every segment is given the full capacity as its own limit.
     * The actual total capacity is enforced by SegmentCache itself, and giving every
     * segment the full capacity wastes no memory, since nothing is pre-allocated;
     * the value is only a threshold.
     *
     * @param segmentCount number of segments
     * @param capacity     overall capacity
     * @param cache        the basic cache implementation each segment should use
     */
    @SuppressWarnings("unchecked")
    public SegmentCache(int segmentCount, int capacity, Cache<K, V> cache) {
        if (capacity <= 0) {
            throw new IllegalArgumentException("capacity must be greater than 0");
        }
        if (segmentCount <= 0) {
            throw new IllegalArgumentException("segmentCount must be greater than 0");
        }
        this.segmentCount = segmentCount;
        caches = new Segment[segmentCount];
        this.capacity = capacity;
        setCacheFactory(); // let the subclass plug in its cache factory
        for (int i = 0; i < segmentCount; i++) {
            Cache<K, V> segmentCache = cacheFactory.getCache(cache.getClass().getSimpleName(), capacity);
            caches[i] = new Segment<K, V>(segmentCache, capacity);
        }
        random = new Random(segmentCount);
    }

    public SegmentCache(int capacity, Cache<K, V> cache) {
        this(DEFAULT_SEGMENTCOUNT, capacity, cache);
    }


    public V get(K key) {
        int place = getSegmentPlace(key);
        Segment<K, V> cache = caches[place];
        return cache.get(key);
    }


    public void set(K key, V value) {
        int place = getSegmentPlace(key);
        Segment<K, V> cache = caches[place];
        while (size.get() < capacity) {
            int nowSize = size.get();
            if (size.compareAndSet(nowSize, nowSize + 1)) { // reserve a slot first, then insert
                cache.set(key, value);
                return;
            }
            // lost the CAS race, retry
        }
        // the cache is full: evict an entry, then try again
        weekout();
        set(key, value);
    }

    /**
     * Evicts an entry so that a new one can be inserted.
     */
    public abstract void weekout();

    public void clear() {
        for (Segment<K, V> segment : caches) {
            segment.clear();
        }
        size.set(0);
    }


    /**
     * Returns the number of entries currently stored in the cache.
     *
     * @return the current size
     */
    public int size() {
        return size.get();
    }

    static final class Segment<K, V> extends ReentrantReadWriteLock implements Serializable, Cache<K, V> {
        transient volatile int size;      // number of entries in this segment
        transient volatile int capacity;  // capacity of this segment
        transient int modCount;           // number of structural modifications (put or remove operations)

        private volatile Cache<K, V> cache; // the cache structure backing this segment

        public Segment(Cache<K, V> cache, int capacity) {
            this.cache = cache;
            this.capacity = capacity;
        }


        public V get(K key) {
            // LRUCache.get() relinks the accessed node, so the write lock is taken here
            // to keep the linked list consistent under concurrent reads
            writeLock().lock();
            try {
                return cache.get(key);
            } finally {
                writeLock().unlock();
            }
        }

        public void set(K key, V value) {
            writeLock().lock();
            try {
                cache.set(key, value);
            } finally {
                writeLock().unlock();
            }
        }

        public void clear() {
            writeLock().lock();
            try {
                cache.clear();
            } finally {
                writeLock().unlock();
            }
        }

        public int size() {
            return cache.size();
        }

        public void removeLast() {
            writeLock().lock();
            try {
                cache.removeLast();
            } finally {
                writeLock().unlock();
            }
        }
    }

    /**
     * Maps a key to a segment index. Because segmentCount can be any positive value,
     * a plain modulo is used; the hash is masked so the index is never negative.
     *
     * @param key the key to place
     * @return the index of the segment responsible for this key
     */
    private int getSegmentPlace(K key) {
        return (key.hashCode() & 0x7fffffff) % segmentCount;
    }


    public abstract void setCacheFactory();


}

The cache factory decides which basic cache implementation each segment uses.

package Inter.other;

/**
 * Created by lin on 2018/9/20.
 */
public interface CacheFactory {

    <K, V> Cache<K, V> getCache(String name, int capacity);

}

 

package Inter.other;

/**
 * Created by lin on 2018/9/20.
 */
public class SimpleCacheFactory implements CacheFactory {


    public <K, V> Cache<K, V> getCache(String name, int capacity) {
        if (name.equalsIgnoreCase("lruCache")) {
            return new LRUCache<K, V>(capacity);
        }

        throw new IllegalArgumentException("no such cache implementation: " + name);

    }
}

package Inter.other;

/**
 * Created by lin on 2018/9/20.
 */
public class SimpleSegmentCache<K, V> extends SegmentCache<K, V> {

    public SimpleSegmentCache(int capacity, Cache<K, V> cache) {
        super(capacity, cache);
    }

    public void removeLast() {
        weekout();
    }

    public void weekout() {

        while (true) {
            if (size.get() < capacity) {
                break;
            }
            // pick a random segment as the eviction victim
            int weedSegmentPlace = random.nextInt(getSegmentCount());
            Segment<K, V> weedSegment = caches[weedSegmentPlace];
            if (weedSegment.size() > 0) {
                weedSegment.writeLock().lock();
                try {
                    if (size.get() < capacity) {
                        // another thread has already freed a slot
                        return;
                    }
                    if (weedSegment.size() > 0) {
                        weedSegment.removeLast();
                        size.decrementAndGet();
                        break;
                    }
                } finally {
                    weedSegment.writeLock().unlock();
                }
            }
        }

    }

    public void setCacheFactory() {
        this.cacheFactory = new SimpleCacheFactory();
    }

}
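
Finally, a rough usage sketch of SimpleSegmentCache under concurrent writers; the capacity, thread count, and key format below are invented for the example.

package Inter.other;

public class SegmentCacheDemo {

    public static void main(String[] args) throws InterruptedException {
        // total capacity of 100 entries, each segment backed by an LRUCache
        final Cache<String, Integer> cache =
                new SimpleSegmentCache<String, Integer>(100, new LRUCache<String, Integer>());

        // several writer threads fill the cache concurrently
        Thread[] writers = new Thread[4];
        for (int t = 0; t < writers.length; t++) {
            final int id = t;
            writers[t] = new Thread(new Runnable() {
                public void run() {
                    for (int i = 0; i < 1000; i++) {
                        cache.set("key-" + id + "-" + i, i);
                    }
                }
            });
            writers[t].start();
        }
        for (Thread writer : writers) {
            writer.join();
        }

        // the segment cache keeps the total number of entries at or below its capacity
        System.out.println("size after concurrent writes: " + cache.size());
    }
}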

 

If you are especially interested in the LRU algorithm, there is also a thread-safe, high-concurrency LRU cache implementation written by an expert that is worth reading: https://blog.csdn.net/njchenyi/article/details/8046914