LeetCode LRU Cache

Problem Description

Design and implement a data structure for a Least Recently Used (LRU) cache. It should support the following operations: get and set.

get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
set(key, value) - Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least recently used item before inserting a new item.
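
For example, with a capacity of 2 the cache behaves as follows (a hypothetical trace that just illustrates the semantics above):

set(1, 1)   // cache holds {1=1}
set(2, 2)   // cache holds {2=2, 1=1}
get(1)      // returns 1; key 1 becomes the most recently used
set(3, 3)   // capacity reached, so key 2 (the least recently used) is evicted
get(2)      // returns -1
get(3)      // returns 3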

Problem Solution

1. Solution with Time Limit Exceeded

Note: unordered_map + list. The unordered_map stores the key-value pairs, while the list stores the keys currently in the cache. This solution exceeds the time limit because removing a key from the list and searching for it both take linear time.

class LRUCache{
private:
    unordered_map<int,int> mp; //maps each key to its value
    list<int> lst;  //keys ordered from most recently used (front) to least recently used (back)
    int cacheSize;
public:
    
    LRUCache(int capacity) {
        cacheSize=capacity;            
    }
    
    int get(int key) {
        if(mp.find(key)!=mp.end())
        {
            lst.remove(key);
            lst.push_front(key);
            return mp[key];
        }
        return -1;
        
    }
    
    void set(int key, int value) {
        if(mp.find(key)!=mp.end())
        {
            mp[key]=value;
            lst.remove(key);
            lst.push_front(key);
            return;
        }
        if(lst.size()>=cacheSize)
        {
            mp.erase(lst.back());  //evict the least recently used key
            lst.pop_back();
            
        }
        lst.push_front(key);
        mp[key]=value;
    }
};

2. Solution with Time Limit Exceeded

Note: unordered_map + vector. The idea is the same as Solution 1, except that a vector rather than a list stores the keys in the cache. Erasing a key from the vector and searching for it still take linear time, so this solution also exceeds the time limit.

class LRUCache{
private:
    vector<int> cacheKey; //cache keys
    unordered_map<int, int> mp; //key-value relation 
    int cacheSize;
public:
    
    LRUCache(int capacity) {
        cacheSize = capacity;
    }
    // move key to the back of cacheKey, marking it as most recently used
    void MoveToBack(int key){
        vector<int>::iterator iter;
        for(iter=cacheKey.begin();iter!=cacheKey.end();++iter)
        {
            if(*iter == key)
                break;
        }
        if(iter!=cacheKey.end())
        {
            cacheKey.erase(iter);
            cacheKey.push_back(key);
        }
    }    
    int get(int key) {
        if(mp.find(key) != mp.end()){ //key exists in map
            MoveToBack(key);  
            return mp[key];
        }
        return -1;  //key doesn't exist
    }
    
    void set(int key, int value) {
        if(mp.find(key) != mp.end()){ //key exists in map
            mp[key] = value;  //change the value of relative key
            MoveToBack(key);  //then change the position of key in cacheKey vector
            return;
        }        
        if(cacheKey.size() >= cacheSize){  // the number of keys in the vector has reached the cache capacity
            mp.erase(cacheKey[0]);  //remove the corresponding key-value pair from the map
            cacheKey.erase(cacheKey.begin());  //remove the first (least recently used) key from cacheKey
        }
    
        cacheKey.push_back(key);  //add new key in cacheKey vector
        mp[key] = value;  // insert new key-value into map 
    }
};

3. Successful Solution

Note: unordered_map + list. Unlike the two solutions above, this one introduces a key-value struct and keeps the key-value nodes themselves in the list, while the unordered_map records the iterator position of each key's node in the list. When a key needs to be touched, its recorded iterator can be handed directly to list::splice, which moves the node to the front without any linear search or removal.
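
For reference, a small standalone sketch (my own illustration, not part of the original post) of the O(1) move that std::list::splice performs; iterators to the spliced element remain valid, which is why the map can keep pointing at the node:

#include <cstdio>
#include <iterator>
#include <list>

int main() {
    std::list<int> l = {1, 2, 3, 4};
    auto it = std::next(l.begin(), 2);   // iterator to the element 3
    l.splice(l.begin(), l, it);          // move that single node to the front, O(1)
    for (int x : l) printf("%d ", x);    // prints: 3 1 2 4
    printf("\n%d\n", *it);               // it is still valid and points to 3, now at the front
    return 0;
}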

class cacheNode{
public:
    int key; 
    int val;
    cacheNode(int k , int v) : key(k) , val(v){}
};
class LRUCache{
private:
    int cacheSize;
    list<cacheNode> lst;  //key-value nodes, most recently used at the front
    unordered_map<int,list<cacheNode>::iterator> mp;  //key -> iterator into lst
public:
    LRUCache(int capacity){
        cacheSize=capacity;
    }
    int get(int key){
        if(mp.find(key) != mp.end()){
            lst.splice(lst.begin(),lst,mp[key]);  //move the node to the front in O(1)
            mp[key]=lst.begin();
            return lst.front().val;
        }
        return -1;
    }
    void set(int key,int value)
    {
        if(mp.find(key)!=mp.end()){
            lst.splice(lst.begin(),lst,mp[key]);  //move the existing node to the front
            lst.front().val=value;  //update its value
            mp[key]=lst.begin();
        }
        else
        {
            if(lst.size()>=cacheSize)
            {
                mp.erase(lst.back().key);  //evict the least recently used node
                lst.pop_back();
            }
            lst.push_front(cacheNode(key,value));
            mp[key]=lst.begin();
            
        }    
    }
};
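
A minimal usage sketch of the accepted solution (my own driver, not from the original post; it assumes the usual <list>, <unordered_map> and <cstdio> includes plus using namespace std, as on LeetCode):

int main() {
    LRUCache cache(2);
    cache.set(1, 10);
    cache.set(2, 20);
    printf("%d\n", cache.get(1));   // 10; key 1 becomes the most recently used
    cache.set(3, 30);               // capacity reached, key 2 is evicted
    printf("%d\n", cache.get(2));   // -1; key 2 is gone
    printf("%d\n", cache.get(3));   // 30
    return 0;
}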

posted @ 2014-04-16 11:39  ballwql