View Javadoc

1   package com.atlassian.cache.ehcache;
2   
3   import java.io.Serializable;
4   import java.util.Collection;
5   import java.util.Map;
6   import java.util.concurrent.locks.Lock;
7   import java.util.concurrent.locks.ReadWriteLock;
8   import java.util.concurrent.locks.ReentrantReadWriteLock;
9   
10  import javax.annotation.Nullable;
11  
12  import com.atlassian.cache.CacheLoader;
13  
14  import com.google.common.base.Function;
15  import com.google.common.base.Throwables;
16  import com.google.common.collect.Multimap;
17  import com.google.common.collect.Multimaps;
18  
19  import net.sf.ehcache.CacheException;
20  import net.sf.ehcache.Ehcache;
21  import net.sf.ehcache.Element;
22  import net.sf.ehcache.concurrent.LockType;
23  import net.sf.ehcache.concurrent.Sync;
24  import net.sf.ehcache.constructs.blocking.BlockingCache;
25  import net.sf.ehcache.constructs.blocking.SelfPopulatingCache;
26  
27  import static com.atlassian.util.concurrent.Assertions.notNull;
28  
29  /**
30   * A rewrite of {@link SelfPopulatingCache} with stronger concurrency guarantees.
31   * Unfortunately, since the locks are local and cache replication happens at the Ehcache layer,
32   * bypassing these locks, this is still not good enough for clusters.  It is, however,
33   * an improvement.
34   *
35   * @param <K> the cache's key type
36   * @param <V> the cache's value type
37   */
public class LoadingCache<K,V> extends BlockingCache
{
    /**
     * Default value for number of mutexes in StripedReadWriteLockSync.DEFAULT_NUMBER_OF_MUTEXES is 2048,
     * however it's an overkill for Cloud where cache access is mostly request based and does not have
     * high concurrent usage. Reducing to 64 as default with system property override option.
     */
    private static final int DEFAULT_NUMBER_OF_MUTEXES = Integer.getInteger(LoadingCache.class.getName() + '.' + "DEFAULT_NUMBER_OF_MUTEXES", 64);

    /** Invoked on a cache miss to compute the value for the missing key; never {@code null}. */
    private final CacheLoader<K, V> loader;

    /**
     * Coordinates lazy loads (which share the read half) against {@link #removeAll()} (which takes
     * the write half exclusively).  The striped per-key locks inherited from {@link BlockingCache}
     * cannot protect {@code removeAll} without acquiring every stripe, hence this extra lock.
     */
    // This needs to be fair to ensure that removeAll does not starve for a busy cache
    private final ReadWriteLock loadVsRemoveAllLock = new ReentrantReadWriteLock(true);

    /**
     * Creates a self-populating blocking cache over the given Ehcache.
     *
     * @param cache the underlying Ehcache to decorate
     * @param loader the loader used to populate missing entries; must not be {@code null}
     * @throws CacheException as thrown by the {@link BlockingCache} constructor
     */
    public LoadingCache(final Ehcache cache, final CacheLoader<K, V> loader) throws CacheException
    {
        super(cache, DEFAULT_NUMBER_OF_MUTEXES);
        this.loader = notNull("loader", loader);
    }

    /** @return the shared (read) half of the load-vs-removeAll lock, held during a lazy load */
    Lock loadLock()
    {
        return loadVsRemoveAllLock.readLock();
    }

    /** @return the exclusive (write) half of the load-vs-removeAll lock, held during {@link #removeAll()} */
    Lock removeAllLock()
    {
        return loadVsRemoveAllLock.writeLock();
    }

    /**
     * Returns the element for {@code key}, lazily loading and caching the value on a miss.
     * <p>
     * On a miss, {@link BlockingCache#get(Object)} returns {@code null} with the per-key write
     * lock still held, so the miss path must always end by releasing that lock — see
     * {@link #loadValueAndReleaseLock(Object)}.
     * </p>
     *
     * @param key the key to look up; must not be {@code null}
     * @return the cached or freshly loaded element
     * @throws NullPointerException if {@code key} is {@code null}
     */
    @Override
    public Element get(final Object key)
    {
        if (key == null)
        {
            throw new NullPointerException("null keys are not permitted");
        }
        Element element = super.get(key);
        return (element != null) ? element : loadValueAndReleaseLock(key);
    }

    /**
     * Handle a cache miss by loading the value and releasing the write lock.
     * <p>
     * On a cache miss, {@link BlockingCache#get(Object) super.get(key)} returns {@code null} with the write
     * lock still held.  It is the caller's (our) responsibility to load the value and
     * {@link BlockingCache#put(Element) put} it into the cache, which will implicitly release the lock.
     * The lock must be released regardless of whether or not the load operation throws an exception.
     * </p>
     *
     * @param key the key for which we must load the value
     * @return a cache element representing the key and the corresponding value that was loaded for it
     */
    private Element loadValueAndReleaseLock(final Object key)
    {
        Element element;
        // Hold the shared read half so removeAll() cannot interleave between our load and our put.
        loadLock().lock();
        try
        {
            V value = null;
            try
            {
                value = getFromLoader(key);
            }
            finally
            {
                // Runs on both success and failure: on failure (value still null) this only makes
                // sure the per-key write lock is released before the exception propagates.
                element = putLoadedValueAndReleaseLock(key, value);
            }
        }
        finally
        {
            // It isn't safe to let removeAll proceed until after we have done our put, so this
            // coarser read lock has to be held until the very end. :(
            loadLock().unlock();
        }
        return element;
    }

    /**
     * Stores the freshly loaded value (if any) and guarantees the per-key write lock is released.
     *
     * @param key the key that was being loaded
     * @param value the loaded value, or {@code null} if the load failed (an exception is in flight)
     * @return the element that was stored, or {@code null} if there was nothing to store
     */
    @Nullable
    private Element putLoadedValueAndReleaseLock(Object key, V value)
    {
        // Putting the new element into the cache implicitly releases the lock for us
        if (value != null)
        {
            final Element element = new Element(key, value);
            put(element);
            return element;
        }

        // If the value is null, then loadValueAndReleaseLock is throwing an exception.  We still need to
        // release the lock, but the actual return value does not matter.  Note that unlike SelfPopulatingCache,
        // we are not using put(new Element(key, null)) to do this.  That would do an explicit removal (and with
        // replication!) which seems pretty pointless.
        final Sync lock = getLockForKey(key);
        // The lock may already have been released by the put(new Element(key, null)) calls in
        // getFromLoader's catch blocks, hence the isHeldByCurrentThread guard.
        if (lock.isHeldByCurrentThread(LockType.WRITE))
        {
            lock.unlock(LockType.WRITE);
        }
        return null;
    }

    /**
     * Invokes the {@link CacheLoader} for {@code key}, translating all failures to {@link CacheException}.
     * <p>
     * NOTE(review): the catch blocks call {@code put(new Element(key, null))}, which (per the comment
     * in {@link #putLoadedValueAndReleaseLock}) performs an explicit, replicated removal — the very
     * thing that comment says this class deliberately avoids.  Confirm whether replicating the
     * removal on loader failure is intentional, since the lock release alone would be handled by
     * {@code putLoadedValueAndReleaseLock} anyway.
     * </p>
     *
     * @param key the key to load; cast unchecked to {@code K}
     * @return the loaded value; never {@code null}
     * @throws CacheException if the loader throws or returns {@code null}
     */
    @SuppressWarnings({ "ConstantConditions", "unchecked" })
    private V getFromLoader(Object key)
    {
        final V value;
        try
        {
            value = loader.load((K)key);
        }
        catch (final RuntimeException re)
        {
            put(new Element(key, null));
            throw propagate(key, re);
        }
        catch (final Error err)
        {
            put(new Element(key, null));
            throw propagate(key, err);
        }

        if (value == null)
        {
            throw new CacheException("CacheLoader returned null for key " + key);
        }
        return value;
    }



    // Make sure we acquire the write lock when removing a key.  BlockingCache should really be
    // doing this itself, but it isn't.  This prevents remove from overlapping with a lazy load
    // for the same key.

    /** Removes {@code key} while holding its stripe's write lock, so removal cannot race a lazy load. */
    @Override
    public boolean remove(Serializable key, boolean doNotNotifyCacheReplicators)
    {
        final Sync sync = getLockForKey(key);
        sync.lock(LockType.WRITE);
        try
        {
            return super.remove(key, doNotNotifyCacheReplicators);
        }
        finally
        {
            sync.unlock(LockType.WRITE);
        }
    }

    /** Removes {@code key} while holding its stripe's write lock, so removal cannot race a lazy load. */
    @Override
    public boolean remove(Serializable key)
    {
        final Sync sync = getLockForKey(key);
        sync.lock(LockType.WRITE);
        try
        {
            return super.remove(key);
        }
        finally
        {
            sync.unlock(LockType.WRITE);
        }
    }

    /** Removes {@code key} while holding its stripe's write lock, so removal cannot race a lazy load. */
    @Override
    public boolean remove(Object key)
    {
        final Sync sync = getLockForKey(key);
        sync.lock(LockType.WRITE);
        try
        {
            return super.remove(key);
        }
        finally
        {
            sync.unlock(LockType.WRITE);
        }
    }

    /** Removes {@code key} while holding its stripe's write lock, so removal cannot race a lazy load. */
    @Override
    public boolean remove(Object key, final boolean doNotNotifyCacheReplicators)
    {
        final Sync sync = getLockForKey(key);
        sync.lock(LockType.WRITE);
        try
        {
            return super.remove(key, doNotNotifyCacheReplicators);
        }
        finally
        {
            sync.unlock(LockType.WRITE);
        }
    }



    // removeAll(Collection) and removeAll(Collection, boolean) can specify multiple keys.  As with the
    // single-key remove methods, these need to acquire write locks to prevent them from overlapping with
    // lazy loads.  We could take the simple approach of iterating and delegating each one to remove(Object),
    // but it is easy enough to group them into lock stripes first, and this seems like a wise precaution
    // against a large cache removing several values at once (say, through expiration).

    /** Removes the given keys, batched per lock stripe with that stripe's write lock held. */
    @Override
    public void removeAll(Collection<?> keys)
    {
        removeGroupedBySync(keys, new RemoveCallback()
        {
            @Override
            public void removeUnderLock(Collection<?> keysForSync)
            {
                underlyingCache.removeAll(keysForSync);
            }
        });
    }

    /** Removes the given keys, batched per lock stripe, optionally suppressing replication events. */
    @Override
    public void removeAll(Collection<?> keys, final boolean doNotNotifyCacheReplicators)
    {
        removeGroupedBySync(keys, new RemoveCallback()
        {
            @Override
            public void removeUnderLock(Collection<?> keysForSync)
            {
                underlyingCache.removeAll(keysForSync, doNotNotifyCacheReplicators);
            }
        });
    }

    /**
     * Partitions the supplied keys into subsets that share the same lock, then calls
     * {@link #removeGroupedBySync(Multimap, RemoveCallback)} with the results so that the callback
     * can be applied to each subset with the corresponding write lock held.
     *
     * @param allKeys the keys to be grouped into subsets that share a lock, then removed from the cache
     * @param callback the removal function to apply each lock's subset of keys while that write lock is held
     */
    private void removeGroupedBySync(Collection<?> allKeys, RemoveCallback callback)
    {
        final Multimap<Sync,?> map = Multimaps.index(allKeys, new Function<Object,Sync>()
        {
            @Override
            public Sync apply(Object key)
            {
                return getLockForKey(key);
            }
        });
        removeGroupedBySync(map, callback);
    }

    /**
     * For each sync, acquires the write lock and calls {@link RemoveCallback#removeUnderLock(Collection)} with
     * the corresponding list of keys, then releases the lock.
     *
     * @param keysBySync the mapping of locks to each lock's corresponding keys
     * @param callback the removal function to apply each lock's subset of keys while that write lock is held
     */
    private static <K> void removeGroupedBySync(Multimap<Sync,K> keysBySync, RemoveCallback callback)
    {
        for (Map.Entry<Sync,Collection<K>> entry : keysBySync.asMap().entrySet())
        {
            final Sync sync = entry.getKey();
            final Collection<K> keysUsingThisSync = entry.getValue();
            sync.lock(LockType.WRITE);
            try
            {
                callback.removeUnderLock(keysUsingThisSync);
            }
            finally
            {
                sync.unlock(LockType.WRITE);
            }
        }
    }



    // The removeAll functions would have to acquire every single striped lock individually to prevent
    // overlap with lazy loaders, and this probably isn't acceptable.  Instead, we pay the cost of an
    // additional R/W lock that loaders can share and removeAll must acquire exclusively.

    /** Clears the cache while holding {@link #removeAllLock()} exclusively, so no load is in flight. */
    @Override
    public void removeAll()
    {
        removeAllLock().lock();
        try
        {
            super.removeAll();
        }
        finally
        {
            removeAllLock().unlock();
        }
    }

    /** Clears the cache under {@link #removeAllLock()}, optionally suppressing replication events. */
    @Override
    public void removeAll(boolean doNotNotifyCacheReplicators)
    {
        removeAllLock().lock();
        try
        {
            super.removeAll(doNotNotifyCacheReplicators);
        }
        finally
        {
            removeAllLock().unlock();
        }
    }



    /**
     * Rethrows {@code e} as a {@link CacheException}: the original exception if it already is one,
     * otherwise a new {@code CacheException} wrapping it.  Declared with a return type only so callers
     * can write {@code throw propagate(...)}; this method never returns normally.
     * <p>
     * NOTE(review): an {@link Error} passed here (see {@link #getFromLoader}) is also wrapped in
     * CacheException rather than rethrown as-is — confirm that masking Errors such as
     * OutOfMemoryError is intended.
     * </p>
     *
     * @param key the cache key, included in the failure message
     * @param e the original failure
     * @return never returns normally; always throws
     */
    private static RuntimeException propagate(Object key, Throwable e)
    {
        Throwables.propagateIfInstanceOf(e, CacheException.class);
        throw new CacheException("Could not fetch object for cache entry with key \"" + key + "\".", e);
    }

    /** Strategy for removing a batch of keys while their shared stripe's write lock is held. */
    interface RemoveCallback
    {
        void removeUnderLock(Collection<?> keys);
    }
}