package com.atlassian.cache.hazelcast;

import java.util.Random;

import javax.annotation.Nonnull;

import com.atlassian.cache.CacheSettings;

import com.hazelcast.config.EvictionConfig;
import com.hazelcast.config.EvictionPolicy;
import com.hazelcast.config.MapConfig;
import com.hazelcast.config.MaxSizeConfig;
import com.hazelcast.config.NearCacheConfig;
import com.hazelcast.config.NearCachePreloaderConfig;

import static com.hazelcast.config.MaxSizeConfig.MaxSizePolicy.PER_NODE;

/**
 * Performs the (re)configuration of Hazelcast {@link MapConfig} objects.
 *
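 * <p>
 * A hypothetical usage sketch (the {@code settings}, {@code mapConfig} and {@code partitionsCount} values are
 * assumed to come from the calling code; they are not defined here):
 * <pre>{@code
 * MapConfig tuned = new HazelcastMapConfigConfigurator().configureMapConfig(settings, mapConfig, partitionsCount);
 * }</pre>
 *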
 * @since 2.4.0
 */
class HazelcastMapConfigConfigurator
{
    // If the cache is a hybrid cache, only the entry versions are tracked in the IMap. These versions should remain
    // cached longer than the values in local caches. A multiplier is applied to the config parameters that affect
    // cache eviction to enforce this.
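    // For example (illustrative numbers): a hybrid cache configured with maxEntries = 500 requests a per-node
    // capacity of 1000 entry versions, and its idle/TTL expiry times are likewise doubled.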
    public static final int HYBRID_MULTIPLIER = 2;
    // see #adjustPerNodeCapacity() for details
    public static final int SMALL_CACHES_CAPACITY_MULTIPLIER = 2;

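    // Percentage of the IMap's maxIdleSeconds used as the base for the near-cache TTL; overridable via the
    // atlassian.cache.nearCacheExpiryRatio system property (e.g. -Datlassian.cache.nearCacheExpiryRatio=80).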
    public static final int NEAR_CACHE_EXPIRY_RATIO = Integer.getInteger("atlassian.cache.nearCacheExpiryRatio", 75);

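    /**
     * Applies the given {@link CacheSettings} to the supplied {@link MapConfig}, adjusting the per-node capacity,
     * the idle/TTL expiry times and the near-cache configuration.
     *
     * @param settings the Atlassian cache settings to apply
     * @param mapConfig the Hazelcast map config to (re)configure; it is mutated in place
     * @param partitionsCount the number of partitions in the cluster
     * @return the same {@code mapConfig} instance, reconfigured
     */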
    public MapConfig configureMapConfig(CacheSettings settings, MapConfig mapConfig, int partitionsCount)
    {
        boolean hybrid = !settings.getReplicateViaCopy(true);
        int multiplier = hybrid ? HYBRID_MULTIPLIER : 1;
        Integer maxEntries = settings.getMaxEntries();

        final NearCacheConfig nearCacheConfig = mapConfig.getNearCacheConfig() == null ?
                new NearCacheConfig() :
                copyNearCacheConfig(mapConfig.getNearCacheConfig());

        if (maxEntries != null)
        {
            // In Hazelcast 3.8 the per-node capacity calculation changed
            // (https://github.com/hazelcast/hazelcast/issues/11646).
            // See the Javadoc of adjustPerNodeCapacity() for details.
            final int maxSize = adjustPerNodeCapacity(mapConfig, multiplier * maxEntries, partitionsCount);

            nearCacheConfig.setMaxSize(maxSize);
            nearCacheConfig.setEvictionPolicy(EvictionPolicy.LFU.name());
        }

        final Long expireAfterAccess = settings.getExpireAfterAccess();
        if (expireAfterAccess != null)
        {
            final int maxIdleSeconds = multiplier * roundUpToWholeSeconds(expireAfterAccess);

            mapConfig.setMaxIdleSeconds(maxIdleSeconds);

            // Near-cache hits don't reset the last-accessed timestamp on the underlying IMap entry.
            // So give the near cache a TTL that is less than the maxIdle of the IMap:
            //
            // nearCacheTTL = (NEAR_CACHE_EXPIRY_RATIO / 100 + jitter) * maxIdleSeconds
            //
            // That way the near-cache entry always expires before the IMap entry, calling back through
            // and refreshing the IMap idle timer. We also add some random jitter (+/- 0.15) so that all
            // nodes don't expire at the same time.
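            // For example (illustrative numbers): with maxIdleSeconds = 600 and a jitter of +5 percentage points,
            // nearCacheTtl = round((75 + 5) * 600 / 100.0) = 480 seconds.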

            int jitter = new Random().nextInt(30) - 15;
            int nearCacheTtl = (int) Math.round(((NEAR_CACHE_EXPIRY_RATIO + jitter) * maxIdleSeconds) / 100.0);
            nearCacheConfig.setTimeToLiveSeconds(Math.max(1, nearCacheTtl));
        }

        final Long expireAfterWrite = settings.getExpireAfterWrite();
        if (expireAfterWrite != null)
        {
            final int timeToLiveSeconds = multiplier * roundUpToWholeSeconds(expireAfterWrite);

            mapConfig.setTimeToLiveSeconds(timeToLiveSeconds);

            nearCacheConfig.setTimeToLiveSeconds(timeToLiveSeconds);
        }

        final boolean nearCache = settings.getReplicateAsynchronously(true);
        if (nearCache)
        {
            mapConfig.setNearCacheConfig(nearCacheConfig);
        }
        else
        {
            mapConfig.setNearCacheConfig(null);
        }

        return mapConfig;
    }

    /**
     * Creates a copy of a {@link NearCacheConfig} <strong>without</strong> copying the read-only objects held in its
     * fields, which may contain stale state from previous near-cache operations. The problem stems from the fact that
     * Hazelcast treats map configs as constant after cluster startup, whereas atlassian-cache allows them to be
     * changed after the cluster has started.
     *
     * @param nearCacheConfig the {@link NearCacheConfig} to be copied
     * @return a copy of the {@link NearCacheConfig} without the read-only objects
     */
    private NearCacheConfig copyNearCacheConfig(@Nonnull NearCacheConfig nearCacheConfig)
    {
        NearCacheConfig nearCacheConfigCopy = new NearCacheConfig(nearCacheConfig);
        EvictionConfig evictionConfigCopy = new EvictionConfig(nearCacheConfig.getEvictionConfig());
        nearCacheConfigCopy.setEvictionConfig(evictionConfigCopy);
        NearCachePreloaderConfig preloaderConfigCopy = new NearCachePreloaderConfig(nearCacheConfigCopy.getPreloaderConfig());
        nearCacheConfigCopy.setPreloaderConfig(preloaderConfigCopy);
        return nearCacheConfigCopy;
    }

    /**
     * In Hazelcast 3.8 the per-node capacity calculation changed
     * (https://github.com/hazelcast/hazelcast/issues/11646). In a nutshell, the capacity check is now made per
     * partition: eviction is triggered when {@code partitionSize > desiredPerNodeSize * numberOfNodes / numberOfPartitions}.
     *
     * The old algorithm, which summed the sizes of all partitions of the given map owned by the current node, was
     * replaced with the simplified formula above because, according to the Hazelcast devs, "partition thread should
     * interact only with partition, which belongs to this thread" :(. My guess is this calculation assumes that
     * all objects are <b>evenly distributed</b> among Hazelcast partitions (which goes back to the Hazelcast key
     * hashing algorithm, but that's a different story).
     *
     * This simplified calculation comes at a price:
     * <ul>
     *     <li>The formula doesn't make sense when {@code desiredPerNodeSize < numberOfPartitions}. In this case,
     *     all objects <b>are evicted immediately after insertion</b>.</li>
     *     <li>The formula depends on both static ({@code desiredPerNodeSize}, {@code numberOfPartitions}) and
     *     dynamic ({@code numberOfNodes}) parameters, which makes the effective capacity very hard to predict.</li>
     * </ul>
     *
     * So the best we can do here is to guarantee a <b>minimum</b> per-node capacity of
     * {@code SMALL_CACHES_CAPACITY_MULTIPLIER * partitionsCount} entries (i.e. at least
     * {@link #SMALL_CACHES_CAPACITY_MULTIPLIER} entries per partition), unless more is requested.
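     * <p>
     * For example (illustrative numbers): with 271 partitions, a requested per-node size of 100 is bumped up to
     * {@code max(2 * 271, 100) = 542}, while a request of 5000 is left unchanged.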
     *
     * @param mapConfig cache map config
     * @param desiredPerNodeSize requested per-node capacity
     * @param partitionsCount number of partitions in the cluster
     * @return the adjusted per-node capacity
     */
    private int adjustPerNodeCapacity(MapConfig mapConfig, int desiredPerNodeSize, int partitionsCount)
    {
        int adjustedCacheSize = Math.max(SMALL_CACHES_CAPACITY_MULTIPLIER * partitionsCount, desiredPerNodeSize);
        mapConfig.setMaxSizeConfig(new MaxSizeConfig().setMaxSizePolicy(PER_NODE).setSize(adjustedCacheSize));
        mapConfig.setEvictionPolicy(EvictionPolicy.LFU);
        return adjustedCacheSize;
    }

    private static int roundUpToWholeSeconds(final Long millis)
    {
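        // e.g. an expiry of 1500 ms rounds up to 2 whole seconds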
        return (int) Math.ceil(millis / 1000d);
    }
}