
Commit e01f8c2

Change the default recycler type.
By default, recycling is no longer thread-local. Instead, there are several pools of objects to recycle, and a thread picks a pool based on its id. Each pool is protected by its own lock, so up to ${number of pools} threads can recycle objects concurrently. Recyclers have also been refactored for better composability: for example, there is a soft recycler that wraps recycled data in a SoftReference, and a thread-local recycler that takes any recycler factory and instantiates a dedicated instance per thread (see the sketch after the change summary below). RecyclerBenchmark has been added to measure the overhead of object recycling depending on the recycler type and the number of threads recycling objects concurrently. Closes elastic#4647
1 parent 3ab73ab commit e01f8c2

20 files changed: +766 -329 lines
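The commit message describes the new default recycling scheme: instead of one pool per thread, recycled objects live in several pools, a thread picks a pool from its id, and each pool is guarded by its own lock so that up to the number of pools threads can recycle concurrently. The sketch below only illustrates that striping idea; it is not the code added by this commit, and the StripedPool class and its method names are invented for the example.

    import java.util.ArrayDeque;
    import java.util.Deque;

    // Illustrative only: a recycler striped into several pools. A thread picks a
    // pool from its id, and each pool has its own lock, so up to pools.length
    // threads can obtain/recycle objects at the same time.
    final class StripedPool<T> {
        private final Deque<T>[] pools;

        @SuppressWarnings("unchecked")
        StripedPool(int concurrencyLevel) {
            pools = new Deque[concurrencyLevel];
            for (int i = 0; i < concurrencyLevel; i++) {
                pools[i] = new ArrayDeque<T>();
            }
        }

        private Deque<T> pool() {
            // map the current thread id onto one of the pools
            final long id = Thread.currentThread().getId();
            return pools[(int) (id % pools.length)];
        }

        T obtain() {
            final Deque<T> pool = pool();
            synchronized (pool) {        // one lock per pool, not a global lock
                return pool.pollFirst(); // null means the caller must allocate a new object
            }
        }

        void recycle(T value) {
            final Deque<T> pool = pool();
            synchronized (pool) {
                pool.addFirst(value);
            }
        }
    }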

src/main/java/org/elasticsearch/cache/recycler/CacheRecycler.java

Lines changed: 40 additions & 35 deletions
@@ -24,11 +24,14 @@
 import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.recycler.*;
+import org.elasticsearch.common.recycler.Recycler;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
 
 import java.util.Locale;
 
+import static org.elasticsearch.common.recycler.Recyclers.*;
+
 @SuppressWarnings("unchecked")
 public class CacheRecycler extends AbstractComponent {
 
@@ -66,8 +69,9 @@ public CacheRecycler(Settings settings) {
         final Type type = Type.parse(settings.get("type"));
         int limit = settings.getAsInt("limit", 10);
         int smartSize = settings.getAsInt("smart_size", 1024);
+        final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
 
-        hashMap = build(type, limit, smartSize, new Recycler.C<ObjectObjectOpenHashMap>() {
+        hashMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectObjectOpenHashMap>() {
             @Override
             public ObjectObjectOpenHashMap newInstance(int sizing) {
                 return new ObjectObjectOpenHashMap(size(sizing));
@@ -78,7 +82,7 @@ public void clear(ObjectObjectOpenHashMap value) {
                 value.clear();
             }
         });
-        hashSet = build(type, limit, smartSize, new Recycler.C<ObjectOpenHashSet>() {
+        hashSet = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectOpenHashSet>() {
             @Override
             public ObjectOpenHashSet newInstance(int sizing) {
                 return new ObjectOpenHashSet(size(sizing), 0.5f);
@@ -89,7 +93,7 @@ public void clear(ObjectOpenHashSet value) {
                 value.clear();
             }
         });
-        doubleObjectMap = build(type, limit, smartSize, new Recycler.C<DoubleObjectOpenHashMap>() {
+        doubleObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<DoubleObjectOpenHashMap>() {
             @Override
             public DoubleObjectOpenHashMap newInstance(int sizing) {
                 return new DoubleObjectOpenHashMap(size(sizing));
@@ -100,7 +104,7 @@ public void clear(DoubleObjectOpenHashMap value) {
                 value.clear();
             }
         });
-        longObjectMap = build(type, limit, smartSize, new Recycler.C<LongObjectOpenHashMap>() {
+        longObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongObjectOpenHashMap>() {
             @Override
             public LongObjectOpenHashMap newInstance(int sizing) {
                 return new LongObjectOpenHashMap(size(sizing));
@@ -111,7 +115,7 @@ public void clear(LongObjectOpenHashMap value) {
                 value.clear();
             }
         });
-        longLongMap = build(type, limit, smartSize, new Recycler.C<LongLongOpenHashMap>() {
+        longLongMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongLongOpenHashMap>() {
             @Override
             public LongLongOpenHashMap newInstance(int sizing) {
                 return new LongLongOpenHashMap(size(sizing));
@@ -122,7 +126,7 @@ public void clear(LongLongOpenHashMap value) {
                 value.clear();
             }
         });
-        intIntMap = build(type, limit, smartSize, new Recycler.C<IntIntOpenHashMap>() {
+        intIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<IntIntOpenHashMap>() {
             @Override
             public IntIntOpenHashMap newInstance(int sizing) {
                 return new IntIntOpenHashMap(size(sizing));
@@ -133,7 +137,7 @@ public void clear(IntIntOpenHashMap value) {
                 value.clear();
             }
         });
-        floatIntMap = build(type, limit, smartSize, new Recycler.C<FloatIntOpenHashMap>() {
+        floatIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<FloatIntOpenHashMap>() {
             @Override
             public FloatIntOpenHashMap newInstance(int sizing) {
                 return new FloatIntOpenHashMap(size(sizing));
@@ -144,7 +148,7 @@ public void clear(FloatIntOpenHashMap value) {
                 value.clear();
             }
         });
-        doubleIntMap = build(type, limit, smartSize, new Recycler.C<DoubleIntOpenHashMap>() {
+        doubleIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<DoubleIntOpenHashMap>() {
             @Override
             public DoubleIntOpenHashMap newInstance(int sizing) {
                 return new DoubleIntOpenHashMap(size(sizing));
@@ -155,7 +159,7 @@ public void clear(DoubleIntOpenHashMap value) {
                 value.clear();
             }
         });
-        longIntMap = build(type, limit, smartSize, new Recycler.C<LongIntOpenHashMap>() {
+        longIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<LongIntOpenHashMap>() {
             @Override
             public LongIntOpenHashMap newInstance(int sizing) {
                 return new LongIntOpenHashMap(size(sizing));
@@ -166,7 +170,7 @@ public void clear(LongIntOpenHashMap value) {
                 value.clear();
             }
         });
-        objectIntMap = build(type, limit, smartSize, new Recycler.C<ObjectIntOpenHashMap>() {
+        objectIntMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectIntOpenHashMap>() {
             @Override
             public ObjectIntOpenHashMap newInstance(int sizing) {
                 return new ObjectIntOpenHashMap(size(sizing));
@@ -177,7 +181,7 @@ public void clear(ObjectIntOpenHashMap value) {
                 value.clear();
             }
         });
-        intObjectMap = build(type, limit, smartSize, new Recycler.C<IntObjectOpenHashMap>() {
+        intObjectMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<IntObjectOpenHashMap>() {
             @Override
             public IntObjectOpenHashMap newInstance(int sizing) {
                 return new IntObjectOpenHashMap(size(sizing));
@@ -188,7 +192,7 @@ public void clear(IntObjectOpenHashMap value) {
                 value.clear();
             }
         });
-        objectFloatMap = build(type, limit, smartSize, new Recycler.C<ObjectFloatOpenHashMap>() {
+        objectFloatMap = build(type, limit, smartSize, availableProcessors, new Recycler.C<ObjectFloatOpenHashMap>() {
             @Override
             public ObjectFloatOpenHashMap newInstance(int sizing) {
                 return new ObjectFloatOpenHashMap(size(sizing));
@@ -253,12 +257,12 @@ static int size(int sizing) {
         return sizing > 0 ? sizing : 256;
     }
 
-    private <T> Recycler<T> build(Type type, int limit, int smartSize, Recycler.C<T> c) {
+    private <T> Recycler<T> build(Type type, int limit, int smartSize, int availableProcessors, Recycler.C<T> c) {
         Recycler<T> recycler;
         try {
-            recycler = type.build(c, limit);
+            recycler = type.build(c, limit, availableProcessors);
             if (smartSize > 0) {
-                recycler = new Recycler.Sizing<T>(recycler, smartSize);
+                recycler = sizing(recycler, none(c), smartSize);
             }
         } catch (IllegalArgumentException ex) {
             throw new ElasticsearchIllegalArgumentException("no type support [" + type + "] for recycler");
@@ -270,40 +274,44 @@ private <T> Recycler<T> build(Type type, int limit, int smartSize, Recycler.C<T>
     public static enum Type {
         SOFT_THREAD_LOCAL {
             @Override
-            <T> Recycler<T> build(Recycler.C<T> c, int limit) {
-                return new SoftThreadLocalRecycler<T>(c, limit);
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+                return threadLocal(softFactory(dequeFactory(c, limit)));
             }
+        },
+        THREAD_LOCAL {
             @Override
-            boolean perThread() {
-                return true;
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+                return threadLocal(dequeFactory(c, limit));
             }
         },
-        THREAD_LOCAL {
+        QUEUE {
             @Override
-            <T> Recycler<T> build(Recycler.C<T> c, int limit) {
-                return new ThreadLocalRecycler<T>(c, limit);
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+                return concurrentDeque(c, limit);
             }
+        },
+        SOFT_CONCURRENT {
             @Override
-            boolean perThread() {
-                return true;
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+                return concurrent(softFactory(dequeFactory(c, limit)), availableProcessors);
             }
         },
-        QUEUE {
+        CONCURRENT {
             @Override
-            <T> Recycler<T> build(Recycler.C<T> c, int limit) {
-                return new QueueRecycler<T>(c, limit);
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+                return concurrent(dequeFactory(c, limit), availableProcessors);
             }
         },
         NONE {
             @Override
-            <T> Recycler<T> build(Recycler.C<T> c, int limit) {
-                return new NoneRecycler<T>(c);
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors) {
+                return none(c);
             }
         };
 
         public static Type parse(String type) {
             if (Strings.isNullOrEmpty(type)) {
-                return SOFT_THREAD_LOCAL;
+                return SOFT_CONCURRENT;
             }
             try {
                 return Type.valueOf(type.toUpperCase(Locale.ROOT));
@@ -312,9 +320,6 @@ public static Type parse(String type) {
             }
         }
 
-        abstract <T> Recycler<T> build(Recycler.C<T> c, int limit);
-        boolean perThread() {
-            return false;
-        }
+        abstract <T> Recycler<T> build(Recycler.C<T> c, int limit, int availableProcessors);
     }
 }
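The Type enum above now builds each variant by composing the static helpers in Recyclers (dequeFactory, softFactory, threadLocal, concurrent, concurrentDeque, none, sizing) instead of instantiating dedicated recycler classes. A minimal usage sketch of that composition follows; it assumes the usual obtain()/v()/release() contract of Recycler.V, and the limit and concurrency values are made up for the example.

    import org.elasticsearch.common.recycler.Recycler;

    import static org.elasticsearch.common.recycler.Recyclers.*;

    // Sketch only: shows how the factory methods compose; exact method names on
    // Recycler.V may differ slightly from this commit.
    public class RecyclerComposition {

        public static void main(String[] args) {
            Recycler.C<long[]> pages = new Recycler.C<long[]>() {
                @Override
                public long[] newInstance(int sizing) {
                    return new long[1024];
                }
                @Override
                public void clear(long[] value) {} // nothing to clear for primitive pages
            };

            // Equivalent to SOFT_CONCURRENT, the new default: a deque-backed pool,
            // entries held through SoftReferences, striped over 4 concurrent pools.
            Recycler<long[]> recycler = concurrent(softFactory(dequeFactory(pages, 10)), 4);

            Recycler.V<long[]> v = recycler.obtain();   // borrow a page (recycled or new)
            try {
                v.v()[0] = 42;                          // use the underlying array
            } finally {
                v.release();                            // return the page to its pool
            }
        }
    }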

src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java

Lines changed: 68 additions & 22 deletions
@@ -19,18 +19,20 @@
 
 package org.elasticsearch.cache.recycler;
 
-import org.elasticsearch.cache.recycler.CacheRecycler.Type;
+import com.google.common.base.Strings;
+import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.recycler.NoneRecycler;
 import org.elasticsearch.common.recycler.Recycler;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.util.Arrays;
+import java.util.Locale;
+
+import static org.elasticsearch.common.recycler.Recyclers.*;
 
 /** A recycler of fixed-size pages. */
 public class PageCacheRecycler extends AbstractComponent {
@@ -79,17 +81,9 @@ private static int maxCount(long limit, long pageSize, double weight, double tot
     public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
         super(settings);
         final Type type = Type.parse(componentSettings.get(TYPE));
-        long limit = componentSettings.getAsMemory(LIMIT_HEAP, "10%").bytes();
-        if (type.perThread()) {
-            final long limitPerThread = componentSettings.getAsBytesSize(LIMIT_PER_THREAD, new ByteSizeValue(-1)).bytes();
-            if (limitPerThread != -1) {
-                // if the per_thread limit is set, it has precedence
-                limit = limitPerThread;
-            } else {
-                // divide memory equally to all search threads
-                limit /= maximumSearchThreadPoolSize(threadPool, settings);
-            }
-        }
+        final long limit = componentSettings.getAsMemory(LIMIT_HEAP, "10%").bytes();
+        final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
+        final int searchThreadPoolSize = maximumSearchThreadPoolSize(threadPool, settings);
 
         // We have a global amount of memory that we need to divide across data types.
         // Since some types are more useful than other ones we give them different weights.
@@ -113,39 +107,39 @@ public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
 
         final double totalWeight = bytesWeight + intsWeight + longsWeight + doublesWeight + objectsWeight;
 
-        bytePage = build(type, maxCount(limit, BigArrays.BYTE_PAGE_SIZE, bytesWeight, totalWeight), new Recycler.C<byte[]>() {
+        bytePage = build(type, maxCount(limit, BigArrays.BYTE_PAGE_SIZE, bytesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<byte[]>() {
             @Override
             public byte[] newInstance(int sizing) {
                 return new byte[BigArrays.BYTE_PAGE_SIZE];
             }
             @Override
             public void clear(byte[] value) {}
         });
-        intPage = build(type, maxCount(limit, BigArrays.INT_PAGE_SIZE, intsWeight, totalWeight), new Recycler.C<int[]>() {
+        intPage = build(type, maxCount(limit, BigArrays.INT_PAGE_SIZE, intsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<int[]>() {
             @Override
             public int[] newInstance(int sizing) {
                 return new int[BigArrays.INT_PAGE_SIZE];
             }
             @Override
             public void clear(int[] value) {}
         });
-        longPage = build(type, maxCount(limit, BigArrays.LONG_PAGE_SIZE, longsWeight, totalWeight), new Recycler.C<long[]>() {
+        longPage = build(type, maxCount(limit, BigArrays.LONG_PAGE_SIZE, longsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<long[]>() {
             @Override
             public long[] newInstance(int sizing) {
                 return new long[BigArrays.LONG_PAGE_SIZE];
             }
             @Override
             public void clear(long[] value) {}
         });
-        doublePage = build(type, maxCount(limit, BigArrays.DOUBLE_PAGE_SIZE, doublesWeight, totalWeight), new Recycler.C<double[]>() {
+        doublePage = build(type, maxCount(limit, BigArrays.DOUBLE_PAGE_SIZE, doublesWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<double[]>() {
             @Override
             public double[] newInstance(int sizing) {
                 return new double[BigArrays.DOUBLE_PAGE_SIZE];
             }
             @Override
             public void clear(double[] value) {}
         });
-        objectPage = build(type, maxCount(limit, BigArrays.OBJECT_PAGE_SIZE, objectsWeight, totalWeight), new Recycler.C<Object[]>() {
+        objectPage = build(type, maxCount(limit, BigArrays.OBJECT_PAGE_SIZE, objectsWeight, totalWeight), searchThreadPoolSize, availableProcessors, new Recycler.C<Object[]>() {
             @Override
             public Object[] newInstance(int sizing) {
                 return new Object[BigArrays.OBJECT_PAGE_SIZE];
@@ -194,13 +188,65 @@ public Recycler.V<Object[]> objectPage() {
         return objectPage.obtain();
     }
 
-    private static <T> Recycler<T> build(Type type, int limit, Recycler.C<T> c) {
+    private static <T> Recycler<T> build(Type type, int limit, int estimatedThreadPoolSize, int availableProcessors, Recycler.C<T> c) {
         final Recycler<T> recycler;
         if (limit == 0) {
-            recycler = new NoneRecycler<T>(c);
+            recycler = none(c);
         } else {
-            recycler = type.build(c, limit);
+            recycler = type.build(c, limit, estimatedThreadPoolSize, availableProcessors);
         }
         return recycler;
     }
+
+    public static enum Type {
+        SOFT_THREAD_LOCAL {
+            @Override
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+                return threadLocal(softFactory(dequeFactory(c, limit / estimatedThreadPoolSize)));
+            }
+        },
+        THREAD_LOCAL {
+            @Override
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+                return threadLocal(dequeFactory(c, limit / estimatedThreadPoolSize));
+            }
+        },
+        QUEUE {
+            @Override
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+                return concurrentDeque(c, limit);
+            }
+        },
+        SOFT_CONCURRENT {
+            @Override
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+                return concurrent(softFactory(dequeFactory(c, limit / availableProcessors)), availableProcessors);
+            }
+        },
+        CONCURRENT {
+            @Override
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+                return concurrent(dequeFactory(c, limit / availableProcessors), availableProcessors);
+            }
+        },
+        NONE {
+            @Override
+            <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
+                return none(c);
+            }
+        };
+
+        public static Type parse(String type) {
+            if (Strings.isNullOrEmpty(type)) {
+                return SOFT_CONCURRENT;
+            }
+            try {
+                return Type.valueOf(type.toUpperCase(Locale.ROOT));
+            } catch (IllegalArgumentException e) {
+                throw new ElasticsearchIllegalArgumentException("no type support [" + type + "]");
+            }
+        }
+
+        abstract <T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors);
    }
 }
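In PageCacheRecycler the global byte budget is first turned into a per-type page count by maxCount(limit, pageSize, weight, totalWeight), whose signature appears in the hunk header above but whose body is not part of this diff. The new Type enum then divides that count by the estimated search thread pool size for the thread-local variants, or by availableProcessors for the concurrent ones, so the overall memory bound stays roughly the same whichever type is configured. A back-of-the-envelope sketch follows, assuming maxCount simply scales the limit by the type's weight and divides by the page size (the real method may round or clamp differently), with illustrative numbers.

    // Illustrative only: assumed shape of PageCacheRecycler.maxCount and made-up inputs.
    public class PageBudgetExample {

        static int maxCount(long limit, long pageSize, double weight, double totalWeight) {
            return (int) (weight / totalWeight * limit / pageSize);
        }

        public static void main(String[] args) {
            long limit = 100L * 1024 * 1024;              // say the 10% heap limit resolves to 100 MB
            long bytePageSize = 16 * 1024;                // BigArrays.BYTE_PAGE_SIZE (16 KB pages)
            double bytesWeight = 1.0, totalWeight = 10.0; // illustrative weights only
            int availableProcessors = 8;

            int pages = maxCount(limit, bytePageSize, bytesWeight, totalWeight); // 640 pages
            // CONCURRENT/SOFT_CONCURRENT split the budget across the concurrent pools;
            // THREAD_LOCAL/SOFT_THREAD_LOCAL split it across the search thread pool size.
            int perPool = pages / availableProcessors;                           // 80 pages per pool
            System.out.println(pages + " byte pages total, " + perPool + " per pool");
        }
    }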
