import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.collect.Sets;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
-import java.util.Set;

/**
 * Performs the index operation.
@@ -106,11 +103,14 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
    }

    @Override protected PrimaryResponse<BulkShardResponse> shardOperationOnPrimary(ClusterState clusterState, ShardOperationRequest shardRequest) {
-        IndexShard indexShard = indexShard(shardRequest);
        final BulkShardRequest request = shardRequest.request;
-        BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
+        IndexShard indexShard = indexShard(shardRequest);
+
        Engine.Operation[] ops = new Engine.Operation[request.items().length];
-        for (int i = 0; i < ops.length; i++) {
+
+
+        BulkItemResponse[] responses = new BulkItemResponse[request.items().length];
+        for (int i = 0; i < request.items().length; i++) {
            BulkItemRequest item = request.items()[i];
            if (item.request() instanceof IndexRequest) {
                IndexRequest indexRequest = (IndexRequest) item.request();
@@ -126,11 +126,39 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation

                    SourceToParse sourceToParse = SourceToParse.source(indexRequest.source()).type(indexRequest.type()).id(indexRequest.id())
                            .routing(indexRequest.routing()).parent(indexRequest.parent());
+                    long version;
+                    ParsedDocument doc;
+                    Engine.Operation op;
                    if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
-                        ops[i] = indexShard.prepareIndex(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.PRIMARY);
+                        Engine.Index index = indexShard.prepareIndex(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.PRIMARY);
+                        doc = indexShard.index(index);
+                        version = index.version();
+                        op = index;
                    } else {
-                        ops[i] = indexShard.prepareCreate(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.PRIMARY);
+                        Engine.Create create = indexShard.prepareCreate(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.PRIMARY);
+                        doc = indexShard.create(create);
+                        version = create.version();
+                        op = create;
+                    }
+                    // update the version on request so it will happen on the replicas
+                    indexRequest.version(version);
+
+                    // update mapping on master if needed, we won't update changes to the same type, since once it's changed, it won't have mappers added
+                    if (doc.mappersAdded()) {
+                        updateMappingOnMaster(indexRequest);
+                    }
+
+                    // if we are going to percolate, then we need to keep this op for the postPrimary operation
+                    if (Strings.hasLength(indexRequest.percolate())) {
+                        if (ops == null) {
+                            ops = new Engine.Operation[request.items().length];
+                        }
+                        ops[i] = op;
                    }
+
+                    // add the response
+                    responses[i] = new BulkItemResponse(item.id(), indexRequest.opType().toString().toLowerCase(),
+                            new IndexResponse(indexRequest.index(), indexRequest.type(), indexRequest.id(), version));
                } catch (Exception e) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("[" + shardRequest.request.index() + "][" + shardRequest.shardId + "]" + ": Failed to execute bulk item (index) [" + indexRequest + "]", e);
@@ -141,7 +169,14 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
            } else if (item.request() instanceof DeleteRequest) {
                DeleteRequest deleteRequest = (DeleteRequest) item.request();
                try {
-                    ops[i] = indexShard.prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version()).origin(Engine.Operation.Origin.PRIMARY);
+                    Engine.Delete delete = indexShard.prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version()).origin(Engine.Operation.Origin.PRIMARY);
+                    indexShard.delete(delete);
+                    // update the request with the version so it will go to the replicas
+                    deleteRequest.version(delete.version());
+
+                    // add the response
+                    responses[i] = new BulkItemResponse(item.id(), "delete",
+                            new DeleteResponse(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), delete.version(), delete.notFound()));
                } catch (Exception e) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("[" + shardRequest.request.index() + "][" + shardRequest.shardId + "]" + ": Failed to execute bulk item (delete) [" + deleteRequest + "]", e);
@@ -152,61 +187,11 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
            }
        }

-        EngineException[] failures = indexShard.bulk(new Engine.Bulk(ops).refresh(request.refresh()));
-        // process failures and mappings
-        Set<String> processedTypes = Sets.newHashSet();
-        for (int i = 0; i < ops.length; i++) {
-            // failed to parse, already set the failure, skip
-            if (ops[i] == null) {
-                continue;
-            }
-
-            BulkItemRequest item = request.items()[i];
-            if (item.request() instanceof IndexRequest) {
-                IndexRequest indexRequest = (IndexRequest) item.request();
-                long version;
-                if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
-                    Engine.Index engineIndex = (Engine.Index) ops[i];
-                    version = engineIndex.version();
-                    if (!processedTypes.contains(engineIndex.type())) {
-                        processedTypes.add(engineIndex.type());
-                        ParsedDocument doc = engineIndex.parsedDoc();
-                        if (doc.mappersAdded()) {
-                            updateMappingOnMaster(indexRequest);
-                        }
-                    }
-                } else {
-                    Engine.Create engineCreate = (Engine.Create) ops[i];
-                    version = engineCreate.version();
-                    if (!processedTypes.contains(engineCreate.type())) {
-                        processedTypes.add(engineCreate.type());
-                        ParsedDocument doc = engineCreate.parsedDoc();
-                        if (doc.mappersAdded()) {
-                            updateMappingOnMaster(indexRequest);
-                        }
-                    }
-                }
-                // update the version on request so it will happen on the replicas
-                indexRequest.version(version);
-                if (failures != null && failures[i] != null) {
-                    responses[i] = new BulkItemResponse(item.id(), indexRequest.opType().toString().toLowerCase(),
-                            new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), ExceptionsHelper.detailedMessage(failures[i])));
-                } else {
-                    responses[i] = new BulkItemResponse(item.id(), indexRequest.opType().toString().toLowerCase(),
-                            new IndexResponse(indexRequest.index(), indexRequest.type(), indexRequest.id(), version));
-                }
-            } else if (item.request() instanceof DeleteRequest) {
-                DeleteRequest deleteRequest = (DeleteRequest) item.request();
-                Engine.Delete engineDelete = (Engine.Delete) ops[i];
-                // update the version on request so it will happen on the replicas
-                deleteRequest.version(engineDelete.version());
-                if (failures != null && failures[i] != null) {
-                    responses[i] = new BulkItemResponse(item.id(), "delete",
-                            new BulkItemResponse.Failure(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), ExceptionsHelper.detailedMessage(failures[i])));
-                } else {
-                    responses[i] = new BulkItemResponse(item.id(), "delete",
-                            new DeleteResponse(deleteRequest.index(), deleteRequest.type(), deleteRequest.id(), engineDelete.version(), engineDelete.notFound()));
-                }
+        if (request.refresh()) {
+            try {
+                indexShard.refresh(new Engine.Refresh(false));
+            } catch (Exception e) {
+                // ignore
            }
        }
        BulkShardResponse response = new BulkShardResponse(new ShardId(request.index(), request.shardId()), responses);
@@ -223,6 +208,9 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
                // failure, continue
                continue;
            }
+            if (ops[i] == null) {
+                continue; // failed
+            }
            if (itemRequest.request() instanceof IndexRequest) {
                IndexRequest indexRequest = (IndexRequest) itemRequest.request();
                if (!Strings.hasLength(indexRequest.percolate())) {
@@ -247,33 +235,41 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation
    @Override protected void shardOperationOnReplica(ShardOperationRequest shardRequest) {
        IndexShard indexShard = indexShard(shardRequest);
        final BulkShardRequest request = shardRequest.request;
-        Engine.Operation[] ops = new Engine.Operation[request.items().length];
-        for (int i = 0; i < ops.length; i++) {
+        for (int i = 0; i < request.items().length; i++) {
            BulkItemRequest item = request.items()[i];
            if (item.request() instanceof IndexRequest) {
                IndexRequest indexRequest = (IndexRequest) item.request();
                try {
                    SourceToParse sourceToParse = SourceToParse.source(indexRequest.source()).type(indexRequest.type()).id(indexRequest.id())
                            .routing(indexRequest.routing()).parent(indexRequest.parent());
                    if (indexRequest.opType() == IndexRequest.OpType.INDEX) {
-                        ops[i] = indexShard.prepareIndex(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.REPLICA);
+                        Engine.Index index = indexShard.prepareIndex(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.REPLICA);
+                        indexShard.index(index);
                    } else {
-                        ops[i] = indexShard.prepareCreate(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.REPLICA);
+                        Engine.Create create = indexShard.prepareCreate(sourceToParse).version(indexRequest.version()).origin(Engine.Operation.Origin.REPLICA);
+                        indexShard.create(create);
                    }
                } catch (Exception e) {
                    // ignore, we are on backup
                }
            } else if (item.request() instanceof DeleteRequest) {
                DeleteRequest deleteRequest = (DeleteRequest) item.request();
                try {
-                    ops[i] = indexShard.prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version()).origin(Engine.Operation.Origin.REPLICA);
+                    Engine.Delete delete = indexShard.prepareDelete(deleteRequest.type(), deleteRequest.id(), deleteRequest.version()).origin(Engine.Operation.Origin.REPLICA);
+                    indexShard.delete(delete);
                } catch (Exception e) {
                    // ignore, we are on backup
                }
            }
        }

-        indexShard.bulk(new Engine.Bulk(ops).refresh(request.refresh()));
+        if (request.refresh()) {
+            try {
+                indexShard.refresh(new Engine.Refresh(false));
+            } catch (Exception e) {
+                // ignore
+            }
+        }
    }

    private void updateMappingOnMaster(final IndexRequest request) {