mirror of https://github.com/elastic/elasticsearch.git (synced 2025-04-25 07:37:19 -04:00)
Goodbye and thank you synced flush! (#50882)
Synced flush was a brilliant idea. It supported instant recoveries with quite a small implementation. However, with the presence of sequence numbers and retention leases, it is no longer needed. This change removes it from 8.0. Relates #5077
This commit is contained in:
parent adb56e5842
commit 09b46c8646

53 changed files with 290 additions and 3731 deletions
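Editor's note: for callers migrating off the removed API, the replacement is an ordinary flush. Below is a minimal sketch using the high-level REST client types that appear in this diff; the class name and helper method are hypothetical, not part of the commit.

import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;

import java.io.IOException;

class FlushMigrationSketch {
    // Where flushSynced(request, options) was called before, a plain flush is
    // the recommended replacement: on 7.6+ it has the same recovery benefits,
    // thanks to sequence numbers and retention leases.
    static int flushAndReportFailures(RestHighLevelClient client) throws IOException {
        FlushRequest request = new FlushRequest("index1"); // flush a single index
        FlushResponse response = client.indices().flush(request, RequestOptions.DEFAULT);
        return response.getFailedShards(); // shard copies that failed to flush, if any
    }
}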
@@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;

@@ -415,41 +414,6 @@ public final class IndicesClient {
FlushResponse::fromXContent, listener, emptySet());
}

/**
* Initiate a synced flush manually using the synced flush API.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-synced-flush-api.html">
* Synced flush API on elastic.co</a>
* @param syncedFlushRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
* @deprecated synced flush is deprecated and will be removed in 8.0.
* Use {@link #flush(FlushRequest, RequestOptions)} instead.
*/
@Deprecated
public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options,
SyncedFlushResponse::fromXContent, emptySet());
}

/**
* Asynchronously initiate a synced flush manually using the synced flush API.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-synced-flush-api.html">
* Synced flush API on elastic.co</a>
* @param syncedFlushRequest the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
* @deprecated synced flush is deprecated and will be removed in 8.0.
* Use {@link #flushAsync(FlushRequest, RequestOptions, ActionListener)} instead.
*/
@Deprecated
public Cancellable flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, RequestOptions options,
ActionListener<SyncedFlushResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options,
SyncedFlushResponse::fromXContent, listener, emptySet());
}

/**
* Retrieve the settings of one or more indices.
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-settings.html">
@@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;

@@ -194,15 +193,6 @@ final class IndicesRequestConverters {
return request;
}

static Request flushSynced(SyncedFlushRequest syncedFlushRequest) {
String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices();
Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush/synced"));
RequestConverters.Params parameters = new RequestConverters.Params();
parameters.withIndicesOptions(syncedFlushRequest.indicesOptions());
request.addParameters(parameters.asMap());
return request;
}

static Request forceMerge(ForceMergeRequest forceMergeRequest) {
String[] indices = forceMergeRequest.indices() == null ? Strings.EMPTY_ARRAY : forceMergeRequest.indices();
Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_forcemerge"));
@@ -1,346 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;
import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken;

public class SyncedFlushResponse implements ToXContentObject {

public static final String SHARDS_FIELD = "_shards";

private ShardCounts totalCounts;
private Map<String, IndexResult> indexResults;

SyncedFlushResponse(ShardCounts totalCounts, Map<String, IndexResult> indexResults) {
this.totalCounts = new ShardCounts(totalCounts.total, totalCounts.successful, totalCounts.failed);
this.indexResults = Collections.unmodifiableMap(indexResults);
}

/**
* @return The total number of shard copies that were processed across all indexes
*/
public int totalShards() {
return totalCounts.total;
}

/**
* @return The number of successful shard copies that were processed across all indexes
*/
public int successfulShards() {
return totalCounts.successful;
}

/**
* @return The number of failed shard copies that were processed across all indexes
*/
public int failedShards() {
return totalCounts.failed;
}

/**
* @return A map of results for each index where the keys of the map are the index names
* and the values are the results encapsulated in {@link IndexResult}.
*/
public Map<String, IndexResult> getIndexResults() {
return indexResults;
}

ShardCounts getShardCounts() {
return totalCounts;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.startObject(SHARDS_FIELD);
totalCounts.toXContent(builder, params);
builder.endObject();
for (Map.Entry<String, IndexResult> entry: indexResults.entrySet()) {
String indexName = entry.getKey();
IndexResult indexResult = entry.getValue();
builder.startObject(indexName);
indexResult.toXContent(builder, params);
builder.endObject();
}
builder.endObject();
return builder;
}

public static SyncedFlushResponse fromXContent(XContentParser parser) throws IOException {
ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
ShardCounts totalCounts = null;
Map<String, IndexResult> indexResults = new HashMap<>();
XContentLocation startLoc = parser.getTokenLocation();
while (parser.nextToken().equals(Token.FIELD_NAME)) {
if (parser.currentName().equals(SHARDS_FIELD)) {
ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation);
totalCounts = ShardCounts.fromXContent(parser);
} else {
String indexName = parser.currentName();
IndexResult indexResult = IndexResult.fromXContent(parser);
indexResults.put(indexName, indexResult);
}
}
if (totalCounts != null) {
return new SyncedFlushResponse(totalCounts, indexResults);
} else {
throw new ParsingException(
startLoc,
"Unable to reconstruct object. Total counts for shards couldn't be parsed."
);
}
}

/**
* Encapsulates the number of total successful and failed shard copies
*/
public static final class ShardCounts implements ToXContentFragment {

public static final String TOTAL_FIELD = "total";
public static final String SUCCESSFUL_FIELD = "successful";
public static final String FAILED_FIELD = "failed";

private static final ConstructingObjectParser<ShardCounts, Void> PARSER =
new ConstructingObjectParser<>(
"shardcounts",
a -> new ShardCounts((Integer) a[0], (Integer) a[1], (Integer) a[2])
);
static {
PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD));
}

private int total;
private int successful;
private int failed;

ShardCounts(int total, int successful, int failed) {
this.total = total;
this.successful = successful;
this.failed = failed;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(TOTAL_FIELD, total);
builder.field(SUCCESSFUL_FIELD, successful);
builder.field(FAILED_FIELD, failed);
return builder;
}

public static ShardCounts fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}

public boolean equals(ShardCounts other) {
if (other != null) {
return
other.total == this.total &&
other.successful == this.successful &&
other.failed == this.failed;
} else {
return false;
}
}

}

/**
* Description for the flush/synced results for a particular index.
* This includes total, successful and failed copies along with failure description for each failed copy.
*/
public static final class IndexResult implements ToXContentFragment {

public static final String TOTAL_FIELD = "total";
public static final String SUCCESSFUL_FIELD = "successful";
public static final String FAILED_FIELD = "failed";
public static final String FAILURES_FIELD = "failures";

@SuppressWarnings("unchecked")
private static final ConstructingObjectParser<IndexResult, Void> PARSER =
new ConstructingObjectParser<>(
"indexresult",
a -> new IndexResult((Integer) a[0], (Integer) a[1], (Integer) a[2], (List<ShardFailure>)a[3])
);
static {
PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD));
PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD));
PARSER.declareObjectArray(optionalConstructorArg(), ShardFailure.PARSER, new ParseField(FAILURES_FIELD));
}

private ShardCounts counts;
private List<ShardFailure> failures;

IndexResult(int total, int successful, int failed, List<ShardFailure> failures) {
counts = new ShardCounts(total, successful, failed);
if (failures != null) {
this.failures = Collections.unmodifiableList(failures);
} else {
this.failures = Collections.unmodifiableList(new ArrayList<>());
}
}

/**
* @return The total number of shard copies that were processed for this index.
*/
public int totalShards() {
return counts.total;
}

/**
* @return The number of successful shard copies that were processed for this index.
*/
public int successfulShards() {
return counts.successful;
}

/**
* @return The number of failed shard copies that were processed for this index.
*/
public int failedShards() {
return counts.failed;
}

/**
* @return A list of {@link ShardFailure} objects that describe each of the failed shard copies for this index.
*/
public List<ShardFailure> failures() {
return failures;
}

ShardCounts getShardCounts() {
return counts;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
counts.toXContent(builder, params);
if (failures.size() > 0) {
builder.startArray(FAILURES_FIELD);
for (ShardFailure failure : failures) {
failure.toXContent(builder, params);
}
builder.endArray();
}
return builder;
}

public static IndexResult fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
}

/**
* Description of a failed shard copy for an index.
*/
public static final class ShardFailure implements ToXContentFragment {

public static String SHARD_ID_FIELD = "shard";
public static String FAILURE_REASON_FIELD = "reason";
public static String ROUTING_FIELD = "routing";

private int shardId;
private String failureReason;
private Map<String, Object> routing;

@SuppressWarnings("unchecked")
static final ConstructingObjectParser<ShardFailure, Void> PARSER = new ConstructingObjectParser<>(
"shardfailure",
a -> new ShardFailure((Integer)a[0], (String)a[1], (Map<String, Object>)a[2])
);
static {
PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID_FIELD));
PARSER.declareString(constructorArg(), new ParseField(FAILURE_REASON_FIELD));
PARSER.declareObject(
optionalConstructorArg(),
(parser, c) -> parser.map(),
new ParseField(ROUTING_FIELD)
);
}

ShardFailure(int shardId, String failureReason, Map<String, Object> routing) {
this.shardId = shardId;
this.failureReason = failureReason;
if (routing != null) {
this.routing = Collections.unmodifiableMap(routing);
} else {
this.routing = Collections.unmodifiableMap(new HashMap<>());
}
}

/**
* @return Id of the shard whose copy failed
*/
public int getShardId() {
return shardId;
}

/**
* @return Reason for failure of the shard copy
*/
public String getFailureReason() {
return failureReason;
}

/**
* @return Additional information about the failure.
*/
public Map<String, Object> getRouting() {
return routing;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(SHARD_ID_FIELD, shardId);
builder.field(FAILURE_REASON_FIELD, failureReason);
if (routing.size() > 0) {
builder.field(ROUTING_FIELD, routing);
}
builder.endObject();
return builder;
}

public static ShardFailure fromXContent(XContentParser parser) throws IOException {
return PARSER.parse(parser, null);
}
}
}
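Editor's note: the deleted class above is a compact illustration of the client's ConstructingObjectParser pattern. The following stripped-down sketch shows the same pattern in isolation; the Counts class and its JSON field names are hypothetical, modeled on the deleted ShardCounts.

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

// Hypothetical mini-version of the deleted ShardCounts: three int fields
// parsed positionally by a ConstructingObjectParser.
class Counts {
    final int total;
    final int successful;
    final int failed;

    Counts(int total, int successful, int failed) {
        this.total = total;
        this.successful = successful;
        this.failed = failed;
    }

    static final ConstructingObjectParser<Counts, Void> PARSER = new ConstructingObjectParser<>(
        "counts", a -> new Counts((Integer) a[0], (Integer) a[1], (Integer) a[2]));

    static {
        PARSER.declareInt(constructorArg(), new ParseField("total"));
        PARSER.declareInt(constructorArg(), new ParseField("successful"));
        PARSER.declareInt(constructorArg(), new ParseField("failed"));
    }

    static Counts fromJson(String json) throws IOException {
        try (XContentParser parser = XContentType.JSON.xContent().createParser(
                NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
            return PARSER.parse(parser, null); // e.g. {"total":2,"successful":2,"failed":0}
        }
    }
}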
@@ -33,7 +33,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;

@@ -97,7 +96,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

@@ -759,41 +757,6 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
}
}

public void testSyncedFlush() throws IOException {
{
String index = "index";
Settings settings = Settings.builder()
.put("number_of_shards", 1)
.put("number_of_replicas", 0)
.build();
createIndex(index, settings);
SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(index);
SyncedFlushResponse flushResponse =
execute(syncedFlushRequest, highLevelClient().indices()::flushSynced, highLevelClient().indices()::flushSyncedAsync,
expectWarnings(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE));
assertThat(flushResponse.totalShards(), equalTo(1));
assertThat(flushResponse.successfulShards(), equalTo(1));
assertThat(flushResponse.failedShards(), equalTo(0));
}
{
String nonExistentIndex = "non_existent_index";
assertFalse(indexExists(nonExistentIndex));
SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(nonExistentIndex);
ElasticsearchException exception = expectThrows(
ElasticsearchException.class,
() ->
execute(
syncedFlushRequest,
highLevelClient().indices()::flushSynced,
highLevelClient().indices()::flushSyncedAsync,
expectWarnings(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE)
)
);
assertEquals(RestStatus.NOT_FOUND, exception.status());
}
}

public void testClearCache() throws IOException {
{
String index = "index";
@@ -32,7 +32,6 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;

@@ -460,30 +459,6 @@ public class IndicesRequestConvertersTests extends ESTestCase {
Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
}

public void testSyncedFlush() {
String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
SyncedFlushRequest syncedFlushRequest;
if (ESTestCase.randomBoolean()) {
syncedFlushRequest = new SyncedFlushRequest(indices);
} else {
syncedFlushRequest = new SyncedFlushRequest();
syncedFlushRequest.indices(indices);
}
Map<String, String> expectedParams = new HashMap<>();
RequestConvertersTests.setRandomIndicesOptions(syncedFlushRequest::indicesOptions, syncedFlushRequest::indicesOptions,
expectedParams);
Request request = IndicesRequestConverters.flushSynced(syncedFlushRequest);
StringJoiner endpoint = new StringJoiner("/", "/", "");
if (indices != null && indices.length > 0) {
endpoint.add(String.join(",", indices));
}
endpoint.add("_flush/synced");
Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString()));
Assert.assertThat(request.getParameters(), equalTo(expectedParams));
Assert.assertThat(request.getEntity(), nullValue());
Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
}

public void testForceMerge() {
String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5);
ForceMergeRequest forceMergeRequest;
@@ -1,270 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;

import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntMap;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class SyncedFlushResponseTests extends ESTestCase {

public void testXContentSerialization() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
TestPlan plan = createTestPlan();

XContentBuilder serverResponsebuilder = XContentBuilder.builder(xContentType.xContent());
assertNotNull(plan.result);
serverResponsebuilder.startObject();
plan.result.toXContent(serverResponsebuilder, ToXContent.EMPTY_PARAMS);
serverResponsebuilder.endObject();
XContentBuilder clientResponsebuilder = XContentBuilder.builder(xContentType.xContent());
assertNotNull(plan.result);
plan.clientResult.toXContent(clientResponsebuilder, ToXContent.EMPTY_PARAMS);
Map<String, Object> serverContentMap = convertFailureListToSet(
serverResponsebuilder
.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(),
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(serverResponsebuilder).streamInput()
).map()
);
Map<String, Object> clientContentMap = convertFailureListToSet(
clientResponsebuilder
.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(),
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(clientResponsebuilder).streamInput()
)
.map()
);
assertEquals(serverContentMap, clientContentMap);
}

public void testXContentDeserialization() throws IOException {
final XContentType xContentType = randomFrom(XContentType.values());
TestPlan plan = createTestPlan();
XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
builder.startObject();
plan.result.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
XContentParser parser = builder
.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(),
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(builder).streamInput()
);
SyncedFlushResponse originalResponse = plan.clientResult;
SyncedFlushResponse parsedResponse = SyncedFlushResponse.fromXContent(parser);
assertNotNull(parsedResponse);
assertShardCounts(originalResponse.getShardCounts(), parsedResponse.getShardCounts());
for (Map.Entry<String, SyncedFlushResponse.IndexResult> entry: originalResponse.getIndexResults().entrySet()) {
String index = entry.getKey();
SyncedFlushResponse.IndexResult responseResult = entry.getValue();
SyncedFlushResponse.IndexResult parsedResult = parsedResponse.getIndexResults().get(index);
assertNotNull(responseResult);
assertNotNull(parsedResult);
assertShardCounts(responseResult.getShardCounts(), parsedResult.getShardCounts());
assertEquals(responseResult.failures().size(), parsedResult.failures().size());
for (SyncedFlushResponse.ShardFailure responseShardFailure: responseResult.failures()) {
assertTrue(containsFailure(parsedResult.failures(), responseShardFailure));
}
}
}

static class TestPlan {
SyncedFlushResponse.ShardCounts totalCounts;
Map<String, SyncedFlushResponse.ShardCounts> countsPerIndex = new HashMap<>();
ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>();
org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse result;
SyncedFlushResponse clientResult;
}

TestPlan createTestPlan() throws IOException {
final TestPlan testPlan = new TestPlan();
final Map<String, List<ShardsSyncedFlushResult>> indicesResults = new HashMap<>();
Map<String, SyncedFlushResponse.IndexResult> indexResults = new HashMap<>();
final XContentType xContentType = randomFrom(XContentType.values());
final int indexCount = randomIntBetween(1, 10);
int totalShards = 0;
int totalSuccessful = 0;
int totalFailed = 0;
for (int i = 0; i < indexCount; i++) {
final String index = "index_" + i;
int shards = randomIntBetween(1, 4);
int replicas = randomIntBetween(0, 2);
int successful = 0;
int failed = 0;
int failures = 0;
List<ShardsSyncedFlushResult> shardsResults = new ArrayList<>();
List<SyncedFlushResponse.ShardFailure> shardFailures = new ArrayList<>();
for (int shard = 0; shard < shards; shard++) {
final ShardId shardId = new ShardId(index, "_na_", shard);
if (randomInt(5) < 2) {
// total shard failure
failed += replicas + 1;
failures++;
shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure"));
shardFailures.add(
new SyncedFlushResponse.ShardFailure(
shardId.id(),
"simulated total failure",
new HashMap<>()
)
);
} else {
Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
for (int copy = 0; copy < replicas + 1; copy++) {
final ShardRouting shardRouting =
TestShardRouting.newShardRouting(
index, shard, "node_" + shardId + "_" + copy, null,
copy == 0, ShardRoutingState.STARTED
);
if (randomInt(5) < 2) {
// shard copy failure
failed++;
failures++;
shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId));
// Building the shardRouting map here.
XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
Map<String, Object> routing =
shardRouting.toXContent(builder, ToXContent.EMPTY_PARAMS)
.generator()
.contentType()
.xContent()
.createParser(
xContentRegistry(),
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(builder).streamInput()
)
.map();
shardFailures.add(
new SyncedFlushResponse.ShardFailure(
shardId.id(),
"copy failure " + shardId,
routing
)
);
} else {
successful++;
shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null));
}
}
shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses));
}
}
indicesResults.put(index, shardsResults);
indexResults.put(
index,
new SyncedFlushResponse.IndexResult(
shards * (replicas + 1),
successful,
failed,
shardFailures
)
);
testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed));
testPlan.expectedFailuresPerIndex.put(index, failures);
totalFailed += failed;
totalShards += shards * (replicas + 1);
totalSuccessful += successful;
}
testPlan.result = new org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse(indicesResults);
testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed);
testPlan.clientResult = new SyncedFlushResponse(
new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed),
indexResults
);
return testPlan;
}

public boolean containsFailure(List<SyncedFlushResponse.ShardFailure> failures, SyncedFlushResponse.ShardFailure origFailure) {
for (SyncedFlushResponse.ShardFailure failure: failures) {
if (failure.getShardId() == origFailure.getShardId() &&
failure.getFailureReason().equals(origFailure.getFailureReason()) &&
failure.getRouting().equals(origFailure.getRouting())) {
return true;
}
}
return false;
}

public void assertShardCounts(SyncedFlushResponse.ShardCounts first, SyncedFlushResponse.ShardCounts second) {
if (first == null) {
assertNull(second);
} else {
assertTrue(first.equals(second));
}
}

public Map<String, Object> convertFailureListToSet(Map<String, Object> input) {
Map<String, Object> retMap = new HashMap<>();
for (Map.Entry<String, Object> entry: input.entrySet()) {
if (entry.getKey().equals(SyncedFlushResponse.SHARDS_FIELD)) {
retMap.put(entry.getKey(), entry.getValue());
} else {
// This was an index entry.
@SuppressWarnings("unchecked")
Map<String, Object> indexResult = (Map<String, Object>)entry.getValue();
Map<String, Object> retResult = new HashMap<>();
for (Map.Entry<String, Object> entry2: indexResult.entrySet()) {
if (entry2.getKey().equals(SyncedFlushResponse.IndexResult.FAILURES_FIELD)) {
@SuppressWarnings("unchecked")
List<Object> failures = (List<Object>)entry2.getValue();
Set<Object> retSet = new HashSet<>(failures);
retResult.put(entry.getKey(), retSet);
} else {
retResult.put(entry2.getKey(), entry2.getValue());
}
}
retMap.put(entry.getKey(), retResult);
}
}
return retMap;
}
}
@@ -31,7 +31,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;

@@ -56,7 +55,6 @@ import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.GetAliasesResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.SyncedFlushResponse;
import org.elasticsearch.client.core.BroadcastResponse.Shards;
import org.elasticsearch.client.core.ShardsAcknowledgedResponse;
import org.elasticsearch.client.indices.AnalyzeRequest;

@@ -998,94 +996,6 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
}
}

@SuppressWarnings("unused")
public void testSyncedFlushIndex() throws Exception {
RestHighLevelClient client = highLevelClient();

{
createIndex("index1", Settings.EMPTY);
}

{
// tag::flush-synced-request
SyncedFlushRequest request = new SyncedFlushRequest("index1"); // <1>
SyncedFlushRequest requestMultiple = new SyncedFlushRequest("index1", "index2"); // <2>
SyncedFlushRequest requestAll = new SyncedFlushRequest(); // <3>
// end::flush-synced-request

// tag::flush-synced-request-indicesOptions
request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
// end::flush-synced-request-indicesOptions

// tag::flush-synced-execute
SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request, expectWarnings(
"Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."
));
// end::flush-synced-execute

// tag::flush-synced-response
int totalShards = flushSyncedResponse.totalShards(); // <1>
int successfulShards = flushSyncedResponse.successfulShards(); // <2>
int failedShards = flushSyncedResponse.failedShards(); // <3>

for (Map.Entry<String, SyncedFlushResponse.IndexResult> responsePerIndexEntry:
flushSyncedResponse.getIndexResults().entrySet()) {
String indexName = responsePerIndexEntry.getKey(); // <4>
SyncedFlushResponse.IndexResult indexResult = responsePerIndexEntry.getValue();
int totalShardsForIndex = indexResult.totalShards(); // <5>
int successfulShardsForIndex = indexResult.successfulShards(); // <6>
int failedShardsForIndex = indexResult.failedShards(); // <7>
if (failedShardsForIndex > 0) {
for (SyncedFlushResponse.ShardFailure failureEntry: indexResult.failures()) {
int shardId = failureEntry.getShardId(); // <8>
String failureReason = failureEntry.getFailureReason(); // <9>
Map<String, Object> routing = failureEntry.getRouting(); // <10>
}
}
}
// end::flush-synced-response

// tag::flush-synced-execute-listener
ActionListener<SyncedFlushResponse> listener = new ActionListener<SyncedFlushResponse>() {
@Override
public void onResponse(SyncedFlushResponse refreshResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::flush-synced-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::flush-synced-execute-async
client.indices().flushSyncedAsync(request, expectWarnings(
"Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."
), listener); // <1>
// end::flush-synced-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}

{
// tag::flush-synced-notfound
try {
SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist");
client.indices().flushSynced(request, RequestOptions.DEFAULT);
} catch (ElasticsearchException exception) {
if (exception.status() == RestStatus.NOT_FOUND) {
// <1>
}
}
// end::flush-synced-notfound
}
}

public void testGetSettings() throws Exception {
RestHighLevelClient client = highLevelClient();
@@ -1,62 +0,0 @@
--
:api: flush-synced
:request: SyncedFlushRequest
:response: SyncedFlushResponse
--

[id="{upid}-{api}"]
=== Flush Synced API

[id="{upid}-{api}-request"]
==== Flush Synced Request

A +{request}+ can be applied to one or more indices, or even on `_all` the indices:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Flush synced one index
<2> Flush synced multiple indices
<3> Flush synced all the indices

==== Optional arguments

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Flush Synced Response

The returned +{response}+ allows to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Total number of shards hit by the flush request
<2> Number of shards where the flush has succeeded
<3> Number of shards where the flush has failed
<4> Name of the index whose results we are about to calculate.
<5> Total number of shards for index mentioned in 4.
<6> Successful shards for index mentioned in 4.
<7> Failed shards for index mentioned in 4.
<8> One of the failed shard ids of the failed index mentioned in 4.
<9> Reason for failure of copies of the shard mentioned in 8.
<10> JSON represented by a Map<String, Object>. Contains shard related information like id, state, version etc.
for the failed shard copies. If the entire shard failed then this returns an empty map.

By default, if the indices were not found, an `ElasticsearchException` will be thrown:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-notfound]
--------------------------------------------------
<1> Do something if the indices to be flushed were not found
@@ -102,7 +102,6 @@ Index Management::
* <<{upid}-clone-index>>
* <<{upid}-refresh>>
* <<{upid}-flush>>
* <<{upid}-flush-synced>>
* <<{upid}-clear-cache>>
* <<{upid}-force-merge>>
* <<{upid}-rollover-index>>

@@ -138,7 +137,6 @@ include::indices/split_index.asciidoc[]
include::indices/clone_index.asciidoc[]
include::indices/refresh.asciidoc[]
include::indices/flush.asciidoc[]
include::indices/flush_synced.asciidoc[]
include::indices/clear_cache.asciidoc[]
include::indices/force_merge.asciidoc[]
include::indices/rollover.asciidoc[]
@@ -28,7 +28,7 @@ this scenario:
If the master had just waited for a few minutes, then the missing shards could
have been re-allocated to Node 5 with the minimum of network traffic. This
process would be even quicker for idle shards (shards not receiving indexing
requests) which have been automatically <<indices-synced-flush-api,sync-flushed>>.
requests) which have been automatically <<indices-flush, flushed>>.

The allocation of replica shards which become unassigned because a node has
left can be delayed with the `index.unassigned.node_left.delayed_timeout`
@@ -68,7 +68,6 @@ index settings, aliases, mappings, and index templates.
* <<indices-clearcache>>
* <<indices-refresh>>
* <<indices-flush>>
* <<indices-synced-flush-api>>
* <<indices-forcemerge>>

@@ -136,8 +135,6 @@ include::indices/shrink-index.asciidoc[]

include::indices/split-index.asciidoc[]

include::indices/synced-flush.asciidoc[]

include::indices/apis/unfreeze.asciidoc[]

include::indices/aliases.asciidoc[]
@@ -4,278 +4,4 @@
<titleabbrev>Synced flush</titleabbrev>
++++

deprecated::[7.6, synced-flush is deprecated and will be removed in 8.0.
Use <<indices-flush,flush>> instead. A <<indices-flush,flush>> has the
same effect as a synced flush on Elasticsearch 7.6 or later]

Performs a synced flush on one or more indices.

[source,console]
--------------------------------------------------
POST /twitter/_flush/synced
--------------------------------------------------
// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests]


[[synced-flush-api-request]]
==== {api-request-title}

`POST /<index>/_flush/synced`

`GET /<index>/_flush/synced`

`POST /_flush/synced`

`GET /_flush/synced`


[[synced-flush-api-desc]]
==== {api-description-title}

[[synced-flush-using-api]]
===== Use the synced flush API

Use the synced flush API to manually initiate a synced flush.
This can be useful for a planned cluster restart where
you can stop indexing but don't want to wait for 5 minutes until all indices
are marked as inactive and automatically sync-flushed.

You can request a synced flush even if there is ongoing indexing activity, and
{es} will perform the synced flush on a "best-effort" basis: shards that do not
have any ongoing indexing activity will be successfully sync-flushed, and other
shards will fail to sync-flush. The successfully sync-flushed shards will have
faster recovery times as long as the `sync_id` marker is not removed by a
subsequent flush.


[[synced-flush-overview]]
===== Synced flush overview

{es} keeps track of which shards have received indexing activity recently, and
considers shards that have not received any indexing operations for 5 minutes to
be inactive.

When a shard becomes inactive {es} performs a special kind of flush
known as a *synced flush*. A synced flush performs a normal
<<indices-flush,flush>> on each replica of the shard, and then adds a marker known
as the `sync_id` to each replica to indicate that these copies have identical
Lucene indices. Comparing the `sync_id` markers of the two copies is a very
efficient way to check whether they have identical contents.

When allocating shard replicas, {es} must ensure that each replica contains the
same data as the primary. If the shard copies have been synced-flushed and the
replica shares a `sync_id` with the primary then {es} knows that the two copies
have identical contents. This means there is no need to copy any segment files
from the primary to the replica, which saves a good deal of time during
recoveries and restarts.

This is particularly useful for clusters having lots of indices which are very
rarely updated, such as with time-based indices. Without the synced flush
marker, recovery of this kind of cluster would be much slower.


[[synced-flush-sync-id-markers]]
===== Check for `sync_id` markers

To check whether a shard has a `sync_id` marker or not, look for the `commit`
section of the shard stats returned by the <<indices-stats,indices stats>> API:

[source,console]
--------------------------------------------------
GET /twitter/_stats?filter_path=**.commit&level=shards <1>
--------------------------------------------------
// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests]

<1> `filter_path` is used to reduce the verbosity of the response, but is entirely optional

The API returns the following response:

[source,console-result]
--------------------------------------------------
{
  "indices": {
    "twitter": {
      "shards": {
        "0": [
          {
            "commit" : {
              "id" : "3M3zkw2GHMo2Y4h4/KFKCg==",
              "generation" : 3,
              "user_data" : {
                "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA",
                "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ",
                "local_checkpoint" : "-1",
                "translog_generation" : "2",
                "max_seq_no" : "-1",
                "sync_id" : "AVvFY-071siAOuFGEO9P", <1>
                "max_unsafe_auto_id_timestamp" : "-1",
                "min_retained_seq_no" : "0"
              },
              "num_docs" : 0
            }
          }
        ]
      }
    }
  }
}
--------------------------------------------------
// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests]
<1> the `sync id` marker

NOTE: The `sync_id` marker is removed as soon as the shard is flushed again, and
{es} may trigger an automatic flush of a shard at any time if there are
unflushed operations in the shard's translog. In practice this means that one
should consider any indexing operation on an index as having removed its
`sync_id` markers.


[[synced-flush-api-path-params]]
==== {api-path-parms-title}

include::{docdir}/rest-api/common-parms.asciidoc[tag=index]
+
To sync-flush all indices,
omit this parameter
or use a value of `_all` or `*`.


[[synced-flush-api-query-params]]
==== {api-query-parms-title}

include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]

include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
+
Defaults to `open`.

include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable]


[[synced-flush-api-response-codes]]
==== {api-response-codes-title}

`200`::
All shards successfully sync-flushed.

`409`::
A replica shard failed to sync-flush.


[[synced-flush-api-example]]
==== {api-examples-title}


[[synced-flush-api-specific-ex]]
===== Sync-flush a specific index

[source,console]
----
POST /kimchy/_flush/synced
----
// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests]


[[synced-flush-api-multi-ex]]
===== Sync-flush several indices

[source,console]
--------------------------------------------------
POST /kimchy,elasticsearch/_flush/synced
--------------------------------------------------
// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests]


[[synced-flush-api-all-ex]]
===== Sync-flush all indices

[source,console]
--------------------------------------------------
POST /_flush/synced
--------------------------------------------------
// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests]

The response contains details about how many shards were successfully
sync-flushed and information about any failure.

The following response indicates two shards
and one replica shard
successfully sync-flushed:

[source,console-result]
--------------------------------------------------
{
  "_shards": {
    "total": 2,
    "successful": 2,
    "failed": 0
  },
  "twitter": {
    "total": 2,
    "successful": 2,
    "failed": 0
  }
}
--------------------------------------------------
// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests]

The following response indicates one shard group failed
due to pending operations:

[source,console-result]
--------------------------------------------------
{
  "_shards": {
    "total": 4,
    "successful": 2,
    "failed": 2
  },
  "twitter": {
    "total": 4,
    "successful": 2,
    "failed": 2,
    "failures": [
      {
        "shard": 1,
        "reason": "[2] ongoing operations on primary"
      }
    ]
  }
}
--------------------------------------------------
// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests]

Sometimes the failures are specific to a shard replica. The copies that failed
will not be eligible for fast recovery but those that succeeded still will be.
This case is reported as follows:

[source,console-result]
--------------------------------------------------
{
  "_shards": {
    "total": 4,
    "successful": 1,
    "failed": 1
  },
  "twitter": {
    "total": 4,
    "successful": 3,
    "failed": 1,
    "failures": [
      {
        "shard": 1,
        "reason": "unexpected error",
        "routing": {
          "state": "STARTED",
          "primary": false,
          "node": "SZNr2J_ORxKTLUCydGX4zA",
          "relocating_node": null,
          "shard": 1,
          "index": "twitter"
        }
      }
    ]
  }
}
--------------------------------------------------
// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests]
Synced flush was removed. Use normal <<indices-flush,flush>> instead.
@@ -27,3 +27,10 @@ and the setting is removed.
In 6.0, we deprecated the `template` field in put index template requests
in favor of using `index_patterns`. Support for the `template` field is now
removed in 8.0.

[float]
==== Remove synced flush

Synced flush was deprecated in 7.6 and is removed in 8.0. Use a regular flush
instead as it has the same effect as a synced flush in 7.6 and later.
@@ -66,7 +66,7 @@ There are several thread pools, but the important ones include:
size of `2 * # of available processors`.

`flush`::
For <<indices-flush,flush>>, <<indices-synced-flush-api,synced flush>>, and <<index-modules-translog, translog>> `fsync` operations.
For <<indices-flush,flush>> and <<index-modules-translog, translog>> `fsync` operations.
Thread pool type is `scaling` with a keep-alive of `5m` and a default
maximum size of `min(5, (# of available processors)/2)`.
@@ -21,13 +21,15 @@ include::{docdir}/upgrade/disable-shard-alloc.asciidoc[]
--
// end::disable_shard_alloc[]
// tag::stop_indexing[]
. *Stop indexing and perform a synced flush.*
. *Stop indexing and perform a flush.*
+
--
Performing a <<indices-synced-flush-api, synced-flush>> speeds up shard
recovery.
Performing a <<indices-flush, flush>> speeds up shard recovery.

include::{docdir}/upgrade/synced-flush.asciidoc[]
[source,console]
--------------------------------------------------
POST /_flush
--------------------------------------------------
--
// end::stop_indexing[]
//tag::stop_ml[]
@@ -22,13 +22,15 @@ To perform a full cluster restart upgrade to {version}:
include::disable-shard-alloc.asciidoc[]
--

. *Stop indexing and perform a synced flush.*
. *Stop indexing and perform a flush.*
+
--
Performing a <<indices-synced-flush-api, synced-flush>> speeds up shard
recovery.
Performing a <<indices-flush, flush>> speeds up shard recovery.

include::synced-flush.asciidoc[]
[source,console]
--------------------------------------------------
POST /_flush
--------------------------------------------------
--

. *Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional)
@ -1,14 +0,0 @@
|
|||
|
||||
[source,console]
|
||||
--------------------------------------------------
|
||||
POST _flush/synced
|
||||
--------------------------------------------------
|
||||
// TEST[skip: will fail as synced flush is deprecated]
|
||||
|
||||
When you perform a synced flush, check the response to make sure there are
|
||||
no failures. Synced flush operations that fail due to pending indexing
|
||||
operations are listed in the response body, although the request itself
|
||||
still returns a 200 OK status. If there are failures, reissue the request.
|
||||
|
||||
Note that synced flush is deprecated and will be removed in 8.0. A flush
|
||||
has the same effect as a synced flush on Elasticsearch 7.6 or later.

@ -23,7 +23,6 @@ import org.apache.http.util.EntityUtils;
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;

@ -679,16 +678,10 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
            flushRequest.addParameter("force", "true");
            flushRequest.addParameter("wait_if_ongoing", "true");
            assertOK(client().performRequest(flushRequest));

            if (randomBoolean()) {
                // We had a bug before where we failed to perform peer recovery with sync_id from 5.x to 6.x.
                // We added this synced flush so we can exercise different paths of recovery code.
                try {
                    performSyncedFlush(index);
                } catch (ResponseException ignored) {
                    // synced flush is optional here
                }
                syncedFlush(index);
            }

        if (shouldHaveTranslog) {
            // Update a few documents so we are sure to have a translog
            indexRandomDocuments(

@ -21,13 +21,17 @@ package org.elasticsearch.backwards;
import org.apache.http.HttpHost;
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.seqno.SeqNoStats;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.ObjectPath;

@ -38,6 +42,7 @@ import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.equalTo;

public class IndexingIT extends ESRestTestCase {

@ -274,6 +279,57 @@ public class IndexingIT extends ESRestTestCase {
        request.setJsonEntity("{\"indices\": \"" + index + "\"}");
    }

    public void testSyncedFlushTransition() throws Exception {
        Nodes nodes = buildNodeAndVersions();
        assertTrue("bwc version is on 7.x", nodes.getBWCVersion().before(Version.V_8_0_0));
        assumeFalse("no new node found", nodes.getNewNodes().isEmpty());
        assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty());
        // Allocate shards to new nodes then verify synced flush requests processed by old nodes/new nodes
        String newNodes = nodes.getNewNodes().stream().map(Node::getNodeName).collect(Collectors.joining(","));
        int numShards = randomIntBetween(1, 10);
        int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1);
        int totalShards = numShards * (numOfReplicas + 1);
        final String index = "test_synced_flush";
        createIndex(index, Settings.builder()
            .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numOfReplicas)
            .put("index.routing.allocation.include._name", newNodes).build());
        ensureGreen(index);
        indexDocs(index, randomIntBetween(0, 100), between(1, 100));
        try (RestClient oldNodeClient = buildClient(restClientSettings(),
            nodes.getBWCNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) {
            Request request = new Request("POST", index + "/_flush/synced");
            assertBusy(() -> {
                ResponseException responseException = expectThrows(ResponseException.class, () -> oldNodeClient.performRequest(request));
                assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus()));
                assertThat(responseException.getResponse().getWarnings(),
                    contains("Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."));
                Map<String, Object> result = ObjectPath.createFromResponse(responseException.getResponse()).evaluate("_shards");
                assertThat(result.get("total"), equalTo(totalShards));
                assertThat(result.get("successful"), equalTo(0));
                assertThat(result.get("failed"), equalTo(totalShards));
            });
            Map<String, Object> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards")));
            assertThat(XContentMapValues.extractValue("indices." + index + ".total.translog.uncommitted_operations", stats), equalTo(0));
        }
        indexDocs(index, randomIntBetween(0, 100), between(1, 100));
        try (RestClient newNodeClient = buildClient(restClientSettings(),
            nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) {
            Request request = new Request("POST", index + "/_flush/synced");
            List<String> warningMsg = List.of("Synced flush was removed and a normal flush was performed instead. " +
                "This transition will be removed in a future version.");
            request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> warnings.equals(warningMsg) == false));
            assertBusy(() -> {
                Map<String, Object> result = ObjectPath.createFromResponse(newNodeClient.performRequest(request)).evaluate("_shards");
                assertThat(result.get("total"), equalTo(totalShards));
                assertThat(result.get("successful"), equalTo(totalShards));
                assertThat(result.get("failed"), equalTo(0));
            });
            Map<String, Object> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards")));
            assertThat(XContentMapValues.extractValue("indices." + index + ".total.translog.uncommitted_operations", stats), equalTo(0));
        }
    }

    private void assertCount(final String index, final String preference, final int expectedCount) throws IOException {
        Request request = new Request("GET", index + "/_count");
        request.addParameter("preference", preference);

@ -44,7 +44,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.function.Predicate;

@ -283,7 +282,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
    }

    public void testRecovery() throws Exception {
        final String index = "recover_with_soft_deletes";
        final String index = "test_recovery";
        if (CLUSTER_TYPE == ClusterType.OLD) {
            Settings.Builder settings = Settings.builder()
                .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)

@ -315,6 +314,9 @@ public class RecoveryIT extends AbstractRollingTestCase {
                }
            }
        }
        if (randomBoolean()) {
            syncedFlush(index);
        }
        ensureGreen(index);
    }

@ -557,40 +559,6 @@ public class RecoveryIT extends AbstractRollingTestCase {
        }
    }

    private void syncedFlush(String index) throws Exception {
        // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation.
        // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit.
        assertBusy(() -> {
            try {
                Response resp = performSyncedFlush(index);
                Map<String, Object> result = ObjectPath.createFromResponse(resp).evaluate("_shards");
                assertThat(result.get("failed"), equalTo(0));
            } catch (ResponseException ex) {
                throw new AssertionError(ex); // cause assert busy to retry
            }
        });
        // ensure the global checkpoint is synced; otherwise we might trim the commit with syncId
        ensureGlobalCheckpointSynced(index);
    }

    @SuppressWarnings("unchecked")
    private void ensureGlobalCheckpointSynced(String index) throws Exception {
        assertBusy(() -> {
            Map<?, ?> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards")));
            List<Map<?, ?>> shardStats = (List<Map<?, ?>>) XContentMapValues.extractValue("indices." + index + ".shards.0", stats);
            shardStats.stream()
                .map(shard -> (Map<?, ?>) XContentMapValues.extractValue("seq_no", shard))
                .filter(Objects::nonNull)
                .forEach(seqNoStat -> {
                    long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue();
                    long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue();
                    long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue();
                    assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo));
                    assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo));
                });
        }, 60, TimeUnit.SECONDS);
    }

    /** Ensure that we can always execute update requests regardless of the version of the cluster */
    public void testUpdateDoc() throws Exception {
        final String index = "test_update_doc";

@ -1,54 +0,0 @@
{
  "indices.flush_synced":{
    "documentation":{
      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-synced-flush-api.html",
      "description":"Performs a synced flush operation on one or more indices. Synced flush is deprecated and will be removed in 8.0. Use flush instead"
    },
    "stability":"stable",
    "url":{
      "paths":[
        {
          "path":"/_flush/synced",
          "methods":[
            "POST",
            "GET"
          ]
        },
        {
          "path":"/{index}/_flush/synced",
          "methods":[
            "POST",
            "GET"
          ],
          "parts":{
            "index":{
              "type":"list",
              "description":"A comma-separated list of index names; use `_all` or empty string for all indices"
            }
          }
        }
      ]
    },
    "params":{
      "ignore_unavailable":{
        "type":"boolean",
        "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)"
      },
      "allow_no_indices":{
        "type":"boolean",
        "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
      },
      "expand_wildcards":{
        "type":"enum",
        "options":[
          "open",
          "closed",
          "none",
          "all"
        ],
        "default":"open",
        "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both."
      }
    }
  }
}

@ -1,33 +1,3 @@
---
"Index synced flush rest test":
  - skip:
      version: " - 7.5.99"
      reason: "synced flush is deprecated in 7.6"
      features: "warnings"
  - do:
      indices.create:
        index: testing
        body:
          settings:
            index:
              number_of_replicas: 0

  - do:
      cluster.health:
        wait_for_status: green
  - do:
      warnings:
        - Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead.
      indices.flush_synced:
        index: testing

  - is_false: _shards.failed

  - do:
      indices.stats: {level: shards}

  - is_true: indices.testing.shards.0.0.commit.user_data.sync_id

---
"Flush stats":

@ -107,10 +107,8 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction;
import org.elasticsearch.action.admin.indices.flush.FlushAction;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
import org.elasticsearch.action.admin.indices.flush.TransportFlushAction;
import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction;
import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
import org.elasticsearch.action.admin.indices.forcemerge.TransportForceMergeAction;
import org.elasticsearch.action.admin.indices.get.GetIndexAction;

@ -492,7 +490,6 @@ public class ActionModule extends AbstractModule {
        actions.register(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
        actions.register(RefreshAction.INSTANCE, TransportRefreshAction.class);
        actions.register(FlushAction.INSTANCE, TransportFlushAction.class);
        actions.register(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
        actions.register(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
        actions.register(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
        actions.register(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);

@ -1,33 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionType;

public class SyncedFlushAction extends ActionType<SyncedFlushResponse> {

    public static final SyncedFlushAction INSTANCE = new SyncedFlushAction();
    public static final String NAME = "indices:admin/synced_flush";

    private SyncedFlushAction() {
        super(NAME, SyncedFlushResponse::new);
    }
}

@ -1,57 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;
import java.util.Arrays;

/**
 * A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush
 * and writes the same sync id to primary and all copies.
 *
 * <p>Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}. </p>
 *
 * @see org.elasticsearch.client.Requests#flushRequest(String...)
 * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
 * @see SyncedFlushResponse
 */
public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {

    /**
     * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
     * be sync flushed.
     */
    public SyncedFlushRequest(String... indices) {
        super(indices);
    }

    public SyncedFlushRequest(StreamInput in) throws IOException {
        super(in);
    }

    @Override
    public String toString() {
        return "SyncedFlushRequest{" +
            "indices=" + Arrays.toString(indices) + "}";
    }
}

@ -1,41 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ElasticsearchClient;

public class SyncedFlushRequestBuilder extends ActionRequestBuilder<SyncedFlushRequest, SyncedFlushResponse> {

    public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) {
        super(client, action, new SyncedFlushRequest());
    }

    public SyncedFlushRequestBuilder setIndices(String[] indices) {
        super.request().indices(indices);
        return this;
    }

    public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
        super.request().indicesOptions(indicesOptions);
        return this;
    }
}

@ -1,213 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static java.util.Collections.unmodifiableMap;

/**
 * The result of performing a sync flush operation on all shards of multiple indices
 */
public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment {

    private final Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
    private final ShardCounts shardCounts;

    public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
        // shardsResultPerIndex is never modified after it is passed to this
        // constructor so this is safe even though shardsResultPerIndex is a
        // ConcurrentHashMap
        this.shardsResultPerIndex = unmodifiableMap(shardsResultPerIndex);
        this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values()));
    }

    public SyncedFlushResponse(StreamInput in) throws IOException {
        super(in);
        shardCounts = new ShardCounts(in);
        Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>();
        int numShardsResults = in.readInt();
        for (int i = 0; i < numShardsResults; i++) {
            String index = in.readString();
            List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>();
            int numShards = in.readInt();
            for (int j = 0; j < numShards; j++) {
                shardsSyncedFlushResults.add(new ShardsSyncedFlushResult(in));
            }
            tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults);
        }
        shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex);
    }

    /**
     * total number of shards, including replicas, both assigned and unassigned
     */
    public int totalShards() {
        return shardCounts.total;
    }

    /**
     * total number of shards for which the operation failed
     */
    public int failedShards() {
        return shardCounts.failed;
    }

    /**
     * total number of shards which were successfully sync-flushed
     */
    public int successfulShards() {
        return shardCounts.successful;
    }

    public RestStatus restStatus() {
        return failedShards() == 0 ? RestStatus.OK : RestStatus.CONFLICT;
    }

    public Map<String, List<ShardsSyncedFlushResult>> getShardsResultPerIndex() {
        return shardsResultPerIndex;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(Fields._SHARDS);
        shardCounts.toXContent(builder, params);
        builder.endObject();
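        // A sketch of the fragment built below, with illustrative values: each index gets an object
        // such as "twitter": {"total": 2, "successful": 1, "failed": 1,
        // "failures": [{"shard": 1, "reason": "...", "routing": {...}}]}, where "failures" is only
        // present when at least one shard copy failed.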
        for (Map.Entry<String, List<ShardsSyncedFlushResult>> indexEntry : shardsResultPerIndex.entrySet()) {
            List<ShardsSyncedFlushResult> indexResult = indexEntry.getValue();
            builder.startObject(indexEntry.getKey());
            ShardCounts indexShardCounts = calculateShardCounts(indexResult);
            indexShardCounts.toXContent(builder, params);
            if (indexShardCounts.failed > 0) {
                builder.startArray(Fields.FAILURES);
                for (ShardsSyncedFlushResult shardResults : indexResult) {
                    if (shardResults.failed()) {
                        builder.startObject();
                        builder.field(Fields.SHARD, shardResults.shardId().id());
                        builder.field(Fields.REASON, shardResults.failureReason());
                        builder.endObject();
                        continue;
                    }
                    Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards = shardResults.failedShards();
                    for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : failedShards.entrySet()) {
                        builder.startObject();
                        builder.field(Fields.SHARD, shardResults.shardId().id());
                        builder.field(Fields.REASON, shardEntry.getValue().failureReason());
                        builder.field(Fields.ROUTING, shardEntry.getKey());
                        builder.endObject();
                    }
                }
                builder.endArray();
            }
            builder.endObject();
        }
        return builder;
    }

    static ShardCounts calculateShardCounts(Iterable<ShardsSyncedFlushResult> results) {
        int total = 0, successful = 0, failed = 0;
        for (ShardsSyncedFlushResult result : results) {
            total += result.totalShards();
            successful += result.successfulShards();
            if (result.failed()) {
                // treat all shard copies as failed
                failed += result.totalShards();
            } else {
                // some shards may have failed during the sync phase
                failed += result.failedShards().size();
            }
        }
        return new ShardCounts(total, successful, failed);
    }

    static final class ShardCounts implements ToXContentFragment, Writeable {

        public final int total;
        public final int successful;
        public final int failed;

        ShardCounts(int total, int successful, int failed) {
            this.total = total;
            this.successful = successful;
            this.failed = failed;
        }

        ShardCounts(StreamInput in) throws IOException {
            total = in.readInt();
            successful = in.readInt();
            failed = in.readInt();
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.field(Fields.TOTAL, total);
            builder.field(Fields.SUCCESSFUL, successful);
            builder.field(Fields.FAILED, failed);
            return builder;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeInt(total);
            out.writeInt(successful);
            out.writeInt(failed);
        }
    }

    static final class Fields {
        static final String _SHARDS = "_shards";
        static final String TOTAL = "total";
        static final String SUCCESSFUL = "successful";
        static final String FAILED = "failed";
        static final String FAILURES = "failures";
        static final String SHARD = "shard";
        static final String ROUTING = "routing";
        static final String REASON = "reason";
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        shardCounts.writeTo(out);
        out.writeInt(shardsResultPerIndex.size());
        for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : shardsResultPerIndex.entrySet()) {
            out.writeString(entry.getKey());
            out.writeInt(entry.getValue().size());
            for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) {
                shardsSyncedFlushResult.writeTo(out);
            }
        }
    }
}

@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.support.ActionFilters;

@ -28,10 +29,16 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;

@ -48,6 +55,8 @@ public class TransportShardFlushAction
                                     ActionFilters actionFilters) {
        super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
            actionFilters, ShardFlushRequest::new, ShardFlushRequest::new, ThreadPool.Names.FLUSH);
        transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME,
            ThreadPool.Names.FLUSH, PreShardSyncedFlushRequest::new, new PreSyncedFlushTransportHandler(indicesService));
    }

    @Override

@ -71,4 +80,43 @@ public class TransportShardFlushAction
        logger.trace("{} flush request executed on replica", replica.shardId());
        return new ReplicaResult();
    }

    // TODO: Remove this transition in 9.0
    private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre";

    private static class PreShardSyncedFlushRequest extends TransportRequest {
        private final ShardId shardId;

        private PreShardSyncedFlushRequest(StreamInput in) throws IOException {
            super(in);
            assert in.getVersion().before(Version.V_8_0_0) : "received pre_sync request from a new node";
            this.shardId = new ShardId(in);
        }

        @Override
        public String toString() {
            return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}';
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            assert false : "must not send pre_sync request from a new node";
            throw new UnsupportedOperationException("");
        }
    }

    private static final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreShardSyncedFlushRequest> {
        private final IndicesService indicesService;

        PreSyncedFlushTransportHandler(IndicesService indicesService) {
            this.indicesService = indicesService;
        }

        @Override
        public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) {
            IndexShard indexShard = indicesService.indexServiceSafe(request.shardId.getIndex()).getShard(request.shardId.id());
            indexShard.flush(new FlushRequest().force(false).waitIfOngoing(true));
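            // Note: the flush above preserves the recovery speed-up for the requesting pre-8.0 node,
            // while the exception below signals that the synced-flush protocol itself is gone.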
            throw new UnsupportedOperationException("Synced flush was removed and a normal flush was performed instead.");
        }
    }
}

@ -1,48 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportService;

/**
 * Synced flush Action.
 */
public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlushRequest, SyncedFlushResponse> {

    SyncedFlushService syncedFlushService;

    @Inject
    public TransportSyncedFlushAction(TransportService transportService, ActionFilters actionFilters,
                                      SyncedFlushService syncedFlushService) {
        super(SyncedFlushAction.NAME, transportService, actionFilters, SyncedFlushRequest::new);
        this.syncedFlushService = syncedFlushService;
    }

    @Override
    protected void doExecute(Task task, SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
        syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener);
    }
}

@ -42,9 +42,6 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;

@ -336,29 +333,6 @@ public interface IndicesAdminClient extends ElasticsearchClient {
     */
    FlushRequestBuilder prepareFlush(String... indices);

    /**
     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
     *
     * @param request The sync flush request
     * @return A result future
     * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
     */
    ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request);

    /**
     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
     *
     * @param request The sync flush request
     * @param listener A listener to be notified with a result
     * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
     */
    void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener);

    /**
     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
     */
    SyncedFlushRequestBuilder prepareSyncedFlush(String... indices);

    /**
     * Explicitly force merge one or more indices into the number of segments.
     *

@ -47,7 +47,6 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;

@ -248,17 +247,6 @@ public class Requests {
        return new FlushRequest(indices);
    }

    /**
     * Creates a synced flush indices request.
     *
     * @param indices The indices to sync flush. Use {@code null} or {@code _all} to execute against all indices
     * @return The synced flush request
     * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
     */
    public static SyncedFlushRequest syncedFlushRequest(String... indices) {
        return new SyncedFlushRequest(indices);
    }

    /**
     * Creates a force merge request.
     *

@ -163,10 +163,6 @@ import org.elasticsearch.action.admin.indices.flush.FlushAction;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;

@ -1351,21 +1347,6 @@ public abstract class AbstractClient implements Client {
        return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices);
    }

    @Override
    public ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request) {
        return execute(SyncedFlushAction.INSTANCE, request);
    }

    @Override
    public void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
        execute(SyncedFlushAction.INSTANCE, request, listener);
    }

    @Override
    public SyncedFlushRequestBuilder prepareSyncedFlush(String... indices) {
        return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices);
    }

    @Override
    public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) {
        execute(GetMappingsAction.INSTANCE, request, listener);

@ -88,13 +88,6 @@ public final class CommitStats implements Writeable, ToXContentFragment {
        return new Engine.CommitId(Base64.getDecoder().decode(id));
    }

    /**
     * The synced-flush id of the commit, if it exists.
     */
    public String syncId() {
        return userData.get(InternalEngine.SYNC_COMMIT_ID);
    }

    /**
     * Returns the number of documents in this commit
     */

@ -1035,12 +1035,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
        }
    }

    public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) {
        verifyNotClosed();
        logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]", syncId, expectedCommitId);
        return getEngine().syncFlush(syncId, expectedCommitId);
    }

    /**
     * Executes the given flush request against the engine.
     *

@ -61,7 +61,6 @@ import org.elasticsearch.index.seqno.RetentionLeaseSyncAction;
import org.elasticsearch.index.seqno.RetentionLeaseSyncer;
import org.elasticsearch.index.shard.PrimaryReplicaSyncer;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.plugins.MapperPlugin;

@ -238,7 +237,6 @@ public class IndicesModule extends AbstractModule {
    protected void configure() {
        bind(IndicesStore.class).asEagerSingleton();
        bind(IndicesClusterStateService.class).asEagerSingleton();
        bind(SyncedFlushService.class).asEagerSingleton();
        bind(TransportResyncReplicationAction.class).asEagerSingleton();
        bind(PrimaryReplicaSyncer.class).asEagerSingleton();
        bind(RetentionLeaseSyncAction.class).asEagerSingleton();

@ -68,7 +68,6 @@ import org.elasticsearch.index.shard.PrimaryReplicaSyncer.ResyncTask;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.recovery.PeerRecoverySourceService;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;

@ -135,7 +134,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
        final NodeMappingRefreshAction nodeMappingRefreshAction,
        final RepositoriesService repositoriesService,
        final SearchService searchService,
        final SyncedFlushService syncedFlushService,
        final PeerRecoverySourceService peerRecoverySourceService,
        final SnapshotShardsService snapshotShardsService,
        final PrimaryReplicaSyncer primaryReplicaSyncer,

@ -151,7 +149,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
            nodeMappingRefreshAction,
            repositoriesService,
            searchService,
            syncedFlushService,
            peerRecoverySourceService,
            snapshotShardsService,
            primaryReplicaSyncer,

@ -170,7 +167,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
        final NodeMappingRefreshAction nodeMappingRefreshAction,
        final RepositoriesService repositoriesService,
        final SearchService searchService,
        final SyncedFlushService syncedFlushService,
        final PeerRecoverySourceService peerRecoverySourceService,
        final SnapshotShardsService snapshotShardsService,
        final PrimaryReplicaSyncer primaryReplicaSyncer,

@ -1,163 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.indices.flush;

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static java.util.Collections.emptyMap;

/**
 * Result for all copies of a shard
 */
public class ShardsSyncedFlushResult implements Writeable {
    private String failureReason;
    private Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses;
    private String syncId;
    private ShardId shardId;
    // some shards may be unassigned, so we need this as state
    private int totalShards;

    public ShardsSyncedFlushResult(StreamInput in) throws IOException {
        failureReason = in.readOptionalString();
        int numResponses = in.readInt();
        shardResponses = new HashMap<>();
        for (int i = 0; i < numResponses; i++) {
            ShardRouting shardRouting = new ShardRouting(in);
            SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in);
            shardResponses.put(shardRouting, response);
        }
        syncId = in.readOptionalString();
        shardId = new ShardId(in);
        totalShards = in.readInt();
    }

    public ShardId getShardId() {
        return shardId;
    }

    /**
     * failure constructor
     */
    public ShardsSyncedFlushResult(ShardId shardId, int totalShards, String failureReason) {
        this.syncId = null;
        this.failureReason = failureReason;
        this.shardResponses = emptyMap();
        this.shardId = shardId;
        this.totalShards = totalShards;
    }

    /**
     * success constructor
     */
    public ShardsSyncedFlushResult(ShardId shardId,
                                   String syncId,
                                   int totalShards,
                                   Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses) {
        this.failureReason = null;
        this.shardResponses = Map.copyOf(shardResponses);
        this.syncId = syncId;
        this.totalShards = totalShards;
        this.shardId = shardId;
    }

    /**
     * @return true if the operation failed before reaching step three of synced flush. {@link #failureReason()} can be used for
     * more details
     */
    public boolean failed() {
        return failureReason != null;
    }

    /**
     * @return the reason for the failure if synced flush failed before step three of synced flush
     */
    public String failureReason() {
        return failureReason;
    }

    public String syncId() {
        return syncId;
    }

    /**
     * @return total number of shards for which a sync attempt was made
     */
    public int totalShards() {
        return totalShards;
    }

    /**
     * @return total number of successful shards
     */
    public int successfulShards() {
        int i = 0;
        for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) {
            if (result.success()) {
                i++;
            }
        }
        return i;
    }

    /**
     * @return a map of the shard copies that failed
     */
    public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards() {
        Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failures = new HashMap<>();
        for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> result : shardResponses.entrySet()) {
            if (result.getValue().success() == false) {
                failures.put(result.getKey(), result.getValue());
            }
        }
        return failures;
    }

    /**
     * @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush.
     * Empty if synced flush failed before step three.
     */
    public Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses() {
        return shardResponses;
    }

    public ShardId shardId() {
        return shardId;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalString(failureReason);
        out.writeInt(shardResponses.size());
        for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> entry : shardResponses.entrySet()) {
            entry.getKey().writeTo(out);
            entry.getValue().writeTo(out);
        }
        out.writeOptionalString(syncId);
        shardId.writeTo(out);
        out.writeInt(totalShards);
    }
}
|
|
@ -1,768 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.indices.flush;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.StepListener;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.engine.CommitStats;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.shard.ShardNotFoundException;
|
||||
import org.elasticsearch.indices.IndexClosedException;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.elasticsearch.transport.TransportRequestHandler;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
|
||||
public class SyncedFlushService {
|
||||
|
||||
private static final Logger logger = LogManager.getLogger(SyncedFlushService.class);
|
||||
|
||||
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(logger);
|
||||
|
||||
public static final String SYNCED_FLUSH_DEPRECATION_MESSAGE =
|
||||
"Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead.";
|
||||
|
    private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre";
    private static final String SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/sync";
    private static final String IN_FLIGHT_OPS_ACTION_NAME = "internal:indices/flush/synced/in_flight";

    private final IndicesService indicesService;
    private final ClusterService clusterService;
    private final TransportService transportService;
    private final IndexNameExpressionResolver indexNameExpressionResolver;

    @Inject
    public SyncedFlushService(IndicesService indicesService,
                              ClusterService clusterService,
                              TransportService transportService,
                              IndexNameExpressionResolver indexNameExpressionResolver) {
        this.indicesService = indicesService;
        this.clusterService = clusterService;
        this.transportService = transportService;
        this.indexNameExpressionResolver = indexNameExpressionResolver;
        transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, ThreadPool.Names.FLUSH, PreShardSyncedFlushRequest::new,
            new PreSyncedFlushTransportHandler());
        transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ThreadPool.Names.FLUSH, ShardSyncedFlushRequest::new,
            new SyncedFlushTransportHandler());
        transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, ThreadPool.Names.SAME, InFlightOpsRequest::new,
            new InFlightOpCountTransportHandler());
    }

    /**
     * a utility method to perform a synced flush for all shards of multiple indices.
     * see {@link #attemptSyncedFlush(ShardId, ActionListener)} for more details.
     */
    public void attemptSyncedFlush(final String[] aliasesOrIndices,
                                   IndicesOptions indicesOptions,
                                   final ActionListener<SyncedFlushResponse> listener) {
        final ClusterState state = clusterService.state();
        DEPRECATION_LOGGER.deprecatedAndMaybeLog("synced_flush", SYNCED_FLUSH_DEPRECATION_MESSAGE);
        final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
        final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
        int numberOfShards = 0;
        for (Index index : concreteIndices) {
            final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
            numberOfShards += indexMetaData.getNumberOfShards();
            results.put(index.getName(), Collections.synchronizedList(new ArrayList<>()));
        }
        if (numberOfShards == 0) {
            listener.onResponse(new SyncedFlushResponse(results));
            return;
        }
        final CountDown countDown = new CountDown(numberOfShards);

        for (final Index concreteIndex : concreteIndices) {
            final String index = concreteIndex.getName();
            final IndexMetaData indexMetaData = state.metaData().getIndexSafe(concreteIndex);
            final int indexNumberOfShards = indexMetaData.getNumberOfShards();
            for (int shard = 0; shard < indexNumberOfShards; shard++) {
                final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard);
                innerAttemptSyncedFlush(shardId, state, new ActionListener<ShardsSyncedFlushResult>() {
                    @Override
                    public void onResponse(ShardsSyncedFlushResult syncedFlushResult) {
                        results.get(index).add(syncedFlushResult);
                        if (countDown.countDown()) {
                            listener.onResponse(new SyncedFlushResponse(results));
                        }
                    }

                    @Override
                    public void onFailure(Exception e) {
                        logger.debug("{} unexpected error while executing synced flush", shardId);
                        final int totalShards = indexMetaData.getNumberOfReplicas() + 1;
                        results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage()));
                        if (countDown.countDown()) {
                            listener.onResponse(new SyncedFlushResponse(results));
                        }
                    }
                });
            }
        }
    }

    /*
     * Tries to flush all copies of a shard and write a sync id to it.
     * After a synced flush two shard copies may only contain the same sync id if they contain the same documents.
     * To ensure this, synced flush works in three steps:
     * 1. Flush all shard copies and gather the commit ids for each copy after the flush
     * 2. Ensure that there are no ongoing indexing operations on the primary
     * 3. Perform an additional flush on each shard copy that writes the sync id
     *
     * Step 3 is only executed on a shard if
     * a) the shard has no uncommitted changes since the last flush
     * b) the last flush was the one executed in 1 (use the collected commit id to verify this)
     *
     * This alone is not enough to ensure that all copies contain the same documents.
     * Without step 2 a sync id would be written for inconsistent copies in the following scenario:
     *
     * A write operation has completed on the primary and is being sent to the replicas. The write request does not reach the
     * replicas until the synced flush is finished.
     * Step 1 is executed. After the flush, the commit point on the primary contains a write operation that the replica does not have.
     * Step 3 will be executed on the primary and the replica as well, because there are no uncommitted changes on the primary (the
     * first flush committed them) and there are no uncommitted changes on the replica (the write operation has not reached the
     * replica yet).
     *
     * Step 2 detects this scenario and fails the whole synced flush if a write operation is ongoing on the primary.
     * Together with the conditions for step 3 (same commit id and no uncommitted changes) this guarantees that a sync id will only
     * be written on the primary if no write operation was executed between step 1 and step 3, and a sync id will only be written on
     * the replica if it contains the same changes that the primary contains.
     *
     * Synced flush is a best-effort operation. The sync id may be written on all, some, or none of the copies.
     **/
    public void attemptSyncedFlush(final ShardId shardId, final ActionListener<ShardsSyncedFlushResult> actionListener) {
        innerAttemptSyncedFlush(shardId, clusterService.state(), actionListener);
    }

    private void innerAttemptSyncedFlush(final ShardId shardId,
                                         final ClusterState state,
                                         final ActionListener<ShardsSyncedFlushResult> actionListener) {
        try {
            final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state);
            final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
            final int totalShards = shardRoutingTable.getSize();

            if (activeShards.size() == 0) {
                actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "no active shards"));
                return;
            }

            // 1. send pre-sync flushes to all replicas
            final StepListener<Map<String, PreSyncedFlushResponse>> presyncStep = new StepListener<>();
            sendPreSyncRequests(activeShards, state, shardId, presyncStep);

            // 2. fetch in flight operations
            final StepListener<InFlightOpsResponse> inflightOpsStep = new StepListener<>();
            presyncStep.whenComplete(presyncResponses -> {
                if (presyncResponses.isEmpty()) {
                    actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "all shards failed to commit on pre-sync"));
                } else {
                    getInflightOpsCount(shardId, state, shardRoutingTable, inflightOpsStep);
                }
            }, actionListener::onFailure);

            // 3. now send the sync request to all the shards
            inflightOpsStep.whenComplete(inFlightOpsResponse -> {
                final Map<String, PreSyncedFlushResponse> presyncResponses = presyncStep.result();
                final int inflight = inFlightOpsResponse.opCount();
                assert inflight >= 0;
                if (inflight != 0) {
                    actionListener.onResponse(
                        new ShardsSyncedFlushResult(shardId, totalShards, "[" + inflight + "] ongoing operations on primary"));
                } else {
                    final String sharedSyncId = sharedExistingSyncId(presyncResponses);
                    if (sharedSyncId != null) {
                        assert presyncResponses.values().stream().allMatch(r -> r.existingSyncId.equals(sharedSyncId)) :
                            "Not all shards have the same existing sync id [" + sharedSyncId + "], responses [" + presyncResponses + "]";
                        reportSuccessWithExistingSyncId(shardId, sharedSyncId, activeShards, totalShards, presyncResponses, actionListener);
                    } else {
                        String syncId = UUIDs.randomBase64UUID();
                        sendSyncRequests(syncId, activeShards, state, presyncResponses, shardId, totalShards, actionListener);
                    }
                }
            }, actionListener::onFailure);
        } catch (Exception e) {
            actionListener.onFailure(e);
        }
    }

    private String sharedExistingSyncId(Map<String, PreSyncedFlushResponse> preSyncedFlushResponses) {
        String existingSyncId = null;
        for (PreSyncedFlushResponse resp : preSyncedFlushResponses.values()) {
            if (Strings.isNullOrEmpty(resp.existingSyncId)) {
                return null;
            }
            if (existingSyncId == null) {
                existingSyncId = resp.existingSyncId;
            }
            if (existingSyncId.equals(resp.existingSyncId) == false) {
                return null;
            }
        }
        return existingSyncId;
    }

    private void reportSuccessWithExistingSyncId(ShardId shardId,
                                                 String existingSyncId,
                                                 List<ShardRouting> shards,
                                                 int totalShards,
                                                 Map<String, PreSyncedFlushResponse> preSyncResponses,
                                                 ActionListener<ShardsSyncedFlushResult> listener) {
        final Map<ShardRouting, ShardSyncedFlushResponse> results = new HashMap<>();
        for (final ShardRouting shard : shards) {
            if (preSyncResponses.containsKey(shard.currentNodeId())) {
                results.put(shard, new ShardSyncedFlushResponse((String) null));
            }
        }
        listener.onResponse(new ShardsSyncedFlushResult(shardId, existingSyncId, totalShards, results));
    }

    final IndexShardRoutingTable getShardRoutingTable(final ShardId shardId, final ClusterState state) {
        final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex());
        if (indexMetaData == null) {
            throw new IndexNotFoundException(shardId.getIndexName());
        } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
            throw new IndexClosedException(shardId.getIndex());
        }
        final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(indexMetaData.getIndex()).shard(shardId.id());
        if (shardRoutingTable == null) {
            throw new ShardNotFoundException(shardId);
        }
        return shardRoutingTable;
    }

    /**
     * returns the number of in-flight operations on the primary; -1 upon error.
     */
    protected void getInflightOpsCount(final ShardId shardId,
                                       ClusterState state,
                                       IndexShardRoutingTable shardRoutingTable,
                                       final ActionListener<InFlightOpsResponse> listener) {
        try {
            final ShardRouting primaryShard = shardRoutingTable.primaryShard();
            final DiscoveryNode primaryNode = state.nodes().get(primaryShard.currentNodeId());
            if (primaryNode == null) {
                logger.trace("{} failed to resolve node for primary shard {}, skipping sync", shardId, primaryShard);
                listener.onResponse(new InFlightOpsResponse(-1));
                return;
            }
            logger.trace("{} retrieving in flight operation count", shardId);
            transportService.sendRequest(primaryNode, IN_FLIGHT_OPS_ACTION_NAME, new InFlightOpsRequest(shardId),
                new TransportResponseHandler<InFlightOpsResponse>() {
                    @Override
                    public InFlightOpsResponse read(StreamInput in) throws IOException {
                        return new InFlightOpsResponse(in);
                    }

                    @Override
                    public void handleResponse(InFlightOpsResponse response) {
                        listener.onResponse(response);
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        logger.debug("{} unexpected error while retrieving in flight op count", shardId);
                        listener.onFailure(exp);
                    }

                    @Override
                    public String executor() {
                        return ThreadPool.Names.SAME;
                    }
                });
        } catch (Exception e) {
            listener.onFailure(e);
        }
    }

    private int numDocsOnPrimary(List<ShardRouting> shards, Map<String, PreSyncedFlushResponse> preSyncResponses) {
        for (ShardRouting shard : shards) {
            if (shard.primary()) {
                final PreSyncedFlushResponse resp = preSyncResponses.get(shard.currentNodeId());
                if (resp != null) {
                    return resp.numDocs;
                }
            }
        }
        return PreSyncedFlushResponse.UNKNOWN_NUM_DOCS;
    }

    void sendSyncRequests(final String syncId,
                          final List<ShardRouting> shards,
                          ClusterState state,
                          Map<String, PreSyncedFlushResponse> preSyncResponses,
                          final ShardId shardId,
                          final int totalShards,
                          final ActionListener<ShardsSyncedFlushResult> listener) {
        final CountDown countDown = new CountDown(shards.size());
        final Map<ShardRouting, ShardSyncedFlushResponse> results = ConcurrentCollections.newConcurrentMap();
        final int numDocsOnPrimary = numDocsOnPrimary(shards, preSyncResponses);
        for (final ShardRouting shard : shards) {
            final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
            if (node == null) {
                logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard);
                results.put(shard, new ShardSyncedFlushResponse("unknown node"));
                countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                continue;
            }
            final PreSyncedFlushResponse preSyncedResponse = preSyncResponses.get(shard.currentNodeId());
            if (preSyncedResponse == null) {
                logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}",
                    shardId, syncId, shard);
                results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush"));
                countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                continue;
            }
            if (preSyncedResponse.numDocs != numDocsOnPrimary &&
                preSyncedResponse.numDocs != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS &&
                numDocsOnPrimary != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS) {
                logger.debug("{} can't issue sync id [{}] for replica [{}] with num docs [{}]; num docs on primary [{}]",
                    shardId, syncId, shard, preSyncedResponse.numDocs, numDocsOnPrimary);
                results.put(shard, new ShardSyncedFlushResponse("ongoing indexing operations: " +
                    "num docs on replica [" + preSyncedResponse.numDocs + "]; num docs on primary [" + numDocsOnPrimary + "]"));
                countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                continue;
            }
            logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId);
            ShardSyncedFlushRequest syncedFlushRequest = new ShardSyncedFlushRequest(shard.shardId(), syncId, preSyncedResponse.commitId);
            transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, syncedFlushRequest,
                new TransportResponseHandler<ShardSyncedFlushResponse>() {
                    @Override
                    public ShardSyncedFlushResponse read(StreamInput in) throws IOException {
                        return new ShardSyncedFlushResponse(in);
                    }

                    @Override
                    public void handleResponse(ShardSyncedFlushResponse response) {
                        ShardSyncedFlushResponse existing = results.put(shard, response);
                        assert existing == null : "got two answers for node [" + node + "]";
                        // count after the assert so we won't decrement twice in handleException
                        countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        logger.trace(() -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping",
                            shardId, shard), exp);
                        results.put(shard, new ShardSyncedFlushResponse(exp.getMessage()));
                        countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results);
                    }

                    @Override
                    public String executor() {
                        return ThreadPool.Names.SAME;
                    }
                });
        }

    }

    private void countDownAndSendResponseIfDone(String syncId,
                                                List<ShardRouting> shards,
                                                ShardId shardId,
                                                int totalShards,
                                                ActionListener<ShardsSyncedFlushResult> listener,
                                                CountDown countDown,
                                                Map<ShardRouting, ShardSyncedFlushResponse> results) {
        if (countDown.countDown()) {
            assert results.size() == shards.size();
            listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results));
        }
    }

    /**
     * send presync requests to all started copies of the given shard
     */
    void sendPreSyncRequests(final List<ShardRouting> shards,
                             final ClusterState state,
                             final ShardId shardId,
                             final ActionListener<Map<String, PreSyncedFlushResponse>> listener) {
        final CountDown countDown = new CountDown(shards.size());
        final ConcurrentMap<String, PreSyncedFlushResponse> presyncResponses = ConcurrentCollections.newConcurrentMap();
        for (final ShardRouting shard : shards) {
            logger.trace("{} sending pre-synced flush request to {}", shardId, shard);
            final DiscoveryNode node = state.nodes().get(shard.currentNodeId());
            if (node == null) {
                logger.trace("{} shard routing {} refers to an unknown node. skipping.", shardId, shard);
                if (countDown.countDown()) {
                    listener.onResponse(presyncResponses);
                }
                continue;
            }
            transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()),
                new TransportResponseHandler<PreSyncedFlushResponse>() {
                    @Override
                    public PreSyncedFlushResponse read(StreamInput in) throws IOException {
                        return new PreSyncedFlushResponse(in);
                    }

                    @Override
                    public void handleResponse(PreSyncedFlushResponse response) {
                        PreSyncedFlushResponse existing = presyncResponses.putIfAbsent(node.getId(), response);
                        assert existing == null : "got two answers for node [" + node + "]";
                        // count after the assert so we won't decrement twice in handleException
                        if (countDown.countDown()) {
                            listener.onResponse(presyncResponses);
                        }
                    }

                    @Override
                    public void handleException(TransportException exp) {
                        logger.trace(() -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping",
                            shardId, shard), exp);
                        if (countDown.countDown()) {
                            listener.onResponse(presyncResponses);
                        }
                    }

                    @Override
                    public String executor() {
                        return ThreadPool.Names.SAME;
                    }
                });
        }
    }

    private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) {
        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
        FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
        logger.trace("{} performing pre sync flush", request.shardId());
        indexShard.flush(flushRequest);
        final CommitStats commitStats = indexShard.commitStats();
        final Engine.CommitId commitId = commitStats.getRawCommitId();
        logger.trace("{} pre sync flush done. commit id {}, num docs {}", request.shardId(), commitId, commitStats.getNumDocs());
        return new PreSyncedFlushResponse(commitId, commitStats.getNumDocs(), commitStats.syncId());
    }

    private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.getShard(request.shardId().id());
        logger.trace("{} performing sync flush. sync id [{}], expected commit id {}",
            request.shardId(), request.syncId(), request.expectedCommitId());
        Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId());
        logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result);
        switch (result) {
            case SUCCESS:
                return new ShardSyncedFlushResponse((String) null);
            case COMMIT_MISMATCH:
                return new ShardSyncedFlushResponse("commit has changed");
            case PENDING_OPERATIONS:
                return new ShardSyncedFlushResponse("pending operations");
            default:
                throw new ElasticsearchException("unknown synced flush result [" + result + "]");
        }
    }

    private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.getShard(request.shardId().id());
        if (indexShard.routingEntry().primary() == false) {
            throw new IllegalStateException("[" + request.shardId() + "] expected a primary shard");
        }
        int opCount = indexShard.getActiveOperationsCount();
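        // Descriptive note (added for clarity; an assumption based on the check below): getActiveOperationsCount()
        // reports IndexShard.OPERATIONS_BLOCKED while all operation permits are held, and that state is mapped to
        // zero in-flight operations, since no new writes can start on a blocked shard.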
        return new InFlightOpsResponse(opCount == IndexShard.OPERATIONS_BLOCKED ? 0 : opCount);
    }

    public static final class PreShardSyncedFlushRequest extends TransportRequest {
        private ShardId shardId;

        public PreShardSyncedFlushRequest(StreamInput in) throws IOException {
            super(in);
            this.shardId = new ShardId(in);
        }

        public PreShardSyncedFlushRequest(ShardId shardId) {
            this.shardId = shardId;
        }

        @Override
        public String toString() {
            return "PreShardSyncedFlushRequest{" +
                "shardId=" + shardId +
                '}';
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            shardId.writeTo(out);
        }

        public ShardId shardId() {
            return shardId;
        }
    }

    /**
     * Response for first step of synced flush (flush) for one shard copy
     */
    static final class PreSyncedFlushResponse extends TransportResponse {
        static final int UNKNOWN_NUM_DOCS = -1;

        Engine.CommitId commitId;
        int numDocs;
        @Nullable String existingSyncId = null;

        PreSyncedFlushResponse(StreamInput in) throws IOException {
            super(in);
            commitId = new Engine.CommitId(in);
            numDocs = in.readInt();
            existingSyncId = in.readOptionalString();
        }

        PreSyncedFlushResponse(Engine.CommitId commitId, int numDocs, String existingSyncId) {
            this.commitId = commitId;
            this.numDocs = numDocs;
            this.existingSyncId = existingSyncId;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            commitId.writeTo(out);
            out.writeInt(numDocs);
            out.writeOptionalString(existingSyncId);
        }
    }

    public static final class ShardSyncedFlushRequest extends TransportRequest {

        private String syncId;
        private Engine.CommitId expectedCommitId;
        private ShardId shardId;

        public ShardSyncedFlushRequest(StreamInput in) throws IOException {
            super(in);
            shardId = new ShardId(in);
            expectedCommitId = new Engine.CommitId(in);
            syncId = in.readString();
        }

        public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) {
            this.expectedCommitId = expectedCommitId;
            this.shardId = shardId;
            this.syncId = syncId;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            shardId.writeTo(out);
            expectedCommitId.writeTo(out);
            out.writeString(syncId);
        }

        public ShardId shardId() {
            return shardId;
        }

        public String syncId() {
            return syncId;
        }

        public Engine.CommitId expectedCommitId() {
            return expectedCommitId;
        }

        @Override
        public String toString() {
            return "ShardSyncedFlushRequest{" +
                "shardId=" + shardId +
                ",syncId='" + syncId + '\'' +
                '}';
        }
    }

    /**
     * Response for third step of synced flush (writing the sync id) for one shard copy
     */
    public static final class ShardSyncedFlushResponse extends TransportResponse {

        /**
         * a non null value indicates a failure to sync flush. null means success
         */
        String failureReason;

        public ShardSyncedFlushResponse(StreamInput in) throws IOException {
            super(in);
            failureReason = in.readOptionalString();
        }

        public ShardSyncedFlushResponse(String failureReason) {
            this.failureReason = failureReason;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeOptionalString(failureReason);
        }

        public boolean success() {
            return failureReason == null;
        }

        public String failureReason() {
            return failureReason;
        }

        @Override
        public String toString() {
            return "ShardSyncedFlushResponse{" +
                "success=" + success() +
                ", failureReason='" + failureReason + '\'' +
                '}';
        }

        public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException {
            return new ShardSyncedFlushResponse(in);
        }
    }

    public static final class InFlightOpsRequest extends TransportRequest {

        private ShardId shardId;

        public InFlightOpsRequest(StreamInput in) throws IOException {
            super(in);
            shardId = new ShardId(in);
        }

        public InFlightOpsRequest(ShardId shardId) {
            this.shardId = shardId;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            shardId.writeTo(out);
        }

        public ShardId shardId() {
            return shardId;
        }

        @Override
        public String toString() {
            return "InFlightOpsRequest{" +
                "shardId=" + shardId +
                '}';
        }
    }

    /**
     * Response for second step of synced flush (check operations in flight)
     */
    static final class InFlightOpsResponse extends TransportResponse {

        int opCount;

        InFlightOpsResponse(StreamInput in) throws IOException {
            super(in);
            opCount = in.readVInt();
        }

        InFlightOpsResponse(int opCount) {
            assert opCount >= 0 : opCount;
            this.opCount = opCount;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeVInt(opCount);
        }

        public int opCount() {
            return opCount;
        }

        @Override
        public String toString() {
            return "InFlightOpsResponse{" +
                "opCount=" + opCount +
                '}';
        }
    }

    private final class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreShardSyncedFlushRequest> {

        @Override
        public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception {
            channel.sendResponse(performPreSyncedFlush(request));
        }
    }

    private final class SyncedFlushTransportHandler implements TransportRequestHandler<ShardSyncedFlushRequest> {

        @Override
        public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception {
            channel.sendResponse(performSyncedFlush(request));
        }
    }

    private final class InFlightOpCountTransportHandler implements TransportRequestHandler<InFlightOpsRequest> {

        @Override
        public void messageReceived(InFlightOpsRequest request, TransportChannel channel, Task task) throws Exception {
            channel.sendResponse(performInFlightOps(request));
        }
    }

}
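
For orientation, here is a minimal caller-side sketch of the per-shard entry point removed by this commit. It is illustrative only: it assumes a SyncedFlushService instance named syncedFlushService and an already-resolved ShardId, and uses PlainActionFuture purely to make the asynchronous listener flow synchronous for the example.

    // hypothetical usage sketch, not part of this change
    PlainActionFuture<ShardsSyncedFlushResult> future = PlainActionFuture.newFuture();
    syncedFlushService.attemptSyncedFlush(shardId, future);   // runs the three-step protocol above
    ShardsSyncedFlushResult result = future.actionGet();
    if (result.failed() == false) {
        // each successful copy now carries result.syncId() in its Lucene commit user data
        assert result.successfulShards() <= result.totalShards();
    }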

@@ -19,18 +19,23 @@

package org.elasticsearch.rest.action.admin.indices;

import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestBuilderListener;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestToXContentListener;

import java.io.IOException;

@@ -39,6 +44,9 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;

public class RestSyncedFlushAction extends BaseRestHandler {

    private static final Logger logger = LogManager.getLogger(RestSyncedFlushAction.class);
    private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(logger);

    public RestSyncedFlushAction(RestController controller) {
        controller.registerHandler(POST, "/_flush/synced", this);
        controller.registerHandler(POST, "/{index}/_flush/synced", this);

@@ -54,17 +62,35 @@ public class RestSyncedFlushAction extends BaseRestHandler {

    @Override
    public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
        IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen());
        SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
        syncedFlushRequest.indicesOptions(indicesOptions);
        return channel -> client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener<SyncedFlushResponse>(channel) {
            @Override
            public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception {
                builder.startObject();
                results.toXContent(builder, request);
                builder.endObject();
                return new BytesRestResponse(results.restStatus(), builder);
        DEPRECATION_LOGGER.deprecatedAndMaybeLog("synced_flush",
            "Synced flush was removed and a normal flush was performed instead. This transition will be removed in a future version.");
        final FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index")));
        flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions()));
        return channel -> client.admin().indices().flush(flushRequest, new SimulateSyncedFlushResponseListener(channel));
    }

    static final class SimulateSyncedFlushResponseListener extends RestToXContentListener<FlushResponse> {

        SimulateSyncedFlushResponseListener(RestChannel channel) {
            super(channel);
        }

        @Override
        public RestResponse buildResponse(FlushResponse flushResponse, XContentBuilder builder) throws Exception {
            builder.startObject();
            buildSyncedFlushResponse(builder, flushResponse);
            builder.endObject();
            final RestStatus restStatus = flushResponse.getFailedShards() == 0 ? RestStatus.OK : RestStatus.CONFLICT;
            return new BytesRestResponse(restStatus, builder);
        }

        private void buildSyncedFlushResponse(XContentBuilder builder, FlushResponse flushResponse) throws IOException {
            builder.startObject("_shards");
            builder.field("total", flushResponse.getTotalShards());
            builder.field("successful", flushResponse.getSuccessfulShards());
            builder.field("failed", flushResponse.getFailedShards());
            // can't serialize the detail of each index as we don't have the shard count per index.
            builder.endObject();
        }
        });
    }
}
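
At the REST layer the endpoints stay registered, but a request now performs a normal flush and is answered in the legacy synced-flush shape. A hedged sketch with the low-level REST client follows; the RestClient instance, index name, and shard counts are assumptions for illustration:

    // illustrative only; Request/Response come from the low-level REST client
    Request request = new Request("POST", "/my-index/_flush/synced");
    Response response = client.performRequest(request);
    // expected body shape, per buildSyncedFlushResponse above, e.g.:
    // {"_shards":{"total":2,"successful":2,"failed":0}}
    // a deprecation warning is also emitted via DEPRECATION_LOGGER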

@@ -1,189 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.flush;

import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.ObjectIntMap;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse.ShardCounts;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.test.XContentTestUtils.convertToMap;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;

public class SyncedFlushUnitTests extends ESTestCase {

    private static class TestPlan {
        public SyncedFlushResponse.ShardCounts totalCounts;
        public Map<String, SyncedFlushResponse.ShardCounts> countsPerIndex = new HashMap<>();
        public ObjectIntMap<String> expectedFailuresPerIndex = new ObjectIntHashMap<>();
        public SyncedFlushResponse result;
    }

    public void testIndicesSyncedFlushResult() throws IOException {
        final TestPlan testPlan = createTestPlan();
        assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total));
        assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful));
        assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed));
        assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK));
        Map<String, Object> asMap = convertToMap(testPlan.result);
        assertShardCount("_shards header", (Map<String, Object>) asMap.get("_shards"), testPlan.totalCounts);

        assertThat("unexpected number of indices", asMap.size(), equalTo(1 + testPlan.countsPerIndex.size())); // +1 for the shards header
        for (String index : testPlan.countsPerIndex.keySet()) {
            Map<String, Object> indexMap = (Map<String, Object>) asMap.get(index);
            assertShardCount(index, indexMap, testPlan.countsPerIndex.get(index));
            List<Map<String, Object>> failureList = (List<Map<String, Object>>) indexMap.get("failures");
            final int expectedFailures = testPlan.expectedFailuresPerIndex.get(index);
            if (expectedFailures == 0) {
                assertNull(index + " has unexpected failures", failureList);
            } else {
                assertNotNull(index + " should have failures", failureList);
                assertThat(failureList, hasSize(expectedFailures));
            }
        }
    }

    public void testResponseStreaming() throws IOException {
        final TestPlan testPlan = createTestPlan();
        assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total));
        assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful));
        assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed));
        assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK));
        BytesStreamOutput out = new BytesStreamOutput();
        testPlan.result.writeTo(out);
        StreamInput in = out.bytes().streamInput();
        SyncedFlushResponse readResponse = new SyncedFlushResponse(in);
        assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total));
        assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful));
        assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed));
        assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK));
        assertThat(readResponse.getShardsResultPerIndex().size(), equalTo(testPlan.result.getShardsResultPerIndex().size()));
        for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : readResponse.getShardsResultPerIndex().entrySet()) {
            List<ShardsSyncedFlushResult> originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey());
            assertNotNull(originalShardsResults);
            List<ShardsSyncedFlushResult> readShardsResults = entry.getValue();
            assertThat(readShardsResults.size(), equalTo(originalShardsResults.size()));
            for (int i = 0; i < readShardsResults.size(); i++) {
                ShardsSyncedFlushResult originalShardResult = originalShardsResults.get(i);
                ShardsSyncedFlushResult readShardResult = readShardsResults.get(i);
                assertThat(originalShardResult.failureReason(), equalTo(readShardResult.failureReason()));
                assertThat(originalShardResult.failed(), equalTo(readShardResult.failed()));
                assertThat(originalShardResult.getShardId(), equalTo(readShardResult.getShardId()));
                assertThat(originalShardResult.successfulShards(), equalTo(readShardResult.successfulShards()));
                assertThat(originalShardResult.syncId(), equalTo(readShardResult.syncId()));
                assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards()));
                assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size()));
                for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry
                    : originalShardResult.failedShards().entrySet()) {
                    SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey());
                    assertNotNull(readShardResponse);
                    SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue();
                    assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason()));
                    assertThat(originalShardResponse.success(), equalTo(readShardResponse.success()));
                }
                assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size()));
                for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry
                    : originalShardResult.shardResponses().entrySet()) {
                    SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses()
                        .get(shardEntry.getKey());
                    assertNotNull(readShardResponse);
                    SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue();
                    assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason()));
                    assertThat(originalShardResponse.success(), equalTo(readShardResponse.success()));
                }
            }
        }
    }

    private void assertShardCount(String name, Map<String, Object> header, ShardCounts expectedCounts) {
        assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total));
        assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful));
        assertThat(name + " has unexpected failed count", (Integer) header.get("failed"), equalTo(expectedCounts.failed));
    }

    protected TestPlan createTestPlan() {
        final TestPlan testPlan = new TestPlan();
        final Map<String, List<ShardsSyncedFlushResult>> indicesResults = new HashMap<>();
        final int indexCount = randomIntBetween(1, 10);
        int totalShards = 0;
        int totalSuccesful = 0;
        int totalFailed = 0;
        for (int i = 0; i < indexCount; i++) {
            final String index = "index_" + i;
            int shards = randomIntBetween(1, 4);
            int replicas = randomIntBetween(0, 2);
            int successful = 0;
            int failed = 0;
            int failures = 0;
            List<ShardsSyncedFlushResult> shardsResults = new ArrayList<>();
            for (int shard = 0; shard < shards; shard++) {
                final ShardId shardId = new ShardId(index, "_na_", shard);
                if (randomInt(5) < 2) {
                    // total shard failure
                    failed += replicas + 1;
                    failures++;
                    shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure"));
                } else {
                    Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardResponses = new HashMap<>();
                    for (int copy = 0; copy < replicas + 1; copy++) {
                        final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy,
                            null, copy == 0, ShardRoutingState.STARTED);
                        if (randomInt(5) < 2) {
                            // shard copy failure
                            failed++;
                            failures++;
                            shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId));
                        } else {
                            successful++;
                            shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null));
                        }
                    }
                    shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses));
                }
            }
            indicesResults.put(index, shardsResults);
            testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed));
            testPlan.expectedFailuresPerIndex.put(index, failures);
            totalFailed += failed;
            totalShards += shards * (replicas + 1);
            totalSuccesful += successful;
        }
        testPlan.result = new SyncedFlushResponse(indicesResults);
        testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccesful, totalFailed);
        return testPlan;
    }

}
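
For reference, the response shape these tests assert, as a hedged sketch; the field values are illustrative, not taken from the tests:

    // shape asserted by testIndicesSyncedFlushResult (values illustrative):
    // {
    //   "_shards": { "total": 4, "successful": 3, "failed": 1 },
    //   "index_0": { "total": 2, "successful": 1, "failed": 1, "failures": [ ... ] },
    //   "index_1": { "total": 2, "successful": 2, "failed": 0 }
    // }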

@@ -19,7 +19,6 @@

package org.elasticsearch.gateway;

import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -150,10 +149,6 @@ public class ReplicaShardAllocatorIT extends ESIntegTestCase {

        indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(10, 100))
            .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).collect(Collectors.toList()));
        assertBusy(() -> {
            SyncedFlushResponse syncedFlushResponse = client().admin().indices().prepareSyncedFlush(indexName).get();
            assertThat(syncedFlushResponse.successfulShards(), equalTo(2));
        });
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithReplica));
        if (randomBoolean()) {
            indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(10, 100))

@@ -357,10 +352,11 @@
        assertNoOpRecoveries(indexName);
    }

    private void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception {
    public static void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception {
        final ClusterService clusterService = internalCluster().clusterService();
        assertBusy(() -> {
            Index index = resolveIndex(indexName);
            Set<String> activeRetentionLeaseIds = clusterService().state().routingTable().index(index).shard(0).shards().stream()
            Set<String> activeRetentionLeaseIds = clusterService.state().routingTable().index(index).shard(0).shards().stream()
                .map(shardRouting -> ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardRouting.currentNodeId()))
                .collect(Collectors.toSet());
            for (String node : internalCluster().nodesInclude(indexName)) {
@@ -514,7 +514,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
            null,
            null,
            null,
            null,
            primaryReplicaSyncer,
            RetentionLeaseSyncer.EMPTY,
            client) {
@@ -18,63 +18,34 @@
 */
package org.elasticsearch.indices.flush;

import org.apache.lucene.index.Term;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.InternalEngine;
import org.elasticsearch.index.engine.InternalEngineTests;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.InternalTestCluster;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyArray;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;

@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class FlushIT extends ESIntegTestCase {

@@ -136,253 +107,6 @@ public class FlushIT extends ESIntegTestCase {
            .actionGet().getShardFailures(), emptyArray());
    }

    public void testSyncedFlush() throws Exception {
        internalCluster().ensureAtLeastNumDataNodes(2);
        prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)).get();
        ensureGreen();

        final Index index = client().admin().cluster().prepareState().get().getState().metaData().index("test").getIndex();

        IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
        for (ShardStats shardStats : indexStats.getShards()) {
            assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
        }

        ShardsSyncedFlushResult result;
        if (randomBoolean()) {
            logger.info("--> sync flushing shard 0");
            result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), new ShardId(index, 0));
        } else {
            logger.info("--> sync flushing index [test]");
            SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get();
            result = indicesResult.getShardsResultPerIndex().get("test").get(0);
        }
        assertFalse(result.failed());
        assertThat(result.totalShards(), equalTo(indexStats.getShards().length));
        assertThat(result.successfulShards(), equalTo(indexStats.getShards().length));

        indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
        String syncId = result.syncId();
        for (ShardStats shardStats : indexStats.getShards()) {
            final String shardSyncId = shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID);
            assertThat(shardSyncId, equalTo(syncId));
        }

        // now, start new node and relocate a shard there and see if sync id still there
        String newNodeName = internalCluster().startNode();
        ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
        ShardRouting shardRouting = clusterState.getRoutingTable().index("test").shard(0).iterator().next();
        String currentNodeName = clusterState.nodes().resolveNode(shardRouting.currentNodeId()).getName();
        assertFalse(currentNodeName.equals(newNodeName));
        internalCluster().client().admin().cluster().prepareReroute()
            .add(new MoveAllocationCommand("test", 0, currentNodeName, newNodeName)).get();

        client().admin().cluster().prepareHealth()
            .setWaitForNoRelocatingShards(true)
            .get();
        indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
        for (ShardStats shardStats : indexStats.getShards()) {
            assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
        }

        client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build()).get();
        ensureGreen("test");
        indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
        for (ShardStats shardStats : indexStats.getShards()) {
            assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
        }
        client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, internalCluster().numDataNodes() - 1).build()).get();
        ensureGreen("test");
        indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
        for (ShardStats shardStats : indexStats.getShards()) {
            assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
        }
    }

    public void testSyncedFlushWithConcurrentIndexing() throws Exception {

        internalCluster().ensureAtLeastNumDataNodes(3);
        createIndex("test");

        client().admin().indices().prepareUpdateSettings("test").setSettings(
            Settings.builder()
                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
                .put("index.refresh_interval", -1)
                .put("index.number_of_replicas", internalCluster().numDataNodes() - 1))
            .get();
        ensureGreen();
        final AtomicBoolean stop = new AtomicBoolean(false);
        final AtomicInteger numDocs = new AtomicInteger(0);
        Thread indexingThread = new Thread() {
            @Override
            public void run() {
                while (stop.get() == false) {
                    client().prepareIndex().setIndex("test").setSource("{}", XContentType.JSON).get();
                    numDocs.incrementAndGet();
                }
            }
        };
        indexingThread.start();

        IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
        for (ShardStats shardStats : indexStats.getShards()) {
            assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
        }
        logger.info("--> trying sync flush");
        SyncedFlushResponse syncedFlushResult = client().admin().indices().prepareSyncedFlush("test").get();
        logger.info("--> sync flush done");
        stop.set(true);
        indexingThread.join();
        indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
        assertFlushResponseEqualsShardStats(indexStats.getShards(), syncedFlushResult.getShardsResultPerIndex().get("test"));
        refresh();
        assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get()));
        logger.info("indexed {} docs", client().prepareSearch().setSize(0).get().getHits().getTotalHits().value);
        logClusterState();
        internalCluster().fullRestart();
        ensureGreen();
        assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get()));
    }

    private void assertFlushResponseEqualsShardStats(ShardStats[] shardsStats, List<ShardsSyncedFlushResult> syncedFlushResults) {

        for (final ShardStats shardStats : shardsStats) {
            for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) {
                if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) {
                    for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> singleResponse :
                        shardResult.shardResponses().entrySet()) {
                        if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) {
                            if (singleResponse.getValue().success()) {
                                logger.info("{} sync flushed on node {}", singleResponse.getKey().shardId(),
                                    singleResponse.getKey().currentNodeId());
                                assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
                            } else {
                                logger.info("{} sync flush failed on node {}", singleResponse.getKey().shardId(),
                                    singleResponse.getKey().currentNodeId());
                                assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
                            }
                        }
                    }
                }
            }
        }
    }

    public void testUnallocatedShardsDoesNotHang() throws InterruptedException {
        // create an index but disallow allocation
        prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).setSettings(Settings.builder()
            .put("index.routing.allocation.include._name", "nonexistent")).get();

        // this should not hang but instead immediately return with empty result set
        List<ShardsSyncedFlushResult> shardsResult = client().admin().indices().prepareSyncedFlush("test").get()
            .getShardsResultPerIndex().get("test");
        // just to make sure the test actually tests the right thing
        int numShards = client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test")
            .getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1);
        assertThat(shardsResult.size(), equalTo(numShards));
        assertThat(shardsResult.get(0).failureReason(), equalTo("no active shards"));
    }

    private void indexDoc(Engine engine, String id) throws IOException {
        final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null);
        final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc,
            ((InternalEngine) engine).getProcessedLocalCheckpoint() + 1, 1L, 1L, null, Engine.Operation.Origin.REPLICA, System.nanoTime(),
            -1L, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0));
        assertThat(indexResult.getFailure(), nullValue());
        engine.syncTranslog();
    }

    public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception {
        internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
        final int numberOfReplicas = internalCluster().numDataNodes() - 1;
        assertAcked(
            prepareCreate("test").setSettings(Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)).get()
        );
        ensureGreen();
        final Index index = clusterService().state().metaData().index("test").getIndex();
        final ShardId shardId = new ShardId(index, 0);
        final int numDocs = between(1, 10);
        for (int i = 0; i < numDocs; i++) {
            indexDoc("test", Integer.toString(i));
        }
        final List<IndexShard> indexShards = internalCluster().nodesInclude("test").stream()
            .map(node -> internalCluster().getInstance(IndicesService.class, node).getShardOrNull(shardId))
            .collect(Collectors.toList());
        // Index extra documents to one replica - synced-flush should fail on that replica.
        final IndexShard outOfSyncReplica = randomValueOtherThanMany(s -> s.routingEntry().primary(), () -> randomFrom(indexShards));
        final int extraDocs = between(1, 10);
        for (int i = 0; i < extraDocs; i++) {
            indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i);
        }
        final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
        assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1));
        assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas));
        assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo(
            "ongoing indexing operations: num docs on replica [" + (numDocs + extraDocs) + "]; num docs on primary [" + numDocs + "]"));
        // Index extra documents to all shards - synced-flush should be ok.
||||
for (IndexShard indexShard : indexShards) {
|
||||
// Do reindex documents to the out of sync replica to avoid trigger merges
|
||||
if (indexShard != outOfSyncReplica) {
|
||||
for (int i = 0; i < extraDocs; i++) {
|
||||
indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i);
|
||||
}
|
||||
}
|
||||
}
|
||||
final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
|
||||
assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1));
|
||||
assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1));
|
||||
}

    public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception {
        internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
        final int numberOfReplicas = internalCluster().numDataNodes() - 1;
        assertAcked(
            prepareCreate("test").setSettings(Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)).get()
        );
        ensureGreen();
        final Index index = clusterService().state().metaData().index("test").getIndex();
        final ShardId shardId = new ShardId(index, 0);
        final int numDocs = between(1, 10);
        for (int i = 0; i < numDocs; i++) {
            indexDoc("test", Integer.toString(i));
        }
        final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
        assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1));
        // Do not renew synced-flush
        final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
        assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1));
        assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId()));
        // Shards were updated, renew synced flush.
        final int moreDocs = between(1, 10);
        for (int i = 0; i < moreDocs; i++) {
            indexDoc("test", "more-" + i);
        }
        final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
        assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1));
        assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId())));
        // Manually remove or change the sync-id, then renew synced flush.
        IndexShard shard = internalCluster().getInstance(IndicesService.class, randomFrom(internalCluster().nodesInclude("test")))
            .getShardOrNull(shardId);
        if (randomBoolean()) {
            // Change the existing sync-id of a single shard.
            shard.syncFlush(UUIDs.randomBase64UUID(random()), shard.commitStats().getRawCommitId());
            assertThat(shard.commitStats().syncId(), not(equalTo(thirdSeal.syncId())));
        } else {
            // Flush will create a new commit without a sync-id
            shard.flush(new FlushRequest(shardId.getIndexName()).force(true).waitIfOngoing(true));
            assertThat(shard.commitStats().syncId(), nullValue());
        }
        final ShardsSyncedFlushResult fourthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
        assertThat(fourthSeal.successfulShards(), equalTo(numberOfReplicas + 1));
        assertThat(fourthSeal.syncId(), not(equalTo(thirdSeal.syncId())));
    }
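
A hedged aside for orientation: the "seal" these tests renew is just an entry in the Lucene commit user data, which is why the test above can read and invalidate it through commit stats.

    // the sync id lives in the commit user data under Engine.SYNC_COMMIT_ID
    String syncId = shard.commitStats().getUserData().get(Engine.SYNC_COMMIT_ID);
    assertEquals(syncId, shard.commitStats().syncId());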

    public void testFlushOnInactive() throws Exception {
        final String indexName = "flush_on_inactive";
        List<String> dataNodes = internalCluster().startDataOnlyNodes(2, Settings.builder()

@ -1,243 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.indices.flush;

import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.List;
import java.util.Map;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {

    public void testModificationPreventsFlushing() throws InterruptedException {
        createIndex("test");
        client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
        IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
        IndexShard shard = test.getShardOrNull(0);

        SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
        final ShardId shardId = shard.shardId();
        final ClusterState state = getInstanceFromNode(ClusterService.class).state();
        final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
        final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
        assertEquals("exactly one active shard", 1, activeShards.size());
        Map<String, SyncedFlushService.PreSyncedFlushResponse> preSyncedResponses =
            SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
        assertEquals("exactly one commit id", 1, preSyncedResponses.size());
        client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get();
        String syncId = UUIDs.randomBase64UUID();
        SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
        flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener);
        listener.latch.await();
        assertNull(listener.error);
        ShardsSyncedFlushResult syncedFlushResult = listener.result;
        assertNotNull(syncedFlushResult);
        assertEquals(0, syncedFlushResult.successfulShards());
        assertEquals(1, syncedFlushResult.totalShards());
        assertEquals(syncId, syncedFlushResult.syncId());
        assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
        assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
        assertEquals("pending operations", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());

        // pull another commit and make sure we can't sync-flush with the old one
        SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
        listener = new SyncedFlushUtil.LatchedListener<>();
        flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener);
        listener.latch.await();
        assertNull(listener.error);
        syncedFlushResult = listener.result;
        assertNotNull(syncedFlushResult);
        assertEquals(0, syncedFlushResult.successfulShards());
        assertEquals(1, syncedFlushResult.totalShards());
        assertEquals(syncId, syncedFlushResult.syncId());
        assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
        assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
        assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
    }

    public void testSingleShardSuccess() throws InterruptedException {
        createIndex("test");
        client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
        IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
        IndexShard shard = test.getShardOrNull(0);

        SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
        final ShardId shardId = shard.shardId();
        SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
        flushService.attemptSyncedFlush(shardId, listener);
        listener.latch.await();
        assertNull(listener.error);
        ShardsSyncedFlushResult syncedFlushResult = listener.result;
        assertNotNull(syncedFlushResult);
        assertEquals(1, syncedFlushResult.successfulShards());
        assertEquals(1, syncedFlushResult.totalShards());
        SyncedFlushService.ShardSyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next();
        assertTrue(response.success());
    }

    public void testSyncFailsIfOperationIsInFlight() throws Exception {
        createIndex("test");
        client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
        IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
        IndexShard shard = test.getShardOrNull(0);

        // wait for the GCP sync spawned from the index request above to complete to avoid that request disturbing the check below
        assertBusy(() -> {
            assertEquals(0, shard.getLastSyncedGlobalCheckpoint());
            assertEquals(0, shard.getActiveOperationsCount());
        });

        SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
        final ShardId shardId = shard.shardId();
        PlainActionFuture<Releasable> fut = new PlainActionFuture<>();
        shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.WRITE, "");
        try (Releasable operationLock = fut.get()) {
            SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
            flushService.attemptSyncedFlush(shardId, listener);
            listener.latch.await();
            assertNull(listener.error);
            ShardsSyncedFlushResult syncedFlushResult = listener.result;
            assertNotNull(syncedFlushResult);
            assertEquals(0, syncedFlushResult.successfulShards());
            assertNotEquals(0, syncedFlushResult.totalShards());
            assertEquals("[1] ongoing operations on primary", syncedFlushResult.failureReason());
        }
    }

    public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException {
        createIndex("test", Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .build());
        IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
        final IndexShard shard = test.getShardOrNull(0);
        assertNotNull(shard);
        final ShardId shardId = shard.shardId();

        final SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);

        SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener();
        flushService.attemptSyncedFlush(new ShardId(shard.shardId().getIndex(), 1), listener);
        listener.latch.await();
        assertNotNull(listener.error);
        assertNull(listener.result);
        assertEquals(ShardNotFoundException.class, listener.error.getClass());
        assertEquals("no such shard", listener.error.getMessage());

        assertAcked(client().admin().indices().prepareClose("test"));
        listener = new SyncedFlushUtil.LatchedListener();
        flushService.attemptSyncedFlush(shardId, listener);
        listener.latch.await();
        assertNotNull(listener.error);
        assertNull(listener.result);
        assertEquals("closed", listener.error.getMessage());

        listener = new SyncedFlushUtil.LatchedListener();
        flushService.attemptSyncedFlush(new ShardId("index not found", "_na_", 0), listener);
        listener.latch.await();
        assertNotNull(listener.error);
        assertNull(listener.result);
        assertEquals("no such index [index not found]", listener.error.getMessage());
    }

    public void testFailAfterIntermediateCommit() throws InterruptedException {
        createIndex("test");
        client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
        IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
        IndexShard shard = test.getShardOrNull(0);

        SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
        final ShardId shardId = shard.shardId();
        final ClusterState state = getInstanceFromNode(ClusterService.class).state();
        final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
        final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
        assertEquals("exactly one active shard", 1, activeShards.size());
        Map<String, SyncedFlushService.PreSyncedFlushResponse> preSyncedResponses =
            SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
        assertEquals("exactly one commit id", 1, preSyncedResponses.size());
        if (randomBoolean()) {
            client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get();
        }
        client().admin().indices().prepareFlush("test").setForce(true).get();
        String syncId = UUIDs.randomBase64UUID();
        final SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
        flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener);
        listener.latch.await();
        assertNull(listener.error);
        ShardsSyncedFlushResult syncedFlushResult = listener.result;
        assertNotNull(syncedFlushResult);
        assertEquals(0, syncedFlushResult.successfulShards());
        assertEquals(1, syncedFlushResult.totalShards());
        assertEquals(syncId, syncedFlushResult.syncId());
        assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
        assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
        assertEquals("commit has changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
    }

    public void testFailWhenCommitIsMissing() throws InterruptedException {
        createIndex("test");
        client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get();
        IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
        IndexShard shard = test.getShardOrNull(0);

        SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
        final ShardId shardId = shard.shardId();
        final ClusterState state = getInstanceFromNode(ClusterService.class).state();
        final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state);
        final List<ShardRouting> activeShards = shardRoutingTable.activeShards();
        assertEquals("exactly one active shard", 1, activeShards.size());
        Map<String, SyncedFlushService.PreSyncedFlushResponse> preSyncedResponses =
            SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
        assertEquals("exactly one commit id", 1, preSyncedResponses.size());
        preSyncedResponses.clear(); // wipe it...
        String syncId = UUIDs.randomBase64UUID();
        SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
        flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener);
        listener.latch.await();
        assertNull(listener.error);
        ShardsSyncedFlushResult syncedFlushResult = listener.result;
        assertNotNull(syncedFlushResult);
        assertEquals(0, syncedFlushResult.successfulShards());
        assertEquals(1, syncedFlushResult.totalShards());
        assertEquals(syncId, syncedFlushResult.syncId());
        assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0)));
        assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success());
        assertEquals("no commit id from pre-sync flush", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason());
    }
}
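
A condensed, hedged sketch of the handshake the tests above drive step by step (all names as used in this file; flushService, activeShards, state, shardId, and shardRoutingTable are obtained as in testModificationPreventsFlushing):

    // phase 1: collect a commit id from every active copy
    Map<String, SyncedFlushService.PreSyncedFlushResponse> preSynced =
        SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId);
    // phase 2: pick a fresh sync id
    String syncId = UUIDs.randomBase64UUID();
    // phase 3: seal every copy whose commit is unchanged and has no pending operations
    SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
    flushService.sendSyncRequests(syncId, activeShards, state, preSynced, shardId, shardRoutingTable.size(), listener);
    listener.latch.await();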

@ -1,110 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.indices.flush;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.InternalTestCluster;

import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.test.ESTestCase.assertBusy;

/** Utils for SyncedFlush */
public class SyncedFlushUtil {

    private SyncedFlushUtil() {

    }

    /**
     * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)}
     */
    public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) throws Exception {
        /*
         * When the last indexing operation completes, we fire a global checkpoint sync.
         * Since a global checkpoint sync request is a replication request, it acquires an index
         * shard permit on the primary when executing. If this happens while we are issuing the
         * synced-flush, the synced-flush request fails, as it thinks there are in-flight
         * operations. We avoid this situation by issuing another synced-flush whenever the
         * previous attempt failed due to ongoing operations on the primary.
         */
        SyncedFlushService service = cluster.getInstance(SyncedFlushService.class);
        AtomicReference<LatchedListener<ShardsSyncedFlushResult>> listenerHolder = new AtomicReference<>();
        assertBusy(() -> {
            LatchedListener<ShardsSyncedFlushResult> listener = new LatchedListener<>();
            listenerHolder.set(listener);
            service.attemptSyncedFlush(shardId, listener);
            listener.latch.await();
            if (listener.result != null && listener.result.failureReason() != null
                && listener.result.failureReason().contains("ongoing operations on primary")) {
                throw new AssertionError(listener.result.failureReason()); // cause the assert busy to retry
            }
        });
        if (listenerHolder.get().error != null) {
            throw ExceptionsHelper.convertToElastic(listenerHolder.get().error);
        }
        return listenerHolder.get().result;
    }

    public static final class LatchedListener<T> implements ActionListener<T> {
        public volatile T result;
        public volatile Exception error;
        public final CountDownLatch latch = new CountDownLatch(1);

        @Override
        public void onResponse(T result) {
            this.result = result;
            latch.countDown();
        }

        @Override
        public void onFailure(Exception e) {
            error = e;
            latch.countDown();
        }
    }

    /**
     * Blocking version of {@link SyncedFlushService#sendPreSyncRequests(List, ClusterState, ShardId, ActionListener)}
     */
    public static Map<String, SyncedFlushService.PreSyncedFlushResponse> sendPreSyncRequests(SyncedFlushService service,
                                                                                             List<ShardRouting> activeShards,
                                                                                             ClusterState state,
                                                                                             ShardId shardId) {
        LatchedListener<Map<String, SyncedFlushService.PreSyncedFlushResponse>> listener = new LatchedListener<>();
        service.sendPreSyncRequests(activeShards, state, shardId, listener);
        try {
            listener.latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        if (listener.error != null) {
            throw ExceptionsHelper.convertToElastic(listener.error);
        }
        return listener.result;
    }
}
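
A hedged usage sketch, mirroring the call sites elsewhere in this change: the blocking helper above lets an integration test drive the async service in a single line and assert on the aggregate result.

    ShardsSyncedFlushResult result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
    assertThat(result.successfulShards(), equalTo(result.totalShards()));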

@ -62,6 +62,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.gateway.ReplicaShardAllocatorIT;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;

@ -78,7 +79,6 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.flush.SyncedFlushUtil;
import org.elasticsearch.indices.recovery.RecoveryState.Stage;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.node.RecoverySettingsChunkSizePlugin;

@ -109,7 +109,6 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;

@ -118,7 +117,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

import static java.util.Collections.singletonMap;

@ -329,8 +327,19 @@ public class IndexRecoveryIT extends ESIntegTestCase {
        final String nodeA = internalCluster().startNode();

        logger.info("--> create index on node: {}", nodeA);
        createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT)
            .getShards()[0].getStats().getStore().size();
        createIndex(INDEX_NAME, Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")
            .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms").build());

        int numDocs = randomIntBetween(10, 200);
        final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
        for (int i = 0; i < numDocs; i++) {
            docs[i] = client().prepareIndex(INDEX_NAME)
                .setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat());
        }
        indexRandom(randomBoolean(), docs);

        logger.info("--> start node B");
        // force a shard recovery from nodeA to nodeB

@ -346,8 +355,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
        logger.info("--> start node C");
        final String nodeC = internalCluster().startNode();

        // do a synced flush to generate a sync id
        assertThat(client().admin().indices().prepareSyncedFlush(INDEX_NAME).get().failedShards(), equalTo(0));
        ReplicaShardAllocatorIT.ensureActivePeerRecoveryRetentionLeasesAdvanced(INDEX_NAME);

        // hold peer recovery on phase 2 after nodeB down
        CountDownLatch phase1ReadyBlocked = new CountDownLatch(1);

@ -1077,73 +1085,6 @@ public class IndexRecoveryIT extends ESIntegTestCase {
        ensureGreen(indexName);
    }

    public void testRecoveryFlushReplica() throws Exception {
        internalCluster().ensureAtLeastNumDataNodes(3);
        String indexName = "test-index";
        createIndex(indexName, Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 1).build());
        int numDocs = randomIntBetween(0, 10);
        indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, numDocs)
            .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()));
        assertAcked(client().admin().indices().prepareUpdateSettings(indexName)
            .setSettings(Settings.builder().put("index.number_of_replicas", 1)));
        ensureGreen(indexName);
        ShardId shardId = null;
        for (ShardStats shardStats : client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) {
            shardId = shardStats.getShardRouting().shardId();
            if (shardStats.getShardRouting().primary() == false) {
                assertThat(shardStats.getCommitStats().getNumDocs(), equalTo(numDocs));
                SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(
                    shardStats.getCommitStats().getUserData().entrySet());
                assertThat(commitInfo.localCheckpoint, equalTo(shardStats.getSeqNoStats().getLocalCheckpoint()));
                assertThat(commitInfo.maxSeqNo, equalTo(shardStats.getSeqNoStats().getMaxSeqNo()));
            }
        }
        SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
        assertBusy(() -> assertThat(client().admin().indices().prepareSyncedFlush(indexName).get().failedShards(), equalTo(0)));
        assertAcked(client().admin().indices().prepareUpdateSettings(indexName)
            .setSettings(Settings.builder().put("index.number_of_replicas", 2)));
        ensureGreen(indexName);
        // Recovery should keep the syncId if there was no indexing activity on the primary after the synced flush.
        Set<String> syncIds = Stream.of(client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards())
            .map(shardStats -> shardStats.getCommitStats().syncId())
            .collect(Collectors.toSet());
        assertThat(syncIds, hasSize(1));
    }

    public void testRecoveryUsingSyncedFlushWithoutRetentionLease() throws Exception {
        internalCluster().ensureAtLeastNumDataNodes(2);
        String indexName = "test-index";
        createIndex(indexName, Settings.builder()
            .put("index.number_of_shards", 1)
            .put("index.number_of_replicas", 1)
            .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h") // do not reallocate the lost shard
            .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), "100ms") // expire leases quickly
            .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") // sync frequently
            .build());
        int numDocs = randomIntBetween(0, 10);
        indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, numDocs)
            .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()));
        ensureGreen(indexName);

        final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
        assertThat(SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId).successfulShards(), equalTo(2));

        final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
        final ShardRouting shardToResync = randomFrom(clusterState.routingTable().shardRoutingTable(shardId).activeShards());
        internalCluster().restartNode(clusterState.nodes().get(shardToResync.currentNodeId()).getName(),
            new InternalTestCluster.RestartCallback() {
                @Override
                public Settings onNodeStopped(String nodeName) throws Exception {
                    assertBusy(() -> assertFalse(client().admin().indices().prepareStats(indexName).get()
                        .getShards()[0].getRetentionLeaseStats().retentionLeases().contains(
                            ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardToResync))));
                    return super.onNodeStopped(nodeName);
                }
            });

        ensureGreen(indexName);
    }
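
A hedged aside: the lease checked in the restart callback above is the per-copy peer recovery retention lease, whose id is derived deterministically from the shard's routing entry.

    // same derivation as used in the assertBusy above
    String leaseId = ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardToResync);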

    public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception {
        internalCluster().ensureAtLeastNumDataNodes(2);
        List<String> nodes = randomSubsetOf(2, StreamSupport.stream(clusterService().state().nodes().getDataNodes().spliterator(), false)

@ -404,11 +404,8 @@ public class CloseIndexIT extends ESIntegTestCase {
        indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, randomIntBetween(0, 50))
            .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()));
        ensureGreen(indexName);
        if (randomBoolean()) {
            client().admin().indices().prepareFlush(indexName).get();
        } else {
            client().admin().indices().prepareSyncedFlush(indexName).get();
        }

        // index more documents while one shard copy is offline
        internalCluster().restartNode(dataNodes.get(1), new InternalTestCluster.RestartCallback() {
            @Override

@ -154,7 +154,6 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.cluster.IndicesClusterStateService;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.recovery.PeerRecoverySourceService;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;

@ -1202,7 +1201,6 @@
                new NodeMappingRefreshAction(transportService, metaDataMappingService),
                repositoriesService,
                mock(SearchService.class),
                new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver),
                new PeerRecoverySourceService(transportService, indicesService, recoverySettings),
                snapshotShardsService,
                new PrimaryReplicaSyncer(

@ -164,7 +164,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.elasticsearch.client.Requests.syncedFlushRequest;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

@ -1401,13 +1400,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
            client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
                new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
        } else if (maybeFlush && rarely()) {
            if (randomBoolean()) {
                client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
                    new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
            } else {
                client().admin().indices().syncedFlush(syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()),
                    new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
            }
        } else if (rarely()) {
            client().admin().indices().prepareForceMerge(indices)
                .setIndicesOptions(IndicesOptions.lenientExpandOpen())

@ -79,7 +79,6 @@ import org.elasticsearch.env.ShardLockObtainFailedException;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.engine.DocIdSeqNoAndSource;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineTestCase;

@ -116,7 +115,6 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;

@ -1144,40 +1142,10 @@ public final class InternalTestCluster extends TestCluster {
        // and not all docs have been purged after the test) and inherit from
        // ElasticsearchIntegrationTest must override beforeIndexDeletion() to avoid failures.
        assertNoPendingIndexOperations();
        // check that shards that have the same sync id also contain the same number of documents
        assertSameSyncIdSameDocs();
        assertOpenTranslogReferences();
        assertNoSnapshottedIndexCommit();
    }

    private void assertSameSyncIdSameDocs() {
        Map<String, Long> docsOnShards = new HashMap<>();
        final Collection<NodeAndClient> nodesAndClients = nodes.values();
        for (NodeAndClient nodeAndClient : nodesAndClients) {
            IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
            for (IndexService indexService : indexServices) {
                for (IndexShard indexShard : indexService) {
                    try {
                        CommitStats commitStats = indexShard.commitStats();
                        String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID);
                        if (syncId != null) {
                            long liveDocsOnShard = commitStats.getNumDocs();
                            if (docsOnShards.get(syncId) != null) {
                                assertThat("sync id is equal but number of docs does not match on node "
                                    + nodeAndClient.name + ". expected " + docsOnShards.get(syncId) + " but got "
                                    + liveDocsOnShard, docsOnShards.get(syncId), equalTo(liveDocsOnShard));
                            } else {
                                docsOnShards.put(syncId, liveDocsOnShard);
                            }
                        }
                    } catch (AlreadyClosedException e) {
                        // ignore: the engine is closed or the shard is recovering
                    }
                }
            }
        }
    }

    private void assertNoPendingIndexOperations() throws Exception {
        assertBusy(() -> {
            for (NodeAndClient nodeAndClient : nodes.values()) {

@ -54,7 +54,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.seqno.ReplicationTracker;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESTestCase;

@ -1214,18 +1213,60 @@ public abstract class ESRestTestCase extends ESTestCase {
        return minVersion;
    }

    protected static Response performSyncedFlush(String indexName) throws IOException {
        final Request request = new Request("POST", indexName + "/_flush/synced");
        final List<String> expectedWarnings = Collections.singletonList(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE);
        if (nodeVersions.stream().allMatch(version -> version.onOrAfter(Version.V_7_6_0))) {
            final Builder options = RequestOptions.DEFAULT.toBuilder();
            options.setWarningsHandler(warnings -> warnings.equals(expectedWarnings) == false);
            request.setOptions(options);
        } else if (nodeVersions.stream().anyMatch(version -> version.onOrAfter(Version.V_7_6_0))) {
            final Builder options = RequestOptions.DEFAULT.toBuilder();
            options.setWarningsHandler(warnings -> warnings.isEmpty() == false && warnings.equals(expectedWarnings) == false);
            request.setOptions(options);
        }
        return client().performRequest(request);
    }

    protected void syncedFlush(String indexName) throws Exception {
        final List<String> deprecationMessages = List.of(
            "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead.");
        final List<String> transitionMessages = List.of(
            "Synced flush was removed and a normal flush was performed instead. This transition will be removed in a future version.");
        final WarningsHandler warningsHandler;
        if (minimumNodeVersion().onOrAfter(Version.V_8_0_0)) {
            warningsHandler = warnings -> warnings.equals(transitionMessages) == false;
        } else if (minimumNodeVersion().onOrAfter(Version.V_7_6_0)) {
            warningsHandler = warnings -> warnings.equals(deprecationMessages) == false && warnings.equals(transitionMessages) == false;
        } else if (nodeVersions.stream().anyMatch(n -> n.onOrAfter(Version.V_8_0_0))) {
            warningsHandler = warnings -> warnings.isEmpty() == false && warnings.equals(transitionMessages) == false;
        } else {
            warningsHandler = warnings -> warnings.isEmpty() == false;
        }
        // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation.
        // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit.
        assertBusy(() -> {
            try {
                final Request request = new Request("POST", indexName + "/_flush/synced");
                request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warningsHandler));
                Response resp = client().performRequest(request);
                if (nodeVersions.stream().allMatch(v -> v.before(Version.V_8_0_0))) {
                    Map<String, Object> result = ObjectPath.createFromResponse(resp).evaluate("_shards");
                    assertThat(result.get("failed"), equalTo(0));
                }
            } catch (ResponseException ex) {
                if (ex.getResponse().getStatusLine().getStatusCode() == RestStatus.CONFLICT.getStatus()
                    && ex.getResponse().getWarnings().equals(transitionMessages)) {
                    logger.info("a normal flush was performed instead");
                } else {
                    throw new AssertionError(ex); // cause assert busy to retry
                }
            }
        });
        // ensure the global checkpoint is synced; otherwise we might trim the commit carrying the sync id
        ensureGlobalCheckpointSynced(indexName);
    }

    @SuppressWarnings("unchecked")
    private void ensureGlobalCheckpointSynced(String index) throws Exception {
        assertBusy(() -> {
            Map<?, ?> stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards")));
            List<Map<?, ?>> shardStats = (List<Map<?, ?>>) XContentMapValues.extractValue("indices." + index + ".shards.0", stats);
            shardStats.stream()
                .map(shard -> (Map<?, ?>) XContentMapValues.extractValue("seq_no", shard))
                .filter(Objects::nonNull)
                .forEach(seqNoStat -> {
                    long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue();
                    long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue();
                    long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue();
                    assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo));
                    assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo));
                });
        }, 60, TimeUnit.SECONDS);
    }
}
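
A hedged usage sketch (the index name is illustrative): a BWC test calls the helper above and can then inspect shard-level stats through the same low-level client.

    syncedFlush("test-index");
    Map<?, ?> stats = entityAsMap(client().performRequest(new Request("GET", "test-index/_stats?level=shards")));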

@ -57,11 +57,7 @@ public class FrozenIndexRecoveryTests extends ESIntegTestCase {
        indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, randomIntBetween(0, 50))
            .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()));
        ensureGreen(indexName);
        if (randomBoolean()) {
            client().admin().indices().prepareFlush(indexName).get();
        } else {
            client().admin().indices().prepareSyncedFlush(indexName).get();
        }
        // index more documents while one shard copy is offline
        internalCluster().restartNode(dataNodes.get(1), new InternalTestCluster.RestartCallback() {
            @Override

@ -435,7 +435,7 @@ public class MlDistributedFailureIT extends BaseMlIntegTestCase {
        setupJobAndDatafeed(jobId, "data_feed_id", TimeValue.timeValueSeconds(1));
        waitForDatafeed(jobId, numDocs1);

        client().admin().indices().prepareSyncedFlush().get();
        client().admin().indices().prepareFlush().get();

        disrupt.run();