diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java index e00a9c0da8d5..96494238e75e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesClient.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRespo import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -415,41 +414,6 @@ public final class IndicesClient { FlushResponse::fromXContent, listener, emptySet()); } - /** - * Initiate a synced flush manually using the synced flush API. - * See - * Synced flush API on elastic.co - * @param syncedFlushRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @return the response - * @throws IOException in case there is a problem sending the request or parsing back the response - * @deprecated synced flush is deprecated and will be removed in 8.0. - * Use {@link #flush(FlushRequest, RequestOptions)} instead. 
- */ - @Deprecated - public SyncedFlushResponse flushSynced(SyncedFlushRequest syncedFlushRequest, RequestOptions options) throws IOException { - return restHighLevelClient.performRequestAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options, - SyncedFlushResponse::fromXContent, emptySet()); - } - - /** - * Asynchronously initiate a synced flush manually using the synced flush API. - * See - * Synced flush API on elastic.co - * @param syncedFlushRequest the request - * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized - * @param listener the listener to be notified upon request completion - * @return cancellable that may be used to cancel the request - * @deprecated synced flush is deprecated and will be removed in 8.0. - * Use {@link #flushAsync(FlushRequest, RequestOptions, ActionListener)} instead. - */ - @Deprecated - public Cancellable flushSyncedAsync(SyncedFlushRequest syncedFlushRequest, RequestOptions options, - ActionListener listener) { - return restHighLevelClient.performRequestAsyncAndParseEntity(syncedFlushRequest, IndicesRequestConverters::flushSynced, options, - SyncedFlushResponse::fromXContent, listener, emptySet()); - } - /** * Retrieve the settings of one or more indices. 
* See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java index 553da42711c3..fea48c1c3e90 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndicesRequestConverters.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -194,15 +193,6 @@ final class IndicesRequestConverters { return request; } - static Request flushSynced(SyncedFlushRequest syncedFlushRequest) { - String[] indices = syncedFlushRequest.indices() == null ? Strings.EMPTY_ARRAY : syncedFlushRequest.indices(); - Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_flush/synced")); - RequestConverters.Params parameters = new RequestConverters.Params(); - parameters.withIndicesOptions(syncedFlushRequest.indicesOptions()); - request.addParameters(parameters.asMap()); - return request; - } - static Request forceMerge(ForceMergeRequest forceMergeRequest) { String[] indices = forceMergeRequest.indices() == null ? 
Strings.EMPTY_ARRAY : forceMergeRequest.indices(); Request request = new Request(HttpPost.METHOD_NAME, RequestConverters.endpoint(indices, "_forcemerge")); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SyncedFlushResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SyncedFlushResponse.java deleted file mode 100644 index 41e9c3d062b0..000000000000 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SyncedFlushResponse.java +++ /dev/null @@ -1,346 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.client; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentLocation; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentParser.Token; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; - -public class SyncedFlushResponse implements ToXContentObject { - - public static final String SHARDS_FIELD = "_shards"; - - private ShardCounts totalCounts; - private Map indexResults; - - SyncedFlushResponse(ShardCounts totalCounts, Map indexResults) { - this.totalCounts = new ShardCounts(totalCounts.total, totalCounts.successful, totalCounts.failed); - this.indexResults = Collections.unmodifiableMap(indexResults); - } - - /** - * @return The total number of shard copies that were processed across all indexes - */ - public int totalShards() { - return totalCounts.total; - } - - /** - * @return The number of successful shard copies that were processed across all indexes - */ - public int successfulShards() { - return totalCounts.successful; - } - - /** - * @return The number of failed shard copies that were processed across all indexes - */ - public int failedShards() { - return totalCounts.failed; - } - - /** - * @return A map of results for each index where the keys 
of the map are the index names - * and the values are the results encapsulated in {@link IndexResult}. - */ - public Map getIndexResults() { - return indexResults; - } - - ShardCounts getShardCounts() { - return totalCounts; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.startObject(SHARDS_FIELD); - totalCounts.toXContent(builder, params); - builder.endObject(); - for (Map.Entry entry: indexResults.entrySet()) { - String indexName = entry.getKey(); - IndexResult indexResult = entry.getValue(); - builder.startObject(indexName); - indexResult.toXContent(builder, params); - builder.endObject(); - } - builder.endObject(); - return builder; - } - - public static SyncedFlushResponse fromXContent(XContentParser parser) throws IOException { - ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); - ShardCounts totalCounts = null; - Map indexResults = new HashMap<>(); - XContentLocation startLoc = parser.getTokenLocation(); - while (parser.nextToken().equals(Token.FIELD_NAME)) { - if (parser.currentName().equals(SHARDS_FIELD)) { - ensureExpectedToken(Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); - totalCounts = ShardCounts.fromXContent(parser); - } else { - String indexName = parser.currentName(); - IndexResult indexResult = IndexResult.fromXContent(parser); - indexResults.put(indexName, indexResult); - } - } - if (totalCounts != null) { - return new SyncedFlushResponse(totalCounts, indexResults); - } else { - throw new ParsingException( - startLoc, - "Unable to reconstruct object. Total counts for shards couldn't be parsed." 
- ); - } - } - - /** - * Encapsulates the number of total successful and failed shard copies - */ - public static final class ShardCounts implements ToXContentFragment { - - public static final String TOTAL_FIELD = "total"; - public static final String SUCCESSFUL_FIELD = "successful"; - public static final String FAILED_FIELD = "failed"; - - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>( - "shardcounts", - a -> new ShardCounts((Integer) a[0], (Integer) a[1], (Integer) a[2]) - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD)); - } - - private int total; - private int successful; - private int failed; - - - ShardCounts(int total, int successful, int failed) { - this.total = total; - this.successful = successful; - this.failed = failed; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(TOTAL_FIELD, total); - builder.field(SUCCESSFUL_FIELD, successful); - builder.field(FAILED_FIELD, failed); - return builder; - } - - public static ShardCounts fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - - public boolean equals(ShardCounts other) { - if (other != null) { - return - other.total == this.total && - other.successful == this.successful && - other.failed == this.failed; - } else { - return false; - } - } - - } - - /** - * Description for the flush/synced results for a particular index. - * This includes total, successful and failed copies along with failure description for each failed copy. 
- */ - public static final class IndexResult implements ToXContentFragment { - - public static final String TOTAL_FIELD = "total"; - public static final String SUCCESSFUL_FIELD = "successful"; - public static final String FAILED_FIELD = "failed"; - public static final String FAILURES_FIELD = "failures"; - - @SuppressWarnings("unchecked") - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>( - "indexresult", - a -> new IndexResult((Integer) a[0], (Integer) a[1], (Integer) a[2], (List)a[3]) - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(TOTAL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(SUCCESSFUL_FIELD)); - PARSER.declareInt(constructorArg(), new ParseField(FAILED_FIELD)); - PARSER.declareObjectArray(optionalConstructorArg(), ShardFailure.PARSER, new ParseField(FAILURES_FIELD)); - } - - private ShardCounts counts; - private List failures; - - IndexResult(int total, int successful, int failed, List failures) { - counts = new ShardCounts(total, successful, failed); - if (failures != null) { - this.failures = Collections.unmodifiableList(failures); - } else { - this.failures = Collections.unmodifiableList(new ArrayList<>()); - } - } - - /** - * @return The total number of shard copies that were processed for this index. - */ - public int totalShards() { - return counts.total; - } - - /** - * @return The number of successful shard copies that were processed for this index. - */ - public int successfulShards() { - return counts.successful; - } - - /** - * @return The number of failed shard copies that were processed for this index. - */ - public int failedShards() { - return counts.failed; - } - - /** - * @return A list of {@link ShardFailure} objects that describe each of the failed shard copies for this index. 
- */ - public List failures() { - return failures; - } - - ShardCounts getShardCounts() { - return counts; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - counts.toXContent(builder, params); - if (failures.size() > 0) { - builder.startArray(FAILURES_FIELD); - for (ShardFailure failure : failures) { - failure.toXContent(builder, params); - } - builder.endArray(); - } - return builder; - } - - public static IndexResult fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - } - - /** - * Description of a failed shard copy for an index. - */ - public static final class ShardFailure implements ToXContentFragment { - - public static String SHARD_ID_FIELD = "shard"; - public static String FAILURE_REASON_FIELD = "reason"; - public static String ROUTING_FIELD = "routing"; - - private int shardId; - private String failureReason; - private Map routing; - - @SuppressWarnings("unchecked") - static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "shardfailure", - a -> new ShardFailure((Integer)a[0], (String)a[1], (Map)a[2]) - ); - static { - PARSER.declareInt(constructorArg(), new ParseField(SHARD_ID_FIELD)); - PARSER.declareString(constructorArg(), new ParseField(FAILURE_REASON_FIELD)); - PARSER.declareObject( - optionalConstructorArg(), - (parser, c) -> parser.map(), - new ParseField(ROUTING_FIELD) - ); - } - - ShardFailure(int shardId, String failureReason, Map routing) { - this.shardId = shardId; - this.failureReason = failureReason; - if (routing != null) { - this.routing = Collections.unmodifiableMap(routing); - } else { - this.routing = Collections.unmodifiableMap(new HashMap<>()); - } - } - - /** - * @return Id of the shard whose copy failed - */ - public int getShardId() { - return shardId; - } - - /** - * @return Reason for failure of the shard copy - */ - public String getFailureReason() { - return failureReason; - } - - /** - * 
@return Additional information about the failure. - */ - public Map getRouting() { - return routing; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(SHARD_ID_FIELD, shardId); - builder.field(FAILURE_REASON_FIELD, failureReason); - if (routing.size() > 0) { - builder.field(ROUTING_FIELD, routing); - } - builder.endObject(); - return builder; - } - - public static ShardFailure fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } - } -} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 842d34811584..3303ed4f218e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRespo import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -97,7 +96,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -759,41 +757,6 @@ public class 
IndicesClientIT extends ESRestHighLevelClientTestCase { } } - public void testSyncedFlush() throws IOException { - { - String index = "index"; - Settings settings = Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 0) - .build(); - createIndex(index, settings); - SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(index); - SyncedFlushResponse flushResponse = - execute(syncedFlushRequest, highLevelClient().indices()::flushSynced, highLevelClient().indices()::flushSyncedAsync, - expectWarnings(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE)); - assertThat(flushResponse.totalShards(), equalTo(1)); - assertThat(flushResponse.successfulShards(), equalTo(1)); - assertThat(flushResponse.failedShards(), equalTo(0)); - } - { - String nonExistentIndex = "non_existent_index"; - assertFalse(indexExists(nonExistentIndex)); - SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(nonExistentIndex); - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> - execute( - syncedFlushRequest, - highLevelClient().indices()::flushSynced, - highLevelClient().indices()::flushSyncedAsync, - expectWarnings(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE) - ) - ); - assertEquals(RestStatus.NOT_FOUND, exception.status()); - } - } - - public void testClearCache() throws IOException { { String index = "index"; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java index ae94e700b5fb..c9c2ee065fde 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesRequestConvertersTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import 
org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; @@ -460,30 +459,6 @@ public class IndicesRequestConvertersTests extends ESTestCase { Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); } - public void testSyncedFlush() { - String[] indices = ESTestCase.randomBoolean() ? null : RequestConvertersTests.randomIndicesNames(0, 5); - SyncedFlushRequest syncedFlushRequest; - if (ESTestCase.randomBoolean()) { - syncedFlushRequest = new SyncedFlushRequest(indices); - } else { - syncedFlushRequest = new SyncedFlushRequest(); - syncedFlushRequest.indices(indices); - } - Map expectedParams = new HashMap<>(); - RequestConvertersTests.setRandomIndicesOptions(syncedFlushRequest::indicesOptions, syncedFlushRequest::indicesOptions, - expectedParams); - Request request = IndicesRequestConverters.flushSynced(syncedFlushRequest); - StringJoiner endpoint = new StringJoiner("/", "/", ""); - if (indices != null && indices.length > 0) { - endpoint.add(String.join(",", indices)); - } - endpoint.add("_flush/synced"); - Assert.assertThat(request.getEndpoint(), equalTo(endpoint.toString())); - Assert.assertThat(request.getParameters(), equalTo(expectedParams)); - Assert.assertThat(request.getEntity(), nullValue()); - Assert.assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME)); - } - public void testForceMerge() { String[] indices = ESTestCase.randomBoolean() ? 
null : RequestConvertersTests.randomIndicesNames(0, 5); ForceMergeRequest forceMergeRequest; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java deleted file mode 100644 index f5cb9cdb0e02..000000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SyncedFlushResponseTests.java +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.client; - -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.ObjectIntMap; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.DeprecationHandler; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -public class SyncedFlushResponseTests extends ESTestCase { - - public void testXContentSerialization() throws IOException { - final XContentType xContentType = randomFrom(XContentType.values()); - TestPlan plan = createTestPlan(); - - XContentBuilder serverResponsebuilder = XContentBuilder.builder(xContentType.xContent()); - assertNotNull(plan.result); - serverResponsebuilder.startObject(); - plan.result.toXContent(serverResponsebuilder, ToXContent.EMPTY_PARAMS); - serverResponsebuilder.endObject(); - XContentBuilder clientResponsebuilder = XContentBuilder.builder(xContentType.xContent()); - assertNotNull(plan.result); - plan.clientResult.toXContent(clientResponsebuilder, ToXContent.EMPTY_PARAMS); - Map serverContentMap = convertFailureListToSet( - serverResponsebuilder - .generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - 
BytesReference.bytes(serverResponsebuilder).streamInput() - ).map() - ); - Map clientContentMap = convertFailureListToSet( - clientResponsebuilder - .generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(clientResponsebuilder).streamInput() - ) - .map() - ); - assertEquals(serverContentMap, clientContentMap); - } - - public void testXContentDeserialization() throws IOException { - final XContentType xContentType = randomFrom(XContentType.values()); - TestPlan plan = createTestPlan(); - XContentBuilder builder = XContentBuilder.builder(xContentType.xContent()); - builder.startObject(); - plan.result.toXContent(builder, ToXContent.EMPTY_PARAMS); - builder.endObject(); - XContentParser parser = builder - .generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ); - SyncedFlushResponse originalResponse = plan.clientResult; - SyncedFlushResponse parsedResponse = SyncedFlushResponse.fromXContent(parser); - assertNotNull(parsedResponse); - assertShardCounts(originalResponse.getShardCounts(), parsedResponse.getShardCounts()); - for (Map.Entry entry: originalResponse.getIndexResults().entrySet()) { - String index = entry.getKey(); - SyncedFlushResponse.IndexResult responseResult = entry.getValue(); - SyncedFlushResponse.IndexResult parsedResult = parsedResponse.getIndexResults().get(index); - assertNotNull(responseResult); - assertNotNull(parsedResult); - assertShardCounts(responseResult.getShardCounts(), parsedResult.getShardCounts()); - assertEquals(responseResult.failures().size(), parsedResult.failures().size()); - for (SyncedFlushResponse.ShardFailure responseShardFailure: responseResult.failures()) { - assertTrue(containsFailure(parsedResult.failures(), responseShardFailure)); - } - } - } - - static class TestPlan { - 
SyncedFlushResponse.ShardCounts totalCounts; - Map countsPerIndex = new HashMap<>(); - ObjectIntMap expectedFailuresPerIndex = new ObjectIntHashMap<>(); - org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse result; - SyncedFlushResponse clientResult; - } - - TestPlan createTestPlan() throws IOException { - final TestPlan testPlan = new TestPlan(); - final Map> indicesResults = new HashMap<>(); - Map indexResults = new HashMap<>(); - final XContentType xContentType = randomFrom(XContentType.values()); - final int indexCount = randomIntBetween(1, 10); - int totalShards = 0; - int totalSuccessful = 0; - int totalFailed = 0; - for (int i = 0; i < indexCount; i++) { - final String index = "index_" + i; - int shards = randomIntBetween(1, 4); - int replicas = randomIntBetween(0, 2); - int successful = 0; - int failed = 0; - int failures = 0; - List shardsResults = new ArrayList<>(); - List shardFailures = new ArrayList<>(); - for (int shard = 0; shard < shards; shard++) { - final ShardId shardId = new ShardId(index, "_na_", shard); - if (randomInt(5) < 2) { - // total shard failure - failed += replicas + 1; - failures++; - shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure")); - shardFailures.add( - new SyncedFlushResponse.ShardFailure( - shardId.id(), - "simulated total failure", - new HashMap<>() - ) - ); - } else { - Map shardResponses = new HashMap<>(); - for (int copy = 0; copy < replicas + 1; copy++) { - final ShardRouting shardRouting = - TestShardRouting.newShardRouting( - index, shard, "node_" + shardId + "_" + copy, null, - copy == 0, ShardRoutingState.STARTED - ); - if (randomInt(5) < 2) { - // shard copy failure - failed++; - failures++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); - // Building the shardRouting map here. 
- XContentBuilder builder = XContentBuilder.builder(xContentType.xContent()); - Map routing = - shardRouting.toXContent(builder, ToXContent.EMPTY_PARAMS) - .generator() - .contentType() - .xContent() - .createParser( - xContentRegistry(), - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - BytesReference.bytes(builder).streamInput() - ) - .map(); - shardFailures.add( - new SyncedFlushResponse.ShardFailure( - shardId.id(), - "copy failure " + shardId, - routing - ) - ); - } else { - successful++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null)); - } - } - shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); - } - } - indicesResults.put(index, shardsResults); - indexResults.put( - index, - new SyncedFlushResponse.IndexResult( - shards * (replicas + 1), - successful, - failed, - shardFailures - ) - ); - testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed)); - testPlan.expectedFailuresPerIndex.put(index, failures); - totalFailed += failed; - totalShards += shards * (replicas + 1); - totalSuccessful += successful; - } - testPlan.result = new org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse(indicesResults); - testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed); - testPlan.clientResult = new SyncedFlushResponse( - new SyncedFlushResponse.ShardCounts(totalShards, totalSuccessful, totalFailed), - indexResults - ); - return testPlan; - } - - public boolean containsFailure(List failures, SyncedFlushResponse.ShardFailure origFailure) { - for (SyncedFlushResponse.ShardFailure failure: failures) { - if (failure.getShardId() == origFailure.getShardId() && - failure.getFailureReason().equals(origFailure.getFailureReason()) && - failure.getRouting().equals(origFailure.getRouting())) { - return true; - } - } - return false; - } - - - public void 
assertShardCounts(SyncedFlushResponse.ShardCounts first, SyncedFlushResponse.ShardCounts second) { - if (first == null) { - assertNull(second); - } else { - assertTrue(first.equals(second)); - } - } - - public Map convertFailureListToSet(Map input) { - Map retMap = new HashMap<>(); - for (Map.Entry entry: input.entrySet()) { - if (entry.getKey().equals(SyncedFlushResponse.SHARDS_FIELD)) { - retMap.put(entry.getKey(), entry.getValue()); - } else { - // This was an index entry. - @SuppressWarnings("unchecked") - Map indexResult = (Map)entry.getValue(); - Map retResult = new HashMap<>(); - for (Map.Entry entry2: indexResult.entrySet()) { - if (entry2.getKey().equals(SyncedFlushResponse.IndexResult.FAILURES_FIELD)) { - @SuppressWarnings("unchecked") - List failures = (List)entry2.getValue(); - Set retSet = new HashSet<>(failures); - retResult.put(entry.getKey(), retSet); - } else { - retResult.put(entry2.getKey(), entry2.getValue()); - } - } - retMap.put(entry.getKey(), retResult); - } - } - return retMap; - } -} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java index 02d426e4b6c6..506a2b52e47b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IndicesClientDocumentationIT.java @@ -31,7 +31,6 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRespo import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import 
org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -56,7 +55,6 @@ import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.GetAliasesResponse; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestHighLevelClient; -import org.elasticsearch.client.SyncedFlushResponse; import org.elasticsearch.client.core.BroadcastResponse.Shards; import org.elasticsearch.client.core.ShardsAcknowledgedResponse; import org.elasticsearch.client.indices.AnalyzeRequest; @@ -998,94 +996,6 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase } } - @SuppressWarnings("unused") - public void testSyncedFlushIndex() throws Exception { - RestHighLevelClient client = highLevelClient(); - - { - createIndex("index1", Settings.EMPTY); - } - - { - // tag::flush-synced-request - SyncedFlushRequest request = new SyncedFlushRequest("index1"); // <1> - SyncedFlushRequest requestMultiple = new SyncedFlushRequest("index1", "index2"); // <2> - SyncedFlushRequest requestAll = new SyncedFlushRequest(); // <3> - // end::flush-synced-request - - // tag::flush-synced-request-indicesOptions - request.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1> - // end::flush-synced-request-indicesOptions - - // tag::flush-synced-execute - SyncedFlushResponse flushSyncedResponse = client.indices().flushSynced(request, expectWarnings( - "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead." 
- )); - // end::flush-synced-execute - - // tag::flush-synced-response - int totalShards = flushSyncedResponse.totalShards(); // <1> - int successfulShards = flushSyncedResponse.successfulShards(); // <2> - int failedShards = flushSyncedResponse.failedShards(); // <3> - - for (Map.Entry responsePerIndexEntry: - flushSyncedResponse.getIndexResults().entrySet()) { - String indexName = responsePerIndexEntry.getKey(); // <4> - SyncedFlushResponse.IndexResult indexResult = responsePerIndexEntry.getValue(); - int totalShardsForIndex = indexResult.totalShards(); // <5> - int successfulShardsForIndex = indexResult.successfulShards(); // <6> - int failedShardsForIndex = indexResult.failedShards(); // <7> - if (failedShardsForIndex > 0) { - for (SyncedFlushResponse.ShardFailure failureEntry: indexResult.failures()) { - int shardId = failureEntry.getShardId(); // <8> - String failureReason = failureEntry.getFailureReason(); // <9> - Map routing = failureEntry.getRouting(); // <10> - } - } - } - // end::flush-synced-response - - // tag::flush-synced-execute-listener - ActionListener listener = new ActionListener() { - @Override - public void onResponse(SyncedFlushResponse refreshResponse) { - // <1> - } - - @Override - public void onFailure(Exception e) { - // <2> - } - }; - // end::flush-synced-execute-listener - - // Replace the empty listener by a blocking listener in test - final CountDownLatch latch = new CountDownLatch(1); - listener = new LatchedActionListener<>(listener, latch); - - // tag::flush-synced-execute-async - client.indices().flushSyncedAsync(request, expectWarnings( - "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead." 
- ), listener); // <1> - // end::flush-synced-execute-async - - assertTrue(latch.await(30L, TimeUnit.SECONDS)); - } - - { - // tag::flush-synced-notfound - try { - SyncedFlushRequest request = new SyncedFlushRequest("does_not_exist"); - client.indices().flushSynced(request, RequestOptions.DEFAULT); - } catch (ElasticsearchException exception) { - if (exception.status() == RestStatus.NOT_FOUND) { - // <1> - } - } - // end::flush-synced-notfound - } - } - public void testGetSettings() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/docs/java-rest/high-level/indices/flush_synced.asciidoc b/docs/java-rest/high-level/indices/flush_synced.asciidoc deleted file mode 100644 index e5dfa59153b0..000000000000 --- a/docs/java-rest/high-level/indices/flush_synced.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ --- -:api: flush-synced -:request: SyncedFlushRequest -:response: SyncedFlushResponse --- - -[id="{upid}-{api}"] -=== Flush Synced API - -[id="{upid}-{api}-request"] -==== Flush Synced Request - -A +{request}+ can be applied to one or more indices, or even on `_all` the indices: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request] --------------------------------------------------- -<1> Flush synced one index -<2> Flush synced multiple indices -<3> Flush synced all the indices - -==== Optional arguments - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-request-indicesOptions] --------------------------------------------------- -<1> Setting `IndicesOptions` controls how unavailable indices are resolved and -how wildcard expressions are expanded - -include::../execution.asciidoc[] - -[id="{upid}-{api}-response"] -==== Flush Synced Response - -The returned +{response}+ allows to retrieve information about the -executed operation as follows: - 
-["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-response] --------------------------------------------------- -<1> Total number of shards hit by the flush request -<2> Number of shards where the flush has succeeded -<3> Number of shards where the flush has failed -<4> Name of the index whose results we are about to calculate. -<5> Total number of shards for index mentioned in 4. -<6> Successful shards for index mentioned in 4. -<7> Failed shards for index mentioned in 4. -<8> One of the failed shard ids of the failed index mentioned in 4. -<9> Reason for failure of copies of the shard mentioned in 8. -<10> JSON represented by a Map. Contains shard related information like id, state, version etc. -for the failed shard copies. If the entire shard failed then this returns an empty map. - -By default, if the indices were not found, an `ElasticsearchException` will be thrown: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests-file}[{api}-notfound] --------------------------------------------------- -<1> Do something if the indices to be flushed were not found diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 4b848819702b..3e2ed5ce6cb3 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -102,7 +102,6 @@ Index Management:: * <<{upid}-clone-index>> * <<{upid}-refresh>> * <<{upid}-flush>> -* <<{upid}-flush-synced>> * <<{upid}-clear-cache>> * <<{upid}-force-merge>> * <<{upid}-rollover-index>> @@ -138,7 +137,6 @@ include::indices/split_index.asciidoc[] include::indices/clone_index.asciidoc[] include::indices/refresh.asciidoc[] include::indices/flush.asciidoc[] -include::indices/flush_synced.asciidoc[] include::indices/clear_cache.asciidoc[] 
include::indices/force_merge.asciidoc[] include::indices/rollover.asciidoc[] diff --git a/docs/reference/index-modules/allocation/delayed.asciidoc b/docs/reference/index-modules/allocation/delayed.asciidoc index f49ed7e05dbc..5dee9444668c 100644 --- a/docs/reference/index-modules/allocation/delayed.asciidoc +++ b/docs/reference/index-modules/allocation/delayed.asciidoc @@ -28,7 +28,7 @@ this scenario: If the master had just waited for a few minutes, then the missing shards could have been re-allocated to Node 5 with the minimum of network traffic. This process would be even quicker for idle shards (shards not receiving indexing -requests) which have been automatically <>. +requests) which have been automatically <>. The allocation of replica shards which become unassigned because a node has left can be delayed with the `index.unassigned.node_left.delayed_timeout` diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index e1bec04eabc0..224346f3f5df 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -68,7 +68,6 @@ index settings, aliases, mappings, and index templates. * <> * <> * <> -* <> * <> @@ -136,10 +135,8 @@ include::indices/shrink-index.asciidoc[] include::indices/split-index.asciidoc[] -include::indices/synced-flush.asciidoc[] - include::indices/apis/unfreeze.asciidoc[] include::indices/aliases.asciidoc[] -include::indices/update-settings.asciidoc[] \ No newline at end of file +include::indices/update-settings.asciidoc[] diff --git a/docs/reference/indices/synced-flush.asciidoc b/docs/reference/indices/synced-flush.asciidoc index 8e3a85aceca3..4d0ab4ff98c2 100644 --- a/docs/reference/indices/synced-flush.asciidoc +++ b/docs/reference/indices/synced-flush.asciidoc @@ -4,278 +4,4 @@ Synced flush ++++ -deprecated::[7.6, synced-flush is deprecated and will be removed in 8.0. -Use <> instead. 
A <> has the -same effect as a synced flush on Elasticsearch 7.6 or later] - -Performs a synced flush on one or more indices. - -[source,console] --------------------------------------------------- -POST /twitter/_flush/synced --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - - -[[synced-flush-api-request]] -==== {api-request-title} - -`POST //flush/synced` - -`GET //flush/synced` - -`POST /flush/synced` - -`GET /flush/synced` - - -[[synced-flush-api-desc]] -==== {api-description-title} - -[[synced-flush-using-api]] -===== Use the synced flush API - -Use the synced flush API to manually initiate a synced flush. -This can be useful for a planned cluster restart where -you can stop indexing but don't want to wait for 5 minutes until all indices -are marked as inactive and automatically sync-flushed. - -You can request a synced flush even if there is ongoing indexing activity, and -{es} will perform the synced flush on a "best-effort" basis: shards that do not -have any ongoing indexing activity will be successfully sync-flushed, and other -shards will fail to sync-flush. The successfully sync-flushed shards will have -faster recovery times as long as the `sync_id` marker is not removed by a -subsequent flush. - - -[[synced-flush-overview]] -===== Synced flush overview - -{es} keeps track of which shards have received indexing activity recently, and -considers shards that have not received any indexing operations for 5 minutes to -be inactive. - -When a shard becomes inactive {es} performs a special kind of flush -known as a *synced flush*. A synced flush performs a normal -<> on each replica of the shard, and then adds a marker known -as the `sync_id` to each replica to indicate that these copies have identical -Lucene indices. Comparing the `sync_id` markers of the two copies is a very -efficient way to check whether they have identical contents. 
- -When allocating shard replicas, {es} must ensure that each replica contains the -same data as the primary. If the shard copies have been synced-flushed and the -replica shares a `sync_id` with the primary then {es} knows that the two copies -have identical contents. This means there is no need to copy any segment files -from the primary to the replica, which saves a good deal of time during -recoveries and restarts. - -This is particularly useful for clusters having lots of indices which are very -rarely updated, such as with time-based indices. Without the synced flush -marker, recovery of this kind of cluster would be much slower. - - -[[synced-flush-sync-id-markers]] -===== Check for `sync_id` markers - -To check whether a shard has a `sync_id` marker or not, look for the `commit` -section of the shard stats returned by the <> API: - -[source,console] --------------------------------------------------- -GET /twitter/_stats?filter_path=**.commit&level=shards <1> --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - -<1> `filter_path` is used to reduce the verbosity of the response, but is entirely optional - -The API returns the following response: - -[source,console-result] --------------------------------------------------- -{ - "indices": { - "twitter": { - "shards": { - "0": [ - { - "commit" : { - "id" : "3M3zkw2GHMo2Y4h4/KFKCg==", - "generation" : 3, - "user_data" : { - "translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA", - "history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ", - "local_checkpoint" : "-1", - "translog_generation" : "2", - "max_seq_no" : "-1", - "sync_id" : "AVvFY-071siAOuFGEO9P", <1> - "max_unsafe_auto_id_timestamp" : "-1", - "min_retained_seq_no" : "0" - }, - "num_docs" : 0 - } - } - ] - } - } - } -} --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] -<1> the `sync id` marker - -NOTE: The `sync_id` 
marker is removed as soon as the shard is flushed again, and -{es} may trigger an automatic flush of a shard at any time if there are -unflushed operations in the shard's translog. In practice this means that one -should consider any indexing operation on an index as having removed its -`sync_id` markers. - - -[[synced-flush-api-path-params]] -==== {api-path-parms-title} - -include::{docdir}/rest-api/common-parms.asciidoc[tag=index] -+ -To sync-flush all indices, -omit this parameter -or use a value of `_all` or `*`. - - -[[synced-flush-api-query-params]] -==== {api-query-parms-title} - -include::{docdir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] - -include::{docdir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] -+ -Defaults to `open`. - -include::{docdir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] - - -[[synced-flush-api-response-codes]] -==== {api-response-codes-title} - -`200`:: -All shards successfully sync-flushed. - -`409`:: -A replica shard failed to sync-flush. 
- - -[[synced-flush-api-example]] -==== {api-examples-title} - - -[[synced-flush-api-specific-ex]] -===== Sync-flush a specific index - -[source,console] ----- -POST /kimchy/_flush/synced ----- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - - -[[synced-flush-api-multi-ex]] -===== Synch-flush several indices - -[source,console] --------------------------------------------------- -POST /kimchy,elasticsearch/_flush/synced --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - - -[[synced-flush-api-all-ex]] -===== Sync-flush all indices - -[source,console] --------------------------------------------------- -POST /_flush/synced --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - -The response contains details about how many shards were successfully -sync-flushed and information about any failure. - -The following response indicates two shards -and one replica shard -successfully sync-flushed: - -[source,console-result] --------------------------------------------------- -{ - "_shards": { - "total": 2, - "successful": 2, - "failed": 0 - }, - "twitter": { - "total": 2, - "successful": 2, - "failed": 0 - } -} --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - -The following response indicates one shard group failed -due to pending operations: - -[source,console-result] --------------------------------------------------- -{ - "_shards": { - "total": 4, - "successful": 2, - "failed": 2 - }, - "twitter": { - "total": 4, - "successful": 2, - "failed": 2, - "failures": [ - { - "shard": 1, - "reason": "[2] ongoing operations on primary" - } - ] - } -} --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] - -Sometimes the failures are 
specific to a shard replica. The copies that failed -will not be eligible for fast recovery but those that succeeded still will be. -This case is reported as follows: - -[source,console-result] --------------------------------------------------- -{ - "_shards": { - "total": 4, - "successful": 1, - "failed": 1 - }, - "twitter": { - "total": 4, - "successful": 3, - "failed": 1, - "failures": [ - { - "shard": 1, - "reason": "unexpected error", - "routing": { - "state": "STARTED", - "primary": false, - "node": "SZNr2J_ORxKTLUCydGX4zA", - "relocating_node": null, - "shard": 1, - "index": "twitter" - } - } - ] - } -} --------------------------------------------------- -// TEST[skip: Synced flush can conflict with scheduled flushes in doc tests] +Synced flush was removed. Use normal <> instead. diff --git a/docs/reference/migration/migrate_8_0/indices.asciidoc b/docs/reference/migration/migrate_8_0/indices.asciidoc index 5b1d5a10df09..ef5cd25f8180 100644 --- a/docs/reference/migration/migrate_8_0/indices.asciidoc +++ b/docs/reference/migration/migrate_8_0/indices.asciidoc @@ -27,3 +27,10 @@ and the setting is removed. In 6.0, we deprecated the `template` field in put index template requests in favor of using `index_patterns`. Support for the `template` field is now removed in 8.0. + + +[float] +==== Remove synced flush + +Synced flush was deprecated in 7.6 and is removed in 8.0. Use a regular flush +instead as it has the same effect as a synced flush in 7.6 and later. diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index c69f736feb17..5a5c5c5e0b74 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -20,8 +20,8 @@ There are several thread pools, but the important ones include: `1000`. [[search-throttled]]`search_throttled`:: - For count/search/suggest/get operations on `search_throttled indices`. 
- Thread pool type is `fixed_auto_queue_size` with a size of `1`, and initial + For count/search/suggest/get operations on `search_throttled indices`. + Thread pool type is `fixed_auto_queue_size` with a size of `1`, and initial queue_size of `100`. `get`:: @@ -30,7 +30,7 @@ There are several thread pools, but the important ones include: queue_size of `1000`. `analyze`:: - For analyze requests. Thread pool type is `fixed` with a size of `1`, queue + For analyze requests. Thread pool type is `fixed` with a size of `1`, queue size of `16`. `write`:: @@ -51,8 +51,8 @@ There are several thread pools, but the important ones include: keep-alive of `5m` and a max of `min(10, (# of available processors)/2)`. `listener`:: - Mainly for java client executing of action when listener threaded is set to - `true`. Thread pool type is `scaling` with a default max of + Mainly for java client executing of action when listener threaded is set to + `true`. Thread pool type is `scaling` with a default max of `min(10, (# of available processors)/2)`. `fetch_shard_started`:: @@ -66,7 +66,7 @@ There are several thread pools, but the important ones include: size of `2 * # of available processors`. `flush`:: - For <>, <>, and <> `fsync` operations. + For <> and <> `fsync` operations. Thread pool type is `scaling` with a keep-alive of `5m` and a default maximum size of `min(5, (# of available processors)/2)`. @@ -202,13 +202,13 @@ processors: 2 There are a few use-cases for explicitly overriding the `processors` setting: -. If you are running multiple instances of {es} on the same host but want {es} -to size its thread pools as if it only has a fraction of the CPU, you should -override the `processors` setting to the desired fraction, for example, if +. 
If you are running multiple instances of {es} on the same host but want {es} +to size its thread pools as if it only has a fraction of the CPU, you should +override the `processors` setting to the desired fraction, for example, if you're running two instances of {es} on a 16-core machine, set `processors` to 8. -Note that this is an expert-level use case and there's a lot more involved -than just setting the `processors` setting as there are other considerations -like changing the number of garbage collector threads, pinning processes to +Note that this is an expert-level use case and there's a lot more involved +than just setting the `processors` setting as there are other considerations +like changing the number of garbage collector threads, pinning processes to cores, and so on. . Sometimes the number of processors is wrongly detected and in such cases explicitly setting the `processors` setting will workaround such diff --git a/docs/reference/setup/restart-cluster.asciidoc b/docs/reference/setup/restart-cluster.asciidoc index 58402758a72d..6b3151f752cf 100644 --- a/docs/reference/setup/restart-cluster.asciidoc +++ b/docs/reference/setup/restart-cluster.asciidoc @@ -1,11 +1,11 @@ [[restart-cluster]] == Full-cluster restart and rolling restart - -There may be {ref}/configuring-tls.html#tls-transport[situations where you want -to perform a full-cluster restart] or a rolling restart. In the case of -<>, you shut down and restart all the -nodes in the cluster while in the case of -<>, you shut down only one node at a + +There may be {ref}/configuring-tls.html#tls-transport[situations where you want +to perform a full-cluster restart] or a rolling restart. In the case of +<>, you shut down and restart all the +nodes in the cluster while in the case of +<>, you shut down only one node at a time, so the service remains uninterrupted. @@ -21,27 +21,29 @@ include::{docdir}/upgrade/disable-shard-alloc.asciidoc[] -- // end::disable_shard_alloc[] // tag::stop_indexing[] -. 
*Stop indexing and perform a synced flush.* +. *Stop indexing and perform a flush.* + -- -Performing a <> speeds up shard -recovery. +Performing a <> speeds up shard recovery. -include::{docdir}/upgrade/synced-flush.asciidoc[] +[source,console] +-------------------------------------------------- +POST /_flush +-------------------------------------------------- -- // end::stop_indexing[] //tag::stop_ml[] . *Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional) + -- -{ml-cap} features require a platinum license or higher. For more information about Elastic +{ml-cap} features require a platinum license or higher. For more information about Elastic license levels, see https://www.elastic.co/subscriptions[the subscription page]. -You have two options to handle {ml} jobs and {dfeeds} when you shut down a +You have two options to handle {ml} jobs and {dfeeds} when you shut down a cluster: * Temporarily halt the tasks associated with your {ml} jobs and {dfeeds} and -prevent new jobs from opening by using the +prevent new jobs from opening by using the <>: + [source,console] @@ -50,15 +52,15 @@ POST _ml/set_upgrade_mode?enabled=true -------------------------------------------------- // TEST + -When you disable upgrade mode, the jobs resume using the last model state that -was automatically saved. This option avoids the overhead of managing active jobs -during the shutdown and is faster than explicitly stopping {dfeeds} and closing +When you disable upgrade mode, the jobs resume using the last model state that +was automatically saved. This option avoids the overhead of managing active jobs +during the shutdown and is faster than explicitly stopping {dfeeds} and closing jobs. * {ml-docs}/stopping-ml.html[Stop all {dfeeds} and close all jobs]. This option saves the model state at the time of closure. When you reopen the jobs after the -cluster restart, they use the exact same model. 
However, saving the latest model -state takes longer than using upgrade mode, especially if you have a lot of jobs +cluster restart, they use the exact same model. However, saving the latest model +state takes longer than using upgrade mode, especially if you have a lot of jobs or jobs with large model states. -- // end::stop_ml[] @@ -102,8 +104,8 @@ When a node joins the cluster, it begins to recover any primary shards that are stored locally. The <> API initially reports a `status` of `red`, indicating that not all primary shards have been allocated. -Once a node recovers its local shards, the cluster `status` switches to -`yellow`, indicating that all primary shards have been recovered, but not all +Once a node recovers its local shards, the cluster `status` switches to +`yellow`, indicating that all primary shards have been recovered, but not all replica shards are allocated. This is to be expected because you have not yet re-enabled allocation. Delaying the allocation of replicas until all nodes are `yellow` allows the master to allocate replicas to nodes that @@ -149,7 +151,7 @@ GET _cat/recovery . *Restart machine learning jobs.* (Optional) + -- -If you temporarily halted the tasks associated with your {ml} jobs, use the +If you temporarily halted the tasks associated with your {ml} jobs, use the <> to return them to active states: [source,console] @@ -158,7 +160,7 @@ POST _ml/set_upgrade_mode?enabled=false -------------------------------------------------- // TEST[continued] -If you closed all {ml} jobs before stopping the nodes, open the jobs and start +If you closed all {ml} jobs before stopping the nodes, open the jobs and start the datafeeds from {kib} or with the <> and <> APIs. -- @@ -177,10 +179,10 @@ include::{docdir}/setup/restart-cluster.asciidoc[tag=stop_indexing] include::{docdir}/setup/restart-cluster.asciidoc[tag=stop_ml] + -- -* If you perform a rolling restart, you can also leave your machine learning -jobs running. 
When you shut down a machine learning node, its jobs automatically -move to another node and restore the model states. This option enables your jobs -to continue running during the shutdown but it puts increased load on the +* If you perform a rolling restart, you can also leave your machine learning +jobs running. When you shut down a machine learning node, its jobs automatically +move to another node and restore the model states. This option enables your jobs +to continue running during the shutdown but it puts increased load on the cluster. -- @@ -191,11 +193,11 @@ include::{docdir}/upgrade/shut-down-node.asciidoc[] -- . *Perform any needed changes.* - + . *Restart the node you changed.* + -- -Start the node and confirm that it joins the cluster by checking the log file or +Start the node and confirm that it joins the cluster by checking the log file or by submitting a `_cat/nodes` request: [source,console] @@ -208,8 +210,8 @@ GET _cat/nodes . *Reenable shard allocation.* + -- -Once the node has joined the cluster, remove the -`cluster.routing.allocation.enable` setting to enable shard allocation and start +Once the node has joined the cluster, remove the +`cluster.routing.allocation.enable` setting to enable shard allocation and start using the node: [source,console] diff --git a/docs/reference/upgrade/cluster_restart.asciidoc b/docs/reference/upgrade/cluster_restart.asciidoc index 5d967929dc18..3066d44c7101 100644 --- a/docs/reference/upgrade/cluster_restart.asciidoc +++ b/docs/reference/upgrade/cluster_restart.asciidoc @@ -22,13 +22,15 @@ To perform a full cluster restart upgrade to {version}: include::disable-shard-alloc.asciidoc[] -- -. *Stop indexing and perform a synced flush.* +. *Stop indexing and perform a flush.* + -- -Performing a <> speeds up shard -recovery. +Performing a <> speeds up shard recovery. 
-include::synced-flush.asciidoc[] +[source,console] +-------------------------------------------------- +POST /_flush +-------------------------------------------------- -- . *Temporarily stop the tasks associated with active {ml} jobs and {dfeeds}.* (Optional) @@ -71,7 +73,7 @@ a node. . If you use {es} {security-features} to define realms, verify that your realm settings are up-to-date. The format of realm settings changed in version 7.0, in particular, the placement of the realm type changed. See -<>. +<>. . *Start each upgraded node.* + diff --git a/docs/reference/upgrade/synced-flush.asciidoc b/docs/reference/upgrade/synced-flush.asciidoc deleted file mode 100644 index a67c171f9987..000000000000 --- a/docs/reference/upgrade/synced-flush.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ - -[source,console] --------------------------------------------------- -POST _flush/synced --------------------------------------------------- -// TEST[skip: will fail as synced flush is deprecated] - -When you perform a synced flush, check the response to make sure there are -no failures. Synced flush operations that fail due to pending indexing -operations are listed in the response body, although the request itself -still returns a 200 OK status. If there are failures, reissue the request. - -Note that synced flush is deprecated and will be removed in 8.0. A flush -has the same effect as a synced flush on Elasticsearch 7.6 or later. 
diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 9c7d7af4ca6c..427383d7d829 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -23,7 +23,6 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; @@ -679,16 +678,10 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { flushRequest.addParameter("force", "true"); flushRequest.addParameter("wait_if_ongoing", "true"); assertOK(client().performRequest(flushRequest)); - if (randomBoolean()) { - // We had a bug before where we failed to perform peer recovery with sync_id from 5.x to 6.x. - // We added this synced flush so we can exercise different paths of recovery code. 
- try { - performSyncedFlush(index); - } catch (ResponseException ignored) { - // synced flush is optional here - } + syncedFlush(index); } + if (shouldHaveTranslog) { // Update a few documents so we are sure to have a translog indexRandomDocuments( diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index 3acefce8e4e8..d951c4d0c56e 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -21,13 +21,17 @@ package org.elasticsearch.backwards; import org.apache.http.HttpHost; import org.elasticsearch.Version; import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.ObjectPath; @@ -38,6 +42,7 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; public class IndexingIT extends ESRestTestCase { @@ -274,6 +279,57 @@ public class IndexingIT extends ESRestTestCase { request.setJsonEntity("{\"indices\": \"" + index + "\"}"); } + public void testSyncedFlushTransition() throws Exception { + Nodes nodes = buildNodeAndVersions(); + assertTrue("bwc version is on 7.x", nodes.getBWCVersion().before(Version.V_8_0_0)); 
+ assumeFalse("no new node found", nodes.getNewNodes().isEmpty()); + assumeFalse("no bwc node found", nodes.getBWCNodes().isEmpty()); + // Allocate shards to new nodes then verify synced flush requests processed by old nodes/new nodes + String newNodes = nodes.getNewNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); + int numShards = randomIntBetween(1, 10); + int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); + int totalShards = numShards * (numOfReplicas + 1); + final String index = "test_synced_flush"; + createIndex(index, Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put("index.routing.allocation.include._name", newNodes).build()); + ensureGreen(index); + indexDocs(index, randomIntBetween(0, 100), between(1, 100)); + try (RestClient oldNodeClient = buildClient(restClientSettings(), + nodes.getBWCNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { + Request request = new Request("POST", index + "/_flush/synced"); + assertBusy(() -> { + ResponseException responseException = expectThrows(ResponseException.class, () -> oldNodeClient.performRequest(request)); + assertThat(responseException.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.CONFLICT.getStatus())); + assertThat(responseException.getResponse().getWarnings(), + contains("Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead.")); + Map result = ObjectPath.createFromResponse(responseException.getResponse()).evaluate("_shards"); + assertThat(result.get("total"), equalTo(totalShards)); + assertThat(result.get("successful"), equalTo(0)); + assertThat(result.get("failed"), equalTo(totalShards)); + }); + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + assertThat(XContentMapValues.extractValue("indices." 
+ index + ".total.translog.uncommitted_operations", stats), equalTo(0)); + } + indexDocs(index, randomIntBetween(0, 100), between(1, 100)); + try (RestClient newNodeClient = buildClient(restClientSettings(), + nodes.getNewNodes().stream().map(Node::getPublishAddress).toArray(HttpHost[]::new))) { + Request request = new Request("POST", index + "/_flush/synced"); + List warningMsg = List.of("Synced flush was removed and a normal flush was performed instead. " + + "This transition will be removed in a future version."); + request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> warnings.equals(warningMsg) == false)); + assertBusy(() -> { + Map result = ObjectPath.createFromResponse(newNodeClient.performRequest(request)).evaluate("_shards"); + assertThat(result.get("total"), equalTo(totalShards)); + assertThat(result.get("successful"), equalTo(totalShards)); + assertThat(result.get("failed"), equalTo(0)); + }); + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + assertThat(XContentMapValues.extractValue("indices." 
+ index + ".total.translog.uncommitted_operations", stats), equalTo(0)); + } + } + private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { Request request = new Request("GET", index + "/_count"); request.addParameter("preference", preference); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index f29bfddde042..d0892fd914f5 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -44,7 +44,6 @@ import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Objects; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.function.Predicate; @@ -283,7 +282,7 @@ public class RecoveryIT extends AbstractRollingTestCase { } public void testRecovery() throws Exception { - final String index = "recover_with_soft_deletes"; + final String index = "test_recovery"; if (CLUSTER_TYPE == ClusterType.OLD) { Settings.Builder settings = Settings.builder() .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) @@ -315,6 +314,9 @@ public class RecoveryIT extends AbstractRollingTestCase { } } } + if (randomBoolean()) { + syncedFlush(index); + } ensureGreen(index); } @@ -557,40 +559,6 @@ public class RecoveryIT extends AbstractRollingTestCase { } } - private void syncedFlush(String index) throws Exception { - // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. - // A synced-flush request considers the global checkpoint sync as an going operation because it acquires a shard permit. 
- assertBusy(() -> { - try { - Response resp = performSyncedFlush(index); - Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); - assertThat(result.get("failed"), equalTo(0)); - } catch (ResponseException ex) { - throw new AssertionError(ex); // cause assert busy to retry - } - }); - // ensure the global checkpoint is synced; otherwise we might trim the commit with syncId - ensureGlobalCheckpointSynced(index); - } - - @SuppressWarnings("unchecked") - private void ensureGlobalCheckpointSynced(String index) throws Exception { - assertBusy(() -> { - Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); - List> shardStats = (List>) XContentMapValues.extractValue("indices." + index + ".shards.0", stats); - shardStats.stream() - .map(shard -> (Map) XContentMapValues.extractValue("seq_no", shard)) - .filter(Objects::nonNull) - .forEach(seqNoStat -> { - long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue(); - long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue(); - long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue(); - assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo)); - assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo)); - }); - }, 60, TimeUnit.SECONDS); - } - /** Ensure that we can always execute update requests regardless of the version of cluster */ public void testUpdateDoc() throws Exception { final String index = "test_update_doc"; diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json deleted file mode 100644 index a7b4541c9623..000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.flush_synced.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "indices.flush_synced":{ - 
"documentation":{ - "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-synced-flush-api.html", - "description":"Performs a synced flush operation on one or more indices. Synced flush is deprecated and will be removed in 8.0. Use flush instead" - }, - "stability":"stable", - "url":{ - "paths":[ - { - "path":"/_flush/synced", - "methods":[ - "POST", - "GET" - ] - }, - { - "path":"/{index}/_flush/synced", - "methods":[ - "POST", - "GET" - ], - "parts":{ - "index":{ - "type":"list", - "description":"A comma-separated list of index names; use `_all` or empty string for all indices" - } - } - } - ] - }, - "params":{ - "ignore_unavailable":{ - "type":"boolean", - "description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices":{ - "type":"boolean", - "description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards":{ - "type":"enum", - "options":[ - "open", - "closed", - "none", - "all" - ], - "default":"open", - "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both." 
- } - } - } -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml index e3681c834dba..89b8236225c0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.flush/10_basic.yml @@ -1,33 +1,3 @@ ---- -"Index synced flush rest test": - - skip: - version: " - 7.5.99" - reason: "synced flush is deprecated in 7.6" - features: "warnings" - - do: - indices.create: - index: testing - body: - settings: - index: - number_of_replicas: 0 - - - do: - cluster.health: - wait_for_status: green - - do: - warnings: - - Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead. - indices.flush_synced: - index: testing - - - is_false: _shards.failed - - - do: - indices.stats: {level: shards} - - - is_true: indices.testing.shards.0.0.commit.user_data.sync_id - --- "Flush stats": diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 3d60a1fb698d..1ef3c1418adb 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -107,10 +107,8 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.flush.FlushAction; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; import org.elasticsearch.action.admin.indices.flush.TransportFlushAction; import org.elasticsearch.action.admin.indices.flush.TransportShardFlushAction; -import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction; import 
org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.TransportForceMergeAction; import org.elasticsearch.action.admin.indices.get.GetIndexAction; @@ -492,7 +490,6 @@ public class ActionModule extends AbstractModule { actions.register(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); actions.register(RefreshAction.INSTANCE, TransportRefreshAction.class); actions.register(FlushAction.INSTANCE, TransportFlushAction.class); - actions.register(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class); actions.register(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); actions.register(UpgradeAction.INSTANCE, TransportUpgradeAction.class); actions.register(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java deleted file mode 100644 index d5180c799ade..000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.ActionType; - - -public class SyncedFlushAction extends ActionType { - - public static final SyncedFlushAction INSTANCE = new SyncedFlushAction(); - public static final String NAME = "indices:admin/synced_flush"; - - private SyncedFlushAction() { - super(NAME, SyncedFlushResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java deleted file mode 100644 index cb3333354b8e..000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.support.broadcast.BroadcastRequest; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; -import java.util.Arrays; - -/** - * A synced flush request to sync flush one or more indices. 
The synced flush process of an index performs a flush - * and writes the same sync id to primary and all copies. - * - *

Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}.

- * - * @see org.elasticsearch.client.Requests#flushRequest(String...) - * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) - * @see SyncedFlushResponse - */ -public class SyncedFlushRequest extends BroadcastRequest { - - /** - * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will - * be sync flushed. - */ - public SyncedFlushRequest(String... indices) { - super(indices); - } - - public SyncedFlushRequest(StreamInput in) throws IOException { - super(in); - } - - @Override - public String toString() { - return "SyncedFlushRequest{" + - "indices=" + Arrays.toString(indices) + "}"; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java deleted file mode 100644 index aee7c4688bb6..000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.ElasticsearchClient; - -public class SyncedFlushRequestBuilder extends ActionRequestBuilder { - - public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) { - super(client, action, new SyncedFlushRequest()); - } - - public SyncedFlushRequestBuilder setIndices(String[] indices) { - super.request().indices(indices); - return this; - } - - public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - super.request().indicesOptions(indicesOptions); - return this; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java deleted file mode 100644 index 5e286b184fec..000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.util.iterable.Iterables; -import org.elasticsearch.common.xcontent.ToXContentFragment; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static java.util.Collections.unmodifiableMap; - -/** - * The result of performing a sync flush operation on all shards of multiple indices - */ -public class SyncedFlushResponse extends ActionResponse implements ToXContentFragment { - - private final Map> shardsResultPerIndex; - private final ShardCounts shardCounts; - - public SyncedFlushResponse(Map> shardsResultPerIndex) { - // shardsResultPerIndex is never modified after it is passed to this - // constructor so this is safe even though shardsResultPerIndex is a - // ConcurrentHashMap - this.shardsResultPerIndex = unmodifiableMap(shardsResultPerIndex); - this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values())); - } - - public SyncedFlushResponse(StreamInput in) throws IOException { - super(in); - shardCounts = new ShardCounts(in); - Map> tmpShardsResultPerIndex = new HashMap<>(); - int numShardsResults = in.readInt(); - for (int i =0 ; i< numShardsResults; i++) { - String index = in.readString(); - List shardsSyncedFlushResults = new ArrayList<>(); - int numShards = in.readInt(); - for (int j =0; j< numShards; j++) { - 
shardsSyncedFlushResults.add(new ShardsSyncedFlushResult(in)); - } - tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults); - } - shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex); - } - - /** - * total number shards, including replicas, both assigned and unassigned - */ - public int totalShards() { - return shardCounts.total; - } - - /** - * total number of shards for which the operation failed - */ - public int failedShards() { - return shardCounts.failed; - } - - /** - * total number of shards which were successfully sync-flushed - */ - public int successfulShards() { - return shardCounts.successful; - } - - public RestStatus restStatus() { - return failedShards() == 0 ? RestStatus.OK : RestStatus.CONFLICT; - } - - public Map> getShardsResultPerIndex() { - return shardsResultPerIndex; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(Fields._SHARDS); - shardCounts.toXContent(builder, params); - builder.endObject(); - for (Map.Entry> indexEntry : shardsResultPerIndex.entrySet()) { - List indexResult = indexEntry.getValue(); - builder.startObject(indexEntry.getKey()); - ShardCounts indexShardCounts = calculateShardCounts(indexResult); - indexShardCounts.toXContent(builder, params); - if (indexShardCounts.failed > 0) { - builder.startArray(Fields.FAILURES); - for (ShardsSyncedFlushResult shardResults : indexResult) { - if (shardResults.failed()) { - builder.startObject(); - builder.field(Fields.SHARD, shardResults.shardId().id()); - builder.field(Fields.REASON, shardResults.failureReason()); - builder.endObject(); - continue; - } - Map failedShards = shardResults.failedShards(); - for (Map.Entry shardEntry : failedShards.entrySet()) { - builder.startObject(); - builder.field(Fields.SHARD, shardResults.shardId().id()); - builder.field(Fields.REASON, shardEntry.getValue().failureReason()); - builder.field(Fields.ROUTING, shardEntry.getKey()); - 
builder.endObject(); - } - } - builder.endArray(); - } - builder.endObject(); - } - return builder; - } - - static ShardCounts calculateShardCounts(Iterable results) { - int total = 0, successful = 0, failed = 0; - for (ShardsSyncedFlushResult result : results) { - total += result.totalShards(); - successful += result.successfulShards(); - if (result.failed()) { - // treat all shard copies as failed - failed += result.totalShards(); - } else { - // some shards may have failed during the sync phase - failed += result.failedShards().size(); - } - } - return new ShardCounts(total, successful, failed); - } - - static final class ShardCounts implements ToXContentFragment, Writeable { - - public final int total; - public final int successful; - public final int failed; - - ShardCounts(int total, int successful, int failed) { - this.total = total; - this.successful = successful; - this.failed = failed; - } - - ShardCounts(StreamInput in) throws IOException { - total = in.readInt(); - successful = in.readInt(); - failed = in.readInt(); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(Fields.TOTAL, total); - builder.field(Fields.SUCCESSFUL, successful); - builder.field(Fields.FAILED, failed); - return builder; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeInt(total); - out.writeInt(successful); - out.writeInt(failed); - } - } - - static final class Fields { - static final String _SHARDS = "_shards"; - static final String TOTAL = "total"; - static final String SUCCESSFUL = "successful"; - static final String FAILED = "failed"; - static final String FAILURES = "failures"; - static final String SHARD = "shard"; - static final String ROUTING = "routing"; - static final String REASON = "reason"; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - shardCounts.writeTo(out); - out.writeInt(shardsResultPerIndex.size()); - for 
(Map.Entry> entry : shardsResultPerIndex.entrySet()) { - out.writeString(entry.getKey()); - out.writeInt(entry.getValue().size()); - for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) { - shardsSyncedFlushResult.writeTo(out); - } - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 077657cc62dd..397ce43747d5 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; @@ -28,10 +29,16 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -48,6 +55,8 @@ public class TransportShardFlushAction ActionFilters actionFilters) { super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, 
actionFilters, ShardFlushRequest::new, ShardFlushRequest::new, ThreadPool.Names.FLUSH); + transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, + ThreadPool.Names.FLUSH, PreShardSyncedFlushRequest::new, new PreSyncedFlushTransportHandler(indicesService)); } @Override @@ -71,4 +80,43 @@ public class TransportShardFlushAction logger.trace("{} flush request executed on replica", replica.shardId()); return new ReplicaResult(); } + + // TODO: Remove this transition in 9.0 + private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre"; + + private static class PreShardSyncedFlushRequest extends TransportRequest { + private final ShardId shardId; + + private PreShardSyncedFlushRequest(StreamInput in) throws IOException { + super(in); + assert in.getVersion().before(Version.V_8_0_0) : "received pre_sync request from a new node"; + this.shardId = new ShardId(in); + } + + @Override + public String toString() { + return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}'; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + assert false : "must not send pre_sync request from a new node"; + throw new UnsupportedOperationException(""); + } + } + + private static final class PreSyncedFlushTransportHandler implements TransportRequestHandler { + private final IndicesService indicesService; + + PreSyncedFlushTransportHandler(IndicesService indicesService) { + this.indicesService = indicesService; + } + + @Override + public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) { + IndexShard indexShard = indicesService.indexServiceSafe(request.shardId.getIndex()).getShard(request.shardId.id()); + indexShard.flush(new FlushRequest().force(false).waitIfOngoing(true)); + throw new UnsupportedOperationException("Synced flush was removed and a normal flush was performed instead."); + } + } } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java deleted file mode 100644 index 3eb72e0b0227..000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.transport.TransportService; - -/** - * Synced flush Action. 
- */ -public class TransportSyncedFlushAction extends HandledTransportAction { - - SyncedFlushService syncedFlushService; - - @Inject - public TransportSyncedFlushAction(TransportService transportService, ActionFilters actionFilters, - SyncedFlushService syncedFlushService) { - super(SyncedFlushAction.NAME, transportService, actionFilters, SyncedFlushRequest::new); - this.syncedFlushService = syncedFlushService; - } - - @Override - protected void doExecute(Task task, SyncedFlushRequest request, ActionListener listener) { - syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener); - } -} diff --git a/server/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/server/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index 40c4c1046577..36b34a7b24c8 100644 --- a/server/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -42,9 +42,6 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; @@ -336,29 +333,6 @@ public interface IndicesAdminClient extends ElasticsearchClient { */ FlushRequestBuilder prepareFlush(String... indices); - /** - * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). 
- * - * @param request The sync flush request - * @return A result future - * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...) - */ - ActionFuture syncedFlush(SyncedFlushRequest request); - - /** - * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). - * - * @param request The sync flush request - * @param listener A listener to be notified with a result - * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...) - */ - void syncedFlush(SyncedFlushRequest request, ActionListener listener); - - /** - * Explicitly sync flush one or more indices (write sync id to shards for faster recovery). - */ - SyncedFlushRequestBuilder prepareSyncedFlush(String... indices); - /** * Explicitly force merge one or more indices into a the number of segments. * diff --git a/server/src/main/java/org/elasticsearch/client/Requests.java b/server/src/main/java/org/elasticsearch/client/Requests.java index 01d04c64ae1b..bec0865bea8a 100644 --- a/server/src/main/java/org/elasticsearch/client/Requests.java +++ b/server/src/main/java/org/elasticsearch/client/Requests.java @@ -47,7 +47,6 @@ import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; @@ -248,17 +247,6 @@ public class Requests { return new FlushRequest(indices); } - /** - * Creates a synced flush indices request. - * - * @param indices The indices to sync flush. 
Use {@code null} or {@code _all} to execute against all indices - * @return The synced flush request - * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) - */ - public static SyncedFlushRequest syncedFlushRequest(String... indices) { - return new SyncedFlushRequest(indices); - } - /** * Creates a force merge request. * diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 5bb480d8c23c..1ee480fb55ed 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -163,10 +163,6 @@ import org.elasticsearch.action.admin.indices.flush.FlushAction; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder; @@ -1351,21 +1347,6 @@ public abstract class AbstractClient implements Client { return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices); } - @Override - public ActionFuture syncedFlush(SyncedFlushRequest request) { - return execute(SyncedFlushAction.INSTANCE, request); - } - - @Override - public void syncedFlush(SyncedFlushRequest request, ActionListener listener) { - execute(SyncedFlushAction.INSTANCE, request, listener); - } - - 
@Override - public SyncedFlushRequestBuilder prepareSyncedFlush(String... indices) { - return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices); - } - @Override public void getMappings(GetMappingsRequest request, ActionListener listener) { execute(GetMappingsAction.INSTANCE, request, listener); diff --git a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java index 22587cf6aad7..517e2966e41c 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java @@ -88,13 +88,6 @@ public final class CommitStats implements Writeable, ToXContentFragment { return new Engine.CommitId(Base64.getDecoder().decode(id)); } - /** - * The synced-flush id of the commit if existed. - */ - public String syncId() { - return userData.get(InternalEngine.SYNC_COMMIT_ID); - } - /** * Returns the number of documents in the in this commit */ diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 5415a433d867..549b3b7a21dc 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1035,12 +1035,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } } - public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) { - verifyNotClosed(); - logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId); - return getEngine().syncFlush(syncId, expectedCommitId); - } - /** * Executes the given flush request against the engine. 
* diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java index 7584fda21c32..1214103dd69c 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -61,7 +61,6 @@ import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; import org.elasticsearch.index.seqno.RetentionLeaseSyncer; import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.indices.cluster.IndicesClusterStateService; -import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.plugins.MapperPlugin; @@ -238,7 +237,6 @@ public class IndicesModule extends AbstractModule { protected void configure() { bind(IndicesStore.class).asEagerSingleton(); bind(IndicesClusterStateService.class).asEagerSingleton(); - bind(SyncedFlushService.class).asEagerSingleton(); bind(TransportResyncReplicationAction.class).asEagerSingleton(); bind(PrimaryReplicaSyncer.class).asEagerSingleton(); bind(RetentionLeaseSyncAction.class).asEagerSingleton(); diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index e7e6d954aa1c..754635b12124 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -68,7 +68,6 @@ import org.elasticsearch.index.shard.PrimaryReplicaSyncer.ResyncTask; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.flush.SyncedFlushService; import 
org.elasticsearch.indices.recovery.PeerRecoverySourceService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; import org.elasticsearch.indices.recovery.RecoveryFailedException; @@ -135,7 +134,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, final SearchService searchService, - final SyncedFlushService syncedFlushService, final PeerRecoverySourceService peerRecoverySourceService, final SnapshotShardsService snapshotShardsService, final PrimaryReplicaSyncer primaryReplicaSyncer, @@ -151,7 +149,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple nodeMappingRefreshAction, repositoriesService, searchService, - syncedFlushService, peerRecoverySourceService, snapshotShardsService, primaryReplicaSyncer, @@ -170,7 +167,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple final NodeMappingRefreshAction nodeMappingRefreshAction, final RepositoriesService repositoriesService, final SearchService searchService, - final SyncedFlushService syncedFlushService, final PeerRecoverySourceService peerRecoverySourceService, final SnapshotShardsService snapshotShardsService, final PrimaryReplicaSyncer primaryReplicaSyncer, diff --git a/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java b/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java deleted file mode 100644 index 4748c41d4b3e..000000000000 --- a/server/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.indices.flush; - -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.index.shard.ShardId; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static java.util.Collections.emptyMap; - -/** - * Result for all copies of a shard - */ -public class ShardsSyncedFlushResult implements Writeable { - private String failureReason; - private Map shardResponses; - private String syncId; - private ShardId shardId; - // some shards may be unassigned, so we need this as state - private int totalShards; - - public ShardsSyncedFlushResult(StreamInput in) throws IOException { - failureReason = in.readOptionalString(); - int numResponses = in.readInt(); - shardResponses = new HashMap<>(); - for (int i = 0; i < numResponses; i++) { - ShardRouting shardRouting = new ShardRouting(in); - SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in); - shardResponses.put(shardRouting, response); - } - syncId = in.readOptionalString(); - shardId = new ShardId(in); - totalShards = in.readInt(); - } - - public ShardId getShardId() { - return shardId; - } - - /** - * failure 
constructor - */ - public ShardsSyncedFlushResult(ShardId shardId, int totalShards, String failureReason) { - this.syncId = null; - this.failureReason = failureReason; - this.shardResponses = emptyMap(); - this.shardId = shardId; - this.totalShards = totalShards; - } - - /** - * success constructor - */ - public ShardsSyncedFlushResult(ShardId shardId, - String syncId, - int totalShards, - Map shardResponses) { - this.failureReason = null; - this.shardResponses = Map.copyOf(shardResponses); - this.syncId = syncId; - this.totalShards = totalShards; - this.shardId = shardId; - } - - /** - * @return true if the operation failed before reaching step three of synced flush. {@link #failureReason()} can be used for - * more details - */ - public boolean failed() { - return failureReason != null; - } - - /** - * @return the reason for the failure if synced flush failed before step three of synced flush - */ - public String failureReason() { - return failureReason; - } - - public String syncId() { - return syncId; - } - - /** - * @return total number of shards for which a sync attempt was made - */ - public int totalShards() { - return totalShards; - } - - /** - * @return total number of successful shards - */ - public int successfulShards() { - int i = 0; - for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) { - if (result.success()) { - i++; - } - } - return i; - } - - /** - * @return an array of shard failures - */ - public Map failedShards() { - Map failures = new HashMap<>(); - for (Map.Entry result : shardResponses.entrySet()) { - if (result.getValue().success() == false) { - failures.put(result.getKey(), result.getValue()); - } - } - return failures; - } - - /** - * @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush. - * Empty if synced flush failed before step three. 
- */ - public Map shardResponses() { - return shardResponses; - } - - public ShardId shardId() { - return shardId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(failureReason); - out.writeInt(shardResponses.size()); - for (Map.Entry entry : shardResponses.entrySet()) { - entry.getKey().writeTo(out); - entry.getValue().writeTo(out); - } - out.writeOptionalString(syncId); - shardId.writeTo(out); - out.writeInt(totalShards); - } -} diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java deleted file mode 100644 index a0fc2e153b90..000000000000 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ /dev/null @@ -1,768 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.indices.flush; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.StepListener; -import org.elasticsearch.action.admin.indices.flush.FlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.CountDown; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.CommitStats; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardNotFoundException; -import org.elasticsearch.indices.IndexClosedException; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.tasks.Task; -import 
org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; - -public class SyncedFlushService { - - private static final Logger logger = LogManager.getLogger(SyncedFlushService.class); - - private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(logger); - - public static final String SYNCED_FLUSH_DEPRECATION_MESSAGE = - "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."; - - private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre"; - private static final String SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/sync"; - private static final String IN_FLIGHT_OPS_ACTION_NAME = "internal:indices/flush/synced/in_flight"; - - private final IndicesService indicesService; - private final ClusterService clusterService; - private final TransportService transportService; - private final IndexNameExpressionResolver indexNameExpressionResolver; - - @Inject - public SyncedFlushService(IndicesService indicesService, - ClusterService clusterService, - TransportService transportService, - IndexNameExpressionResolver indexNameExpressionResolver) { - this.indicesService = indicesService; - this.clusterService = clusterService; - this.transportService = transportService; - this.indexNameExpressionResolver = indexNameExpressionResolver; - 
transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, ThreadPool.Names.FLUSH, PreShardSyncedFlushRequest::new, - new PreSyncedFlushTransportHandler()); - transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ThreadPool.Names.FLUSH, ShardSyncedFlushRequest::new, - new SyncedFlushTransportHandler()); - transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, ThreadPool.Names.SAME, InFlightOpsRequest::new, - new InFlightOpCountTransportHandler()); - } - - /** - * a utility method to perform a synced flush for all shards of multiple indices. - * see {@link #attemptSyncedFlush(ShardId, ActionListener)} - * for more details. - */ - public void attemptSyncedFlush(final String[] aliasesOrIndices, - IndicesOptions indicesOptions, - final ActionListener listener) { - final ClusterState state = clusterService.state(); - DEPRECATION_LOGGER.deprecatedAndMaybeLog("synced_flush", SYNCED_FLUSH_DEPRECATION_MESSAGE); - final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); - final Map> results = ConcurrentCollections.newConcurrentMap(); - int numberOfShards = 0; - for (Index index : concreteIndices) { - final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index); - numberOfShards += indexMetaData.getNumberOfShards(); - results.put(index.getName(), Collections.synchronizedList(new ArrayList<>())); - - } - if (numberOfShards == 0) { - listener.onResponse(new SyncedFlushResponse(results)); - return; - } - final CountDown countDown = new CountDown(numberOfShards); - - for (final Index concreteIndex : concreteIndices) { - final String index = concreteIndex.getName(); - final IndexMetaData indexMetaData = state.metaData().getIndexSafe(concreteIndex); - final int indexNumberOfShards = indexMetaData.getNumberOfShards(); - for (int shard = 0; shard < indexNumberOfShards; shard++) { - final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard); - 
innerAttemptSyncedFlush(shardId, state, new ActionListener() { - @Override - public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { - results.get(index).add(syncedFlushResult); - if (countDown.countDown()) { - listener.onResponse(new SyncedFlushResponse(results)); - } - } - - @Override - public void onFailure(Exception e) { - logger.debug("{} unexpected error while executing synced flush", shardId); - final int totalShards = indexMetaData.getNumberOfReplicas() + 1; - results.get(index).add(new ShardsSyncedFlushResult(shardId, totalShards, e.getMessage())); - if (countDown.countDown()) { - listener.onResponse(new SyncedFlushResponse(results)); - } - } - }); - } - } - } - - /* - * Tries to flush all copies of a shard and write a sync id to it. - * After a synced flush two shard copies may only contain the same sync id if they contain the same documents. - * To ensure this, synced flush works in three steps: - * 1. Flush all shard copies and gather the commit ids for each copy after the flush - * 2. Ensure that there are no ongoing indexing operations on the primary - * 3. Perform an additional flush on each shard copy that writes the sync id - * - * Step 3 is only executed on a shard if - * a) the shard has no uncommitted changes since the last flush - * b) the last flush was the one executed in 1 (use the collected commit id to verify this) - * - * This alone is not enough to ensure that all copies contain the same documents. - * Without step 2 a sync id would be written for inconsistent copies in the following scenario: - * - * Write operation has completed on a primary and is being sent to replicas. The write request does not reach the - * replicas until sync flush is finished. - * Step 1 is executed. After the flush the commit points on primary contains a write operation that the replica does not have. 
- * Step 3 will be executed on primary and replica as well because there are no uncommitted changes on primary (the first flush - * committed them) and there are no uncommitted changes on the replica (the write operation has not reached the replica yet). - * - * Step 2 detects this scenario and fails the whole synced flush if a write operation is ongoing on the primary. - * Together with the conditions for step 3 (same commit id and no uncommitted changes) this guarantees that a snc id will only - * be written on a primary if no write operation was executed between step 1 and step 3 and sync id will only be written on - * the replica if it contains the same changes that the primary contains. - * - * Synced flush is a best effort operation. The sync id may be written on all, some or none of the copies. - **/ - public void attemptSyncedFlush(final ShardId shardId, final ActionListener actionListener) { - innerAttemptSyncedFlush(shardId, clusterService.state(), actionListener); - } - - private void innerAttemptSyncedFlush(final ShardId shardId, - final ClusterState state, - final ActionListener actionListener) { - try { - final IndexShardRoutingTable shardRoutingTable = getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - final int totalShards = shardRoutingTable.getSize(); - - if (activeShards.size() == 0) { - actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "no active shards")); - return; - } - - // 1. send pre-sync flushes to all replicas - final StepListener> presyncStep = new StepListener<>(); - sendPreSyncRequests(activeShards, state, shardId, presyncStep); - - // 2. 
fetch in flight operations - final StepListener inflightOpsStep = new StepListener<>(); - presyncStep.whenComplete(presyncResponses -> { - if (presyncResponses.isEmpty()) { - actionListener.onResponse(new ShardsSyncedFlushResult(shardId, totalShards, "all shards failed to commit on pre-sync")); - } else { - getInflightOpsCount(shardId, state, shardRoutingTable, inflightOpsStep); - } - }, actionListener::onFailure); - - // 3. now send the sync request to all the shards - inflightOpsStep.whenComplete(inFlightOpsResponse -> { - final Map presyncResponses = presyncStep.result(); - final int inflight = inFlightOpsResponse.opCount(); - assert inflight >= 0; - if (inflight != 0) { - actionListener.onResponse( - new ShardsSyncedFlushResult(shardId, totalShards, "[" + inflight + "] ongoing operations on primary")); - } else { - final String sharedSyncId = sharedExistingSyncId(presyncResponses); - if (sharedSyncId != null) { - assert presyncResponses.values().stream().allMatch(r -> r.existingSyncId.equals(sharedSyncId)) : - "Not all shards have the same existing sync id [" + sharedSyncId + "], responses [" + presyncResponses + "]"; - reportSuccessWithExistingSyncId(shardId, sharedSyncId, activeShards, totalShards, presyncResponses, actionListener); - }else { - String syncId = UUIDs.randomBase64UUID(); - sendSyncRequests(syncId, activeShards, state, presyncResponses, shardId, totalShards, actionListener); - } - } - }, actionListener::onFailure); - } catch (Exception e) { - actionListener.onFailure(e); - } - } - - private String sharedExistingSyncId(Map preSyncedFlushResponses) { - String existingSyncId = null; - for (PreSyncedFlushResponse resp : preSyncedFlushResponses.values()) { - if (Strings.isNullOrEmpty(resp.existingSyncId)) { - return null; - } - if (existingSyncId == null) { - existingSyncId = resp.existingSyncId; - } - if (existingSyncId.equals(resp.existingSyncId) == false) { - return null; - } - } - return existingSyncId; - } - - private void 
reportSuccessWithExistingSyncId(ShardId shardId, - String existingSyncId, - List shards, - int totalShards, - Map preSyncResponses, - ActionListener listener) { - final Map results = new HashMap<>(); - for (final ShardRouting shard : shards) { - if (preSyncResponses.containsKey(shard.currentNodeId())) { - results.put(shard, new ShardSyncedFlushResponse((String) null)); - } - } - listener.onResponse(new ShardsSyncedFlushResult(shardId, existingSyncId, totalShards, results)); - } - - final IndexShardRoutingTable getShardRoutingTable(final ShardId shardId, final ClusterState state) { - final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex()); - if (indexMetaData == null) { - throw new IndexNotFoundException(shardId.getIndexName()); - } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { - throw new IndexClosedException(shardId.getIndex()); - } - final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(indexMetaData.getIndex()).shard(shardId.id()); - if (shardRoutingTable == null) { - throw new ShardNotFoundException(shardId); - } - return shardRoutingTable; - } - - /** - * returns the number of in flight operations on primary. -1 upon error. 
- */ - protected void getInflightOpsCount(final ShardId shardId, - ClusterState state, - IndexShardRoutingTable shardRoutingTable, - final ActionListener listener) { - try { - final ShardRouting primaryShard = shardRoutingTable.primaryShard(); - final DiscoveryNode primaryNode = state.nodes().get(primaryShard.currentNodeId()); - if (primaryNode == null) { - logger.trace("{} failed to resolve node for primary shard {}, skipping sync", shardId, primaryShard); - listener.onResponse(new InFlightOpsResponse(-1)); - return; - } - logger.trace("{} retrieving in flight operation count", shardId); - transportService.sendRequest(primaryNode, IN_FLIGHT_OPS_ACTION_NAME, new InFlightOpsRequest(shardId), - new TransportResponseHandler() { - @Override - public InFlightOpsResponse read(StreamInput in) throws IOException { - return new InFlightOpsResponse(in); - } - - @Override - public void handleResponse(InFlightOpsResponse response) { - listener.onResponse(response); - } - - @Override - public void handleException(TransportException exp) { - logger.debug("{} unexpected error while retrieving in flight op count", shardId); - listener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } catch (Exception e) { - listener.onFailure(e); - } - } - - private int numDocsOnPrimary(List shards, Map preSyncResponses) { - for (ShardRouting shard : shards) { - if (shard.primary()) { - final PreSyncedFlushResponse resp = preSyncResponses.get(shard.currentNodeId()); - if (resp != null) { - return resp.numDocs; - } - } - } - return PreSyncedFlushResponse.UNKNOWN_NUM_DOCS; - } - - void sendSyncRequests(final String syncId, - final List shards, - ClusterState state, - Map preSyncResponses, - final ShardId shardId, - final int totalShards, - final ActionListener listener) { - final CountDown countDown = new CountDown(shards.size()); - final Map results = ConcurrentCollections.newConcurrentMap(); - final int numDocsOnPrimary = 
numDocsOnPrimary(shards, preSyncResponses); - for (final ShardRouting shard : shards) { - final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); - if (node == null) { - logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); - results.put(shard, new ShardSyncedFlushResponse("unknown node")); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - continue; - } - final PreSyncedFlushResponse preSyncedResponse = preSyncResponses.get(shard.currentNodeId()); - if (preSyncedResponse == null) { - logger.trace("{} can't resolve expected commit id for current node, skipping for sync id [{}]. shard routing {}", - shardId, syncId, shard); - results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush")); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - continue; - } - if (preSyncedResponse.numDocs != numDocsOnPrimary && - preSyncedResponse.numDocs != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS && - numDocsOnPrimary != PreSyncedFlushResponse.UNKNOWN_NUM_DOCS) { - logger.debug("{} can't issue sync id [{}] for replica [{}] with num docs [{}]; num docs on primary [{}]", - shardId, syncId, shard, preSyncedResponse.numDocs, numDocsOnPrimary); - results.put(shard, new ShardSyncedFlushResponse("ongoing indexing operations: " + - "num docs on replica [" + preSyncedResponse.numDocs + "]; num docs on primary [" + numDocsOnPrimary + "]")); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - continue; - } - logger.trace("{} sending synced flush request to {}. 
sync id [{}].", shardId, shard, syncId); - ShardSyncedFlushRequest syncedFlushRequest = new ShardSyncedFlushRequest(shard.shardId(), syncId, preSyncedResponse.commitId); - transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, syncedFlushRequest, - new TransportResponseHandler() { - @Override - public ShardSyncedFlushResponse read(StreamInput in) throws IOException { - return new ShardSyncedFlushResponse(in); - } - - @Override - public void handleResponse(ShardSyncedFlushResponse response) { - ShardSyncedFlushResponse existing = results.put(shard, response); - assert existing == null : "got two answers for node [" + node + "]"; - // count after the assert so we won't decrement twice in handleException - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - } - - @Override - public void handleException(TransportException exp) { - logger.trace(() -> new ParameterizedMessage("{} error while performing synced flush on [{}], skipping", - shardId, shard), exp); - results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); - countDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } - - } - - private void countDownAndSendResponseIfDone(String syncId, - List shards, - ShardId shardId, - int totalShards, - ActionListener listener, - CountDown countDown, - Map results) { - if (countDown.countDown()) { - assert results.size() == shards.size(); - listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results)); - } - } - - /** - * send presync requests to all started copies of the given shard - */ - void sendPreSyncRequests(final List shards, - final ClusterState state, - final ShardId shardId, - final ActionListener> listener) { - final CountDown countDown = new CountDown(shards.size()); - final ConcurrentMap presyncResponses = 
ConcurrentCollections.newConcurrentMap(); - for (final ShardRouting shard : shards) { - logger.trace("{} sending pre-synced flush request to {}", shardId, shard); - final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); - if (node == null) { - logger.trace("{} shard routing {} refers to an unknown node. skipping.", shardId, shard); - if (countDown.countDown()) { - listener.onResponse(presyncResponses); - } - continue; - } - transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), - new TransportResponseHandler() { - @Override - public PreSyncedFlushResponse read(StreamInput in) throws IOException { - return new PreSyncedFlushResponse(in); - } - - @Override - public void handleResponse(PreSyncedFlushResponse response) { - PreSyncedFlushResponse existing = presyncResponses.putIfAbsent(node.getId(), response); - assert existing == null : "got two answers for node [" + node + "]"; - // count after the assert so we won't decrement twice in handleException - if (countDown.countDown()) { - listener.onResponse(presyncResponses); - } - } - - @Override - public void handleException(TransportException exp) { - logger.trace(() -> new ParameterizedMessage("{} error while performing pre synced flush on [{}], skipping", - shardId, shard), exp); - if (countDown.countDown()) { - listener.onResponse(presyncResponses); - } - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - }); - } - } - - private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); - FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true); - logger.trace("{} performing pre sync flush", request.shardId()); - indexShard.flush(flushRequest); - final CommitStats commitStats = indexShard.commitStats(); - final Engine.CommitId commitId = 
commitStats.getRawCommitId(); - logger.trace("{} pre sync flush done. commit id {}, num docs {}", request.shardId(), commitId, commitStats.getNumDocs()); - return new PreSyncedFlushResponse(commitId, commitStats.getNumDocs(), commitStats.syncId()); - } - - private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", - request.shardId(), request.syncId(), request.expectedCommitId()); - Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId()); - logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result); - switch (result) { - case SUCCESS: - return new ShardSyncedFlushResponse((String) null); - case COMMIT_MISMATCH: - return new ShardSyncedFlushResponse("commit has changed"); - case PENDING_OPERATIONS: - return new ShardSyncedFlushResponse("pending operations"); - default: - throw new ElasticsearchException("unknown synced flush result [" + result + "]"); - } - } - - private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) { - IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); - IndexShard indexShard = indexService.getShard(request.shardId().id()); - if (indexShard.routingEntry().primary() == false) { - throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard"); - } - int opCount = indexShard.getActiveOperationsCount(); - return new InFlightOpsResponse(opCount == IndexShard.OPERATIONS_BLOCKED ? 
0 : opCount); - } - - public static final class PreShardSyncedFlushRequest extends TransportRequest { - private ShardId shardId; - - public PreShardSyncedFlushRequest(StreamInput in) throws IOException { - super(in); - this.shardId = new ShardId(in); - } - - public PreShardSyncedFlushRequest(ShardId shardId) { - this.shardId = shardId; - } - - @Override - public String toString() { - return "PreShardSyncedFlushRequest{" + - "shardId=" + shardId + - '}'; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - } - - public ShardId shardId() { - return shardId; - } - } - - /** - * Response for first step of synced flush (flush) for one shard copy - */ - static final class PreSyncedFlushResponse extends TransportResponse { - static final int UNKNOWN_NUM_DOCS = -1; - - Engine.CommitId commitId; - int numDocs; - @Nullable String existingSyncId = null; - - PreSyncedFlushResponse(StreamInput in) throws IOException { - super(in); - commitId = new Engine.CommitId(in); - numDocs = in.readInt(); - existingSyncId = in.readOptionalString(); - } - - PreSyncedFlushResponse(Engine.CommitId commitId, int numDocs, String existingSyncId) { - this.commitId = commitId; - this.numDocs = numDocs; - this.existingSyncId = existingSyncId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - commitId.writeTo(out); - out.writeInt(numDocs); - out.writeOptionalString(existingSyncId); - } - } - - public static final class ShardSyncedFlushRequest extends TransportRequest { - - private String syncId; - private Engine.CommitId expectedCommitId; - private ShardId shardId; - - public ShardSyncedFlushRequest(StreamInput in) throws IOException { - super(in); - shardId = new ShardId(in); - expectedCommitId = new Engine.CommitId(in); - syncId = in.readString(); - } - - public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { - this.expectedCommitId = 
expectedCommitId; - this.shardId = shardId; - this.syncId = syncId; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - expectedCommitId.writeTo(out); - out.writeString(syncId); - } - - public ShardId shardId() { - return shardId; - } - - public String syncId() { - return syncId; - } - - public Engine.CommitId expectedCommitId() { - return expectedCommitId; - } - - @Override - public String toString() { - return "ShardSyncedFlushRequest{" + - "shardId=" + shardId + - ",syncId='" + syncId + '\'' + - '}'; - } - } - - /** - * Response for third step of synced flush (writing the sync id) for one shard copy - */ - public static final class ShardSyncedFlushResponse extends TransportResponse { - - /** - * a non null value indicates a failure to sync flush. null means success - */ - String failureReason; - - public ShardSyncedFlushResponse(StreamInput in) throws IOException { - super(in); - failureReason = in.readOptionalString(); - } - - public ShardSyncedFlushResponse(String failureReason) { - this.failureReason = failureReason; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(failureReason); - } - - public boolean success() { - return failureReason == null; - } - - public String failureReason() { - return failureReason; - } - - @Override - public String toString() { - return "ShardSyncedFlushResponse{" + - "success=" + success() + - ", failureReason='" + failureReason + '\'' + - '}'; - } - - public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException { - return new ShardSyncedFlushResponse(in); - } - } - - - public static final class InFlightOpsRequest extends TransportRequest { - - private ShardId shardId; - - public InFlightOpsRequest(StreamInput in) throws IOException { - super(in); - shardId = new ShardId(in); - } - - public InFlightOpsRequest(ShardId shardId) { - this.shardId = shardId; - } - - 
@Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - } - - public ShardId shardId() { - return shardId; - } - - @Override - public String toString() { - return "InFlightOpsRequest{" + - "shardId=" + shardId + - '}'; - } - } - - /** - * Response for second step of synced flush (check operations in flight) - */ - static final class InFlightOpsResponse extends TransportResponse { - - int opCount; - - InFlightOpsResponse(StreamInput in) throws IOException { - super(in); - opCount = in.readVInt(); - } - - InFlightOpsResponse(int opCount) { - assert opCount >= 0 : opCount; - this.opCount = opCount; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(opCount); - } - - public int opCount() { - return opCount; - } - - @Override - public String toString() { - return "InFlightOpsResponse{" + - "opCount=" + opCount + - '}'; - } - } - - private final class PreSyncedFlushTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(performPreSyncedFlush(request)); - } - } - - private final class SyncedFlushTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(performSyncedFlush(request)); - } - } - - private final class InFlightOpCountTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(InFlightOpsRequest request, TransportChannel channel, Task task) throws Exception { - channel.sendResponse(performInFlightOps(request)); - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java 
b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java index 53cbb5c6d10d..d775189af48e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSyncedFlushAction.java @@ -19,18 +19,23 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestToXContentListener; import java.io.IOException; @@ -39,6 +44,9 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; public class RestSyncedFlushAction extends BaseRestHandler { + private static final Logger logger = LogManager.getLogger(RestSyncedFlushAction.class); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(logger); + public RestSyncedFlushAction(RestController controller) { controller.registerHandler(POST, "/_flush/synced", this); controller.registerHandler(POST, 
"/{index}/_flush/synced", this); @@ -54,17 +62,35 @@ public class RestSyncedFlushAction extends BaseRestHandler { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen()); - SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); - syncedFlushRequest.indicesOptions(indicesOptions); - return channel -> client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception { - builder.startObject(); - results.toXContent(builder, request); - builder.endObject(); - return new BytesRestResponse(results.restStatus(), builder); - } - }); + DEPRECATION_LOGGER.deprecatedAndMaybeLog("synced_flush", + "Synced flush was removed and a normal flush was performed instead. This transition will be removed in a future version."); + final FlushRequest flushRequest = new FlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); + flushRequest.indicesOptions(IndicesOptions.fromRequest(request, flushRequest.indicesOptions())); + return channel -> client.admin().indices().flush(flushRequest, new SimulateSyncedFlushResponseListener(channel)); + } + + static final class SimulateSyncedFlushResponseListener extends RestToXContentListener { + + SimulateSyncedFlushResponseListener(RestChannel channel) { + super(channel); + } + + @Override + public RestResponse buildResponse(FlushResponse flushResponse, XContentBuilder builder) throws Exception { + builder.startObject(); + buildSyncedFlushResponse(builder, flushResponse); + builder.endObject(); + final RestStatus restStatus = flushResponse.getFailedShards() == 0 ? 
RestStatus.OK : RestStatus.CONFLICT; + return new BytesRestResponse(restStatus, builder); + } + + private void buildSyncedFlushResponse(XContentBuilder builder, FlushResponse flushResponse) throws IOException { + builder.startObject("_shards"); + builder.field("total", flushResponse.getTotalShards()); + builder.field("successful", flushResponse.getSuccessfulShards()); + builder.field("failed", flushResponse.getFailedShards()); + // can't serialize the detail of each index as we don't have the shard count per index. + builder.endObject(); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java deleted file mode 100644 index 5fc8ce5fe3cf..000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices.flush; - -import com.carrotsearch.hppc.ObjectIntHashMap; -import com.carrotsearch.hppc.ObjectIntMap; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse.ShardCounts; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; -import org.elasticsearch.cluster.routing.TestShardRouting; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; -import org.elasticsearch.indices.flush.SyncedFlushService; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.test.XContentTestUtils.convertToMap; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; - -public class SyncedFlushUnitTests extends ESTestCase { - - private static class TestPlan { - public SyncedFlushResponse.ShardCounts totalCounts; - public Map countsPerIndex = new HashMap<>(); - public ObjectIntMap expectedFailuresPerIndex = new ObjectIntHashMap<>(); - public SyncedFlushResponse result; - } - - public void testIndicesSyncedFlushResult() throws IOException { - final TestPlan testPlan = createTestPlan(); - assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total)); - assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful)); - assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed)); - assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? 
RestStatus.CONFLICT : RestStatus.OK)); - Map asMap = convertToMap(testPlan.result); - assertShardCount("_shards header", (Map) asMap.get("_shards"), testPlan.totalCounts); - - assertThat("unexpected number of indices", asMap.size(), equalTo(1 + testPlan.countsPerIndex.size())); // +1 for the shards header - for (String index : testPlan.countsPerIndex.keySet()) { - Map indexMap = (Map) asMap.get(index); - assertShardCount(index, indexMap, testPlan.countsPerIndex.get(index)); - List> failureList = (List>) indexMap.get("failures"); - final int expectedFailures = testPlan.expectedFailuresPerIndex.get(index); - if (expectedFailures == 0) { - assertNull(index + " has unexpected failures", failureList); - } else { - assertNotNull(index + " should have failures", failureList); - assertThat(failureList, hasSize(expectedFailures)); - } - } - } - - public void testResponseStreaming() throws IOException { - final TestPlan testPlan = createTestPlan(); - assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total)); - assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful)); - assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed)); - assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); - BytesStreamOutput out = new BytesStreamOutput(); - testPlan.result.writeTo(out); - StreamInput in = out.bytes().streamInput(); - SyncedFlushResponse readResponse = new SyncedFlushResponse(in); - assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total)); - assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful)); - assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed)); - assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? 
RestStatus.CONFLICT : RestStatus.OK)); - assertThat(readResponse.getShardsResultPerIndex().size(), equalTo(testPlan.result.getShardsResultPerIndex().size())); - for (Map.Entry> entry : readResponse.getShardsResultPerIndex().entrySet()) { - List originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey()); - assertNotNull(originalShardsResults); - List readShardsResults = entry.getValue(); - assertThat(readShardsResults.size(), equalTo(originalShardsResults.size())); - for (int i = 0; i < readShardsResults.size(); i++) { - ShardsSyncedFlushResult originalShardResult = originalShardsResults.get(i); - ShardsSyncedFlushResult readShardResult = readShardsResults.get(i); - assertThat(originalShardResult.failureReason(), equalTo(readShardResult.failureReason())); - assertThat(originalShardResult.failed(), equalTo(readShardResult.failed())); - assertThat(originalShardResult.getShardId(), equalTo(readShardResult.getShardId())); - assertThat(originalShardResult.successfulShards(), equalTo(readShardResult.successfulShards())); - assertThat(originalShardResult.syncId(), equalTo(readShardResult.syncId())); - assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards())); - assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size())); - for (Map.Entry shardEntry - : originalShardResult.failedShards().entrySet()) { - SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey()); - assertNotNull(readShardResponse); - SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); - assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); - assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); - } - assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size())); - for (Map.Entry shardEntry - : 
originalShardResult.shardResponses().entrySet()) { - SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses() - .get(shardEntry.getKey()); - assertNotNull(readShardResponse); - SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); - assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); - assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); - } - } - } - } - - private void assertShardCount(String name, Map header, ShardCounts expectedCounts) { - assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total)); - assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful)); - assertThat(name + " has unexpected failed count", (Integer) header.get("failed"), equalTo(expectedCounts.failed)); - } - - protected TestPlan createTestPlan() { - final TestPlan testPlan = new TestPlan(); - final Map> indicesResults = new HashMap<>(); - final int indexCount = randomIntBetween(1, 10); - int totalShards = 0; - int totalSuccesful = 0; - int totalFailed = 0; - for (int i = 0; i < indexCount; i++) { - final String index = "index_" + i; - int shards = randomIntBetween(1, 4); - int replicas = randomIntBetween(0, 2); - int successful = 0; - int failed = 0; - int failures = 0; - List shardsResults = new ArrayList<>(); - for (int shard = 0; shard < shards; shard++) { - final ShardId shardId = new ShardId(index, "_na_", shard); - if (randomInt(5) < 2) { - // total shard failure - failed += replicas + 1; - failures++; - shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure")); - } else { - Map shardResponses = new HashMap<>(); - for (int copy = 0; copy < replicas + 1; copy++) { - final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, - 
null, copy == 0, ShardRoutingState.STARTED); - if (randomInt(5) < 2) { - // shard copy failure - failed++; - failures++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); - } else { - successful++; - shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse((String) null)); - } - } - shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); - } - } - indicesResults.put(index, shardsResults); - testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed)); - testPlan.expectedFailuresPerIndex.put(index, failures); - totalFailed += failed; - totalShards += shards * (replicas + 1); - totalSuccesful += successful; - } - testPlan.result = new SyncedFlushResponse(indicesResults); - testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccesful, totalFailed); - return testPlan; - } - -} diff --git a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java index 9868adfe3b86..cf0c766972e0 100644 --- a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java +++ b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.gateway; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -150,10 +149,6 @@ public class ReplicaShardAllocatorIT extends ESIntegTestCase { indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(10, 100)) .mapToObj(n -> client().prepareIndex(indexName).setSource("f", "v")).collect(Collectors.toList())); - assertBusy(() -> { - SyncedFlushResponse 
syncedFlushResponse = client().admin().indices().prepareSyncedFlush(indexName).get(); - assertThat(syncedFlushResponse.successfulShards(), equalTo(2)); - }); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithReplica)); if (randomBoolean()) { indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, between(10, 100)) @@ -357,10 +352,11 @@ public class ReplicaShardAllocatorIT extends ESIntegTestCase { assertNoOpRecoveries(indexName); } - private void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception { + public static void ensureActivePeerRecoveryRetentionLeasesAdvanced(String indexName) throws Exception { + final ClusterService clusterService = internalCluster().clusterService(); assertBusy(() -> { Index index = resolveIndex(indexName); - Set activeRetentionLeaseIds = clusterService().state().routingTable().index(index).shard(0).shards().stream() + Set activeRetentionLeaseIds = clusterService.state().routingTable().index(index).shard(0).shards().stream() .map(shardRouting -> ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardRouting.currentNodeId())) .collect(Collectors.toSet()); for (String node : internalCluster().nodesInclude(indexName)) { diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 71af6ac7f040..4fc8f4395e56 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -514,7 +514,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice null, null, null, - null, primaryReplicaSyncer, RetentionLeaseSyncer.EMPTY, client) { diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java 
b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 5b4b80aab8c5..ed442fd9cb51 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -18,63 +18,34 @@ */ package org.elasticsearch.indices.flush; -import org.apache.lucene.index.Term; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushResponse; -import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; -import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; -import org.elasticsearch.action.support.ActiveShardCount; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.InternalEngine; -import org.elasticsearch.index.engine.InternalEngineTests; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.IndexShardTestCase; -import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.indices.IndexingMemoryController; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.InternalTestCluster; -import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.emptyArray; import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class FlushIT extends ESIntegTestCase { @@ -136,253 +107,6 @@ public class FlushIT extends ESIntegTestCase { .actionGet().getShardFailures(), emptyArray()); } - public void testSyncedFlush() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(2); - prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)).get(); - ensureGreen(); - - final Index index = client().admin().cluster().prepareState().get().getState().metaData().index("test").getIndex(); - - IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - - ShardsSyncedFlushResult result; - if (randomBoolean()) { - logger.info("--> 
sync flushing shard 0"); - result = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), new ShardId(index, 0)); - } else { - logger.info("--> sync flushing index [test]"); - SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get(); - result = indicesResult.getShardsResultPerIndex().get("test").get(0); - } - assertFalse(result.failed()); - assertThat(result.totalShards(), equalTo(indexStats.getShards().length)); - assertThat(result.successfulShards(), equalTo(indexStats.getShards().length)); - - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - String syncId = result.syncId(); - for (ShardStats shardStats : indexStats.getShards()) { - final String shardSyncId = shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID); - assertThat(shardSyncId, equalTo(syncId)); - } - - // now, start new node and relocate a shard there and see if sync id still there - String newNodeName = internalCluster().startNode(); - ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - ShardRouting shardRouting = clusterState.getRoutingTable().index("test").shard(0).iterator().next(); - String currentNodeName = clusterState.nodes().resolveNode(shardRouting.currentNodeId()).getName(); - assertFalse(currentNodeName.equals(newNodeName)); - internalCluster().client().admin().cluster().prepareReroute() - .add(new MoveAllocationCommand("test", 0, currentNodeName, newNodeName)).get(); - - client().admin().cluster().prepareHealth() - .setWaitForNoRelocatingShards(true) - .get(); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build()).get(); - 
ensureGreen("test"); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, internalCluster().numDataNodes() - 1).build()).get(); - ensureGreen("test"); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - } - - public void testSyncedFlushWithConcurrentIndexing() throws Exception { - - internalCluster().ensureAtLeastNumDataNodes(3); - createIndex("test"); - - client().admin().indices().prepareUpdateSettings("test").setSettings( - Settings.builder() - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) - .put("index.refresh_interval", -1) - .put("index.number_of_replicas", internalCluster().numDataNodes() - 1)) - .get(); - ensureGreen(); - final AtomicBoolean stop = new AtomicBoolean(false); - final AtomicInteger numDocs = new AtomicInteger(0); - Thread indexingThread = new Thread() { - @Override - public void run() { - while (stop.get() == false) { - client().prepareIndex().setIndex("test").setSource("{}", XContentType.JSON).get(); - numDocs.incrementAndGet(); - } - } - }; - indexingThread.start(); - - IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - for (ShardStats shardStats : indexStats.getShards()) { - assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - logger.info("--> trying sync flush"); - SyncedFlushResponse syncedFlushResult = client().admin().indices().prepareSyncedFlush("test").get(); - logger.info("--> sync flush 
done"); - stop.set(true); - indexingThread.join(); - indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); - assertFlushResponseEqualsShardStats(indexStats.getShards(), syncedFlushResult.getShardsResultPerIndex().get("test")); - refresh(); - assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get())); - logger.info("indexed {} docs", client().prepareSearch().setSize(0).get().getHits().getTotalHits().value); - logClusterState(); - internalCluster().fullRestart(); - ensureGreen(); - assertThat(client().prepareSearch().setSize(0).get().getHits().getTotalHits().value, equalTo((long) numDocs.get())); - } - - private void assertFlushResponseEqualsShardStats(ShardStats[] shardsStats, List syncedFlushResults) { - - for (final ShardStats shardStats : shardsStats) { - for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) { - if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) { - for (Map.Entry singleResponse : - shardResult.shardResponses().entrySet()) { - if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) { - if (singleResponse.getValue().success()) { - logger.info("{} sync flushed on node {}", singleResponse.getKey().shardId(), - singleResponse.getKey().currentNodeId()); - assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } else { - logger.info("{} sync flush failed for on node {}", singleResponse.getKey().shardId(), - singleResponse.getKey().currentNodeId()); - assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); - } - } - } - } - } - } - } - - public void testUnallocatedShardsDoesNotHang() throws InterruptedException { - // create an index but disallow allocation - prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).setSettings(Settings.builder() - .put("index.routing.allocation.include._name", "nonexistent")).get(); - - 
// this should not hang but instead immediately return with empty result set - List shardsResult = client().admin().indices().prepareSyncedFlush("test").get() - .getShardsResultPerIndex().get("test"); - // just to make sure the test actually tests the right thing - int numShards = client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test") - .getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1); - assertThat(shardsResult.size(), equalTo(numShards)); - assertThat(shardsResult.get(0).failureReason(), equalTo("no active shards")); - } - - private void indexDoc(Engine engine, String id) throws IOException { - final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null); - final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc, - ((InternalEngine) engine).getProcessedLocalCheckpoint() + 1, 1L, 1L, null, Engine.Operation.Origin.REPLICA, System.nanoTime(), - -1L, false, SequenceNumbers.UNASSIGNED_SEQ_NO, 0)); - assertThat(indexResult.getFailure(), nullValue()); - engine.syncTranslog(); - } - - public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); - final int numberOfReplicas = internalCluster().numDataNodes() - 1; - assertAcked( - prepareCreate("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)).get() - ); - ensureGreen(); - final Index index = clusterService().state().metaData().index("test").getIndex(); - final ShardId shardId = new ShardId(index, 0); - final int numDocs = between(1, 10); - for (int i = 0; i < numDocs; i++) { - indexDoc("test", Integer.toString(i)); - } - final List indexShards = internalCluster().nodesInclude("test").stream() - .map(node -> internalCluster().getInstance(IndicesService.class, node).getShardOrNull(shardId)) - .collect(Collectors.toList()); - // Index extra 
documents to one replica - synced-flush should fail on that replica. - final IndexShard outOfSyncReplica = randomValueOtherThanMany(s -> s.routingEntry().primary(), () -> randomFrom(indexShards)); - final int extraDocs = between(1, 10); - for (int i = 0; i < extraDocs; i++) { - indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); - } - final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); - assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); - assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo( - "ongoing indexing operations: num docs on replica [" + (numDocs + extraDocs) + "]; num docs on primary [" + numDocs + "]")); - // Index extra documents to all shards - synced-flush should be ok. - for (IndexShard indexShard : indexShards) { - // Do reindex documents to the out of sync replica to avoid trigger merges - if (indexShard != outOfSyncReplica) { - for (int i = 0; i < extraDocs; i++) { - indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i); - } - } - } - final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1)); - assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); - } - - public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); - final int numberOfReplicas = internalCluster().numDataNodes() - 1; - assertAcked( - prepareCreate("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)).get() - ); - ensureGreen(); - final Index index = clusterService().state().metaData().index("test").getIndex(); - final ShardId 
shardId = new ShardId(index, 0); - final int numDocs = between(1, 10); - for (int i = 0; i < numDocs; i++) { - indexDoc("test", Integer.toString(i)); - } - final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - // Do not renew synced-flush - final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); - // Shards were updated, renew synced flush. - final int moreDocs = between(1, 10); - for (int i = 0; i < moreDocs; i++) { - indexDoc("test", "more-" + i); - } - final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); - // Manually remove or change sync-id, renew synced flush. - IndexShard shard = internalCluster().getInstance(IndicesService.class, randomFrom(internalCluster().nodesInclude("test"))) - .getShardOrNull(shardId); - if (randomBoolean()) { - // Change the existing sync-id of a single shard. 
- shard.syncFlush(UUIDs.randomBase64UUID(random()), shard.commitStats().getRawCommitId()); - assertThat(shard.commitStats().syncId(), not(equalTo(thirdSeal.syncId()))); - } else { - // Flush will create a new commit without sync-id - shard.flush(new FlushRequest(shardId.getIndexName()).force(true).waitIfOngoing(true)); - assertThat(shard.commitStats().syncId(), nullValue()); - } - final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); - assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); - } - public void testFlushOnInactive() throws Exception { final String indexName = "flush_on_inactive"; List dataNodes = internalCluster().startDataOnlyNodes(2, Settings.builder() diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java deleted file mode 100644 index d695e5e612de..000000000000 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.indices.flush; - -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardNotFoundException; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.test.ESSingleNodeTestCase; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.List; -import java.util.Map; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; - -public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { - - public void testModificationPreventsFlushing() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - final ClusterState state = getInstanceFromNode(ClusterService.class).state(); - final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - assertEquals("exactly one active shard", 1, activeShards.size()); - Map preSyncedResponses = - 
SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); - assertEquals("exactly one commit id", 1, preSyncedResponses.size()); - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); - String syncId = UUIDs.randomBase64UUID(); - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("pending operations", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - - // pull another commit and make sure we can't sync-flush with the old one - SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); - listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("commit has changed", 
syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - } - - public void testSingleShardSuccess() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(1, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - SyncedFlushService.ShardSyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next(); - assertTrue(response.success()); - } - - public void testSyncFailsIfOperationIsInFlight() throws Exception { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - // wait for the GCP sync spawned from the index request above to complete to avoid that request disturbing the check below - assertBusy(() -> { - assertEquals(0, shard.getLastSyncedGlobalCheckpoint()); - assertEquals(0, shard.getActiveOperationsCount()); - }); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - PlainActionFuture fut = new PlainActionFuture<>(); - shard.acquirePrimaryOperationPermit(fut, ThreadPool.Names.WRITE, ""); - try (Releasable operationLock = fut.get()) { - 
SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertNotEquals(0, syncedFlushResult.totalShards()); - assertEquals("[1] ongoing operations on primary", syncedFlushResult.failureReason()); - } - } - - public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { - createIndex("test", Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .build()); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - final IndexShard shard = test.getShardOrNull(0); - assertNotNull(shard); - final ShardId shardId = shard.shardId(); - - final SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(new ShardId(shard.shardId().getIndex(), 1), listener); - listener.latch.await(); - assertNotNull(listener.error); - assertNull(listener.result); - assertEquals(ShardNotFoundException.class, listener.error.getClass()); - assertEquals("no such shard", listener.error.getMessage()); - - assertAcked(client().admin().indices().prepareClose("test")); - listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - assertNotNull(listener.error); - assertNull(listener.result); - assertEquals("closed", listener.error.getMessage()); - - listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(new ShardId("index not found", "_na_", 0), listener); - listener.latch.await(); - assertNotNull(listener.error); - assertNull(listener.result); 
- assertEquals("no such index [index not found]", listener.error.getMessage()); - } - - public void testFailAfterIntermediateCommit() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - final ClusterState state = getInstanceFromNode(ClusterService.class).state(); - final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - assertEquals("exactly one active shard", 1, activeShards.size()); - Map preSyncedResponses = - SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); - assertEquals("exactly one commit id", 1, preSyncedResponses.size()); - if (randomBoolean()) { - client().prepareIndex("test").setId("2").setSource("{}", XContentType.JSON).get(); - } - client().admin().indices().prepareFlush("test").setForce(true).get(); - String syncId = UUIDs.randomBase64UUID(); - final SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("commit has 
changed", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - } - - public void testFailWhenCommitIsMissing() throws InterruptedException { - createIndex("test"); - client().prepareIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); - - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - final ShardId shardId = shard.shardId(); - final ClusterState state = getInstanceFromNode(ClusterService.class).state(); - final IndexShardRoutingTable shardRoutingTable = flushService.getShardRoutingTable(shardId, state); - final List activeShards = shardRoutingTable.activeShards(); - assertEquals("exactly one active shard", 1, activeShards.size()); - Map preSyncedResponses = - SyncedFlushUtil.sendPreSyncRequests(flushService, activeShards, state, shardId); - assertEquals("exactly one commit id", 1, preSyncedResponses.size()); - preSyncedResponses.clear(); // wipe it... 
- String syncId = UUIDs.randomBase64UUID(); - SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener<>(); - flushService.sendSyncRequests(syncId, activeShards, state, preSyncedResponses, shardId, shardRoutingTable.size(), listener); - listener.latch.await(); - assertNull(listener.error); - ShardsSyncedFlushResult syncedFlushResult = listener.result; - assertNotNull(syncedFlushResult); - assertEquals(0, syncedFlushResult.successfulShards()); - assertEquals(1, syncedFlushResult.totalShards()); - assertEquals(syncId, syncedFlushResult.syncId()); - assertNotNull(syncedFlushResult.shardResponses().get(activeShards.get(0))); - assertFalse(syncedFlushResult.shardResponses().get(activeShards.get(0)).success()); - assertEquals("no commit id from pre-sync flush", syncedFlushResult.shardResponses().get(activeShards.get(0)).failureReason()); - } - - -} diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java deleted file mode 100644 index ffb494570a5b..000000000000 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.indices.flush; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.test.InternalTestCluster; - -import java.util.List; -import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicReference; - -import static org.elasticsearch.test.ESTestCase.assertBusy; - -/** Utils for SyncedFlush */ -public class SyncedFlushUtil { - - private SyncedFlushUtil() { - - } - - /** - * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} - */ - public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) throws Exception { - /* - * When the last indexing operation is completed, we will fire a global checkpoint sync. - * Since a global checkpoint sync request is a replication request, it will acquire an index - * shard permit on the primary when executing. If this happens at the same time while we are - * issuing the synced-flush, the synced-flush request will fail as it thinks there are - * in-flight operations. We can avoid such situation by continuing issuing another synced-flush - * if the synced-flush failed due to the ongoing operations on the primary. 
- */ - SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); - AtomicReference> listenerHolder = new AtomicReference<>(); - assertBusy(() -> { - LatchedListener listener = new LatchedListener<>(); - listenerHolder.set(listener); - service.attemptSyncedFlush(shardId, listener); - listener.latch.await(); - if (listener.result != null && listener.result.failureReason() != null - && listener.result.failureReason().contains("ongoing operations on primary")) { - throw new AssertionError(listener.result.failureReason()); // cause the assert busy to retry - } - }); - if (listenerHolder.get().error != null) { - throw ExceptionsHelper.convertToElastic(listenerHolder.get().error); - } - return listenerHolder.get().result; - } - - public static final class LatchedListener implements ActionListener { - public volatile T result; - public volatile Exception error; - public final CountDownLatch latch = new CountDownLatch(1); - - @Override - public void onResponse(T result) { - this.result = result; - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - error = e; - latch.countDown(); - } - } - - /** - * Blocking version of {@link SyncedFlushService#sendPreSyncRequests(List, ClusterState, ShardId, ActionListener)} - */ - public static Map sendPreSyncRequests(SyncedFlushService service, - List activeShards, - ClusterState state, - ShardId shardId) { - LatchedListener> listener = new LatchedListener<>(); - service.sendPreSyncRequests(activeShards, state, shardId, listener); - try { - listener.latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - if (listener.error != null) { - throw ExceptionsHelper.convertToElastic(listener.error); - } - return listener.result; - } -} diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 414df8d648c5..a8b3d82a0298 100644 --- 
a/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -62,6 +62,7 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.ReplicaShardAllocatorIT; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexSettings; @@ -78,7 +79,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.Store; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; -import org.elasticsearch.indices.flush.SyncedFlushUtil; import org.elasticsearch.indices.recovery.RecoveryState.Stage; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.node.RecoverySettingsChunkSizePlugin; @@ -109,7 +109,6 @@ import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Semaphore; @@ -118,7 +117,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.IntStream; -import java.util.stream.Stream; import java.util.stream.StreamSupport; import static java.util.Collections.singletonMap; @@ -329,8 +327,19 @@ public class IndexRecoveryIT extends ESIntegTestCase { final String nodeA = internalCluster().startNode(); logger.info("--> create index on node: {}", nodeA); - createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT) - .getShards()[0].getStats().getStore().size(); + createIndex(INDEX_NAME, Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + 
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") + .put(IndexService.GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.getKey(), "100ms").build()); + + int numDocs = randomIntBetween(10, 200); + final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs]; + for (int i = 0; i < numDocs; i++) { + docs[i] = client().prepareIndex(INDEX_NAME). + setSource("foo-int", randomInt(), "foo-string", randomAlphaOfLength(32), "foo-float", randomFloat()); + } + indexRandom(randomBoolean(), docs); logger.info("--> start node B"); // force a shard recovery from nodeA to nodeB @@ -346,8 +355,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { logger.info("--> start node C"); final String nodeC = internalCluster().startNode(); - // do sync flush to gen sync id - assertThat(client().admin().indices().prepareSyncedFlush(INDEX_NAME).get().failedShards(), equalTo(0)); + ReplicaShardAllocatorIT.ensureActivePeerRecoveryRetentionLeasesAdvanced(INDEX_NAME); // hold peer recovery on phase 2 after nodeB down CountDownLatch phase1ReadyBlocked = new CountDownLatch(1); @@ -1077,73 +1085,6 @@ public class IndexRecoveryIT extends ESIntegTestCase { ensureGreen(indexName); } - public void testRecoveryFlushReplica() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(3); - String indexName = "test-index"; - createIndex(indexName, Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 1).build()); - int numDocs = randomIntBetween(0, 10); - indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, numDocs) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); - assertAcked(client().admin().indices().prepareUpdateSettings(indexName) - .setSettings(Settings.builder().put("index.number_of_replicas", 1))); - ensureGreen(indexName); - ShardId shardId = null; - for (ShardStats shardStats : 
client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) { - shardId = shardStats.getShardRouting().shardId(); - if (shardStats.getShardRouting().primary() == false) { - assertThat(shardStats.getCommitStats().getNumDocs(), equalTo(numDocs)); - SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( - shardStats.getCommitStats().getUserData().entrySet()); - assertThat(commitInfo.localCheckpoint, equalTo(shardStats.getSeqNoStats().getLocalCheckpoint())); - assertThat(commitInfo.maxSeqNo, equalTo(shardStats.getSeqNoStats().getMaxSeqNo())); - } - } - SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - assertBusy(() -> assertThat(client().admin().indices().prepareSyncedFlush(indexName).get().failedShards(), equalTo(0))); - assertAcked(client().admin().indices().prepareUpdateSettings(indexName) - .setSettings(Settings.builder().put("index.number_of_replicas", 2))); - ensureGreen(indexName); - // Recovery should keep syncId if no indexing activity on the primary after synced-flush. 
- Set syncIds = Stream.of(client().admin().indices().prepareStats(indexName).get().getIndex(indexName).getShards()) - .map(shardStats -> shardStats.getCommitStats().syncId()) - .collect(Collectors.toSet()); - assertThat(syncIds, hasSize(1)); - } - - public void testRecoveryUsingSyncedFlushWithoutRetentionLease() throws Exception { - internalCluster().ensureAtLeastNumDataNodes(2); - String indexName = "test-index"; - createIndex(indexName, Settings.builder() - .put("index.number_of_shards", 1) - .put("index.number_of_replicas", 1) - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h") // do not reallocate the lost shard - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_LEASE_PERIOD_SETTING.getKey(), "100ms") // expire leases quickly - .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") // sync frequently - .build()); - int numDocs = randomIntBetween(0, 10); - indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, numDocs) - .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); - ensureGreen(indexName); - - final ShardId shardId = new ShardId(resolveIndex(indexName), 0); - assertThat(SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId).successfulShards(), equalTo(2)); - - final ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - final ShardRouting shardToResync = randomFrom(clusterState.routingTable().shardRoutingTable(shardId).activeShards()); - internalCluster().restartNode(clusterState.nodes().get(shardToResync.currentNodeId()).getName(), - new InternalTestCluster.RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - assertBusy(() -> assertFalse(client().admin().indices().prepareStats(indexName).get() - .getShards()[0].getRetentionLeaseStats().retentionLeases().contains( - ReplicationTracker.getPeerRecoveryRetentionLeaseId(shardToResync)))); - return 
super.onNodeStopped(nodeName); - } - }); - - ensureGreen(indexName); - } - public void testRecoverLocallyUpToGlobalCheckpoint() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); List nodes = randomSubsetOf(2, StreamSupport.stream(clusterService().state().nodes().getDataNodes().spliterator(), false) diff --git a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java index 1000a3df4c8d..ff5a3b3097a6 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -404,11 +404,8 @@ public class CloseIndexIT extends ESIntegTestCase { indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, randomIntBetween(0, 50)) .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); ensureGreen(indexName); - if (randomBoolean()) { - client().admin().indices().prepareFlush(indexName).get(); - } else { - client().admin().indices().prepareSyncedFlush(indexName).get(); - } + client().admin().indices().prepareFlush(indexName).get(); + // index more documents while one shard copy is offline internalCluster().restartNode(dataNodes.get(1), new InternalTestCluster.RestartCallback() { @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 03153595dcf1..d36a31a30f59 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -154,7 +154,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.indices.cluster.IndicesClusterStateService; -import 
org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.indices.recovery.PeerRecoverySourceService; import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; @@ -1202,7 +1201,6 @@ public class SnapshotResiliencyTests extends ESTestCase { new NodeMappingRefreshAction(transportService, metaDataMappingService), repositoriesService, mock(SearchService.class), - new SyncedFlushService(indicesService, clusterService, transportService, indexNameExpressionResolver), new PeerRecoverySourceService(transportService, indicesService, recoverySettings), snapshotShardsService, new PrimaryReplicaSyncer( diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 236779306bab..038efa4d4159 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -164,7 +164,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; -import static org.elasticsearch.client.Requests.syncedFlushRequest; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; @@ -1401,13 +1400,8 @@ public abstract class ESIntegTestCase extends ESTestCase { client().admin().indices().prepareRefresh(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute( new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } else if (maybeFlush && rarely()) { - if (randomBoolean()) { - client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute( - new 
LatchedActionListener<>(newLatch(inFlightAsyncOperations))); - } else { - client().admin().indices().syncedFlush(syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()), - new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); - } + client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute( + new LatchedActionListener<>(newLatch(inFlightAsyncOperations))); } else if (rarely()) { client().admin().indices().prepareForceMerge(indices) .setIndicesOptions(IndicesOptions.lenientExpandOpen()) diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 58144da28d11..7e325b61db9b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -79,7 +79,6 @@ import org.elasticsearch.env.ShardLockObtainFailedException; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.engine.CommitStats; import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineTestCase; @@ -116,7 +115,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -1144,40 +1142,10 @@ public final class InternalTestCluster extends TestCluster { // and not all docs have been purged after the test) and inherit from // ElasticsearchIntegrationTest must override beforeIndexDeletion() to avoid failures. 
assertNoPendingIndexOperations(); - //check that shards that have same sync id also contain same number of documents - assertSameSyncIdSameDocs(); assertOpenTranslogReferences(); assertNoSnapshottedIndexCommit(); } - private void assertSameSyncIdSameDocs() { - Map docsOnShards = new HashMap<>(); - final Collection nodesAndClients = nodes.values(); - for (NodeAndClient nodeAndClient : nodesAndClients) { - IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); - for (IndexService indexService : indexServices) { - for (IndexShard indexShard : indexService) { - try { - CommitStats commitStats = indexShard.commitStats(); - String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID); - if (syncId != null) { - long liveDocsOnShard = commitStats.getNumDocs(); - if (docsOnShards.get(syncId) != null) { - assertThat("sync id is equal but number of docs does not match on node " - + nodeAndClient.name + ". expected " + docsOnShards.get(syncId) + " but got " - + liveDocsOnShard, docsOnShards.get(syncId), equalTo(liveDocsOnShard)); - } else { - docsOnShards.put(syncId, liveDocsOnShard); - } - } - } catch (AlreadyClosedException e) { - // the engine is closed or if the shard is recovering - } - } - } - } - } - private void assertNoPendingIndexOperations() throws Exception { assertBusy(() -> { for (NodeAndClient nodeAndClient : nodes.values()) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 988975619fd8..6ec594a0fdaf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -54,7 +54,6 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; import 
org.elasticsearch.index.seqno.ReplicationTracker; -import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ESTestCase; @@ -1214,18 +1213,60 @@ public abstract class ESRestTestCase extends ESTestCase { return minVersion; } - protected static Response performSyncedFlush(String indexName) throws IOException { - final Request request = new Request("POST", indexName + "/_flush/synced"); - final List expectedWarnings = Collections.singletonList(SyncedFlushService.SYNCED_FLUSH_DEPRECATION_MESSAGE); - if (nodeVersions.stream().allMatch(version -> version.onOrAfter(Version.V_7_6_0))) { - final Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(warnings -> warnings.equals(expectedWarnings) == false); - request.setOptions(options); - } else if (nodeVersions.stream().anyMatch(version -> version.onOrAfter(Version.V_7_6_0))) { - final Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(warnings -> warnings.isEmpty() == false && warnings.equals(expectedWarnings) == false); - request.setOptions(options); + protected void syncedFlush(String indexName) throws Exception { + final List deprecationMessages = List.of( + "Synced flush is deprecated and will be removed in 8.0. Use flush at _/flush or /{index}/_flush instead."); + final List transitionMessages = List.of( + "Synced flush was removed and a normal flush was performed instead. 
This transition will be removed in a future version."); + final WarningsHandler warningsHandler; + if (minimumNodeVersion().onOrAfter(Version.V_8_0_0)) { + warningsHandler = warnings -> warnings.equals(transitionMessages) == false; + } else if (minimumNodeVersion().onOrAfter(Version.V_7_6_0)) { + warningsHandler = warnings -> warnings.equals(deprecationMessages) == false && warnings.equals(transitionMessages) == false; + } else if (nodeVersions.stream().anyMatch(n -> n.onOrAfter(Version.V_8_0_0))) { + warningsHandler = warnings -> warnings.isEmpty() == false && warnings.equals(transitionMessages) == false; + } else { + warningsHandler = warnings -> warnings.isEmpty() == false; } - return client().performRequest(request); + // We have to spin synced-flush requests here because we fire the global checkpoint sync for the last write operation. + // A synced-flush request considers the global checkpoint sync as an ongoing operation because it acquires a shard permit. + assertBusy(() -> { + try { + final Request request = new Request("POST", indexName + "/_flush/synced"); + request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warningsHandler)); + Response resp = client().performRequest(request); + if (nodeVersions.stream().allMatch(v -> v.before(Version.V_8_0_0))) { + Map result = ObjectPath.createFromResponse(resp).evaluate("_shards"); + assertThat(result.get("failed"), equalTo(0)); + } + } catch (ResponseException ex) { + if (ex.getResponse().getStatusLine().getStatusCode() == RestStatus.CONFLICT.getStatus() + && ex.getResponse().getWarnings().equals(transitionMessages)) { + logger.info("a normal flush was performed instead"); + } else { + throw new AssertionError(ex); // cause assert busy to retry + } + } + }); + // ensure the global checkpoint is synced; otherwise we might trim the commit with syncId + ensureGlobalCheckpointSynced(indexName); + } + + @SuppressWarnings("unchecked") + private void ensureGlobalCheckpointSynced(String index) throws
Exception { + assertBusy(() -> { + Map stats = entityAsMap(client().performRequest(new Request("GET", index + "/_stats?level=shards"))); + List> shardStats = (List>) XContentMapValues.extractValue("indices." + index + ".shards.0", stats); + shardStats.stream() + .map(shard -> (Map) XContentMapValues.extractValue("seq_no", shard)) + .filter(Objects::nonNull) + .forEach(seqNoStat -> { + long globalCheckpoint = ((Number) XContentMapValues.extractValue("global_checkpoint", seqNoStat)).longValue(); + long localCheckpoint = ((Number) XContentMapValues.extractValue("local_checkpoint", seqNoStat)).longValue(); + long maxSeqNo = ((Number) XContentMapValues.extractValue("max_seq_no", seqNoStat)).longValue(); + assertThat(shardStats.toString(), localCheckpoint, equalTo(maxSeqNo)); + assertThat(shardStats.toString(), globalCheckpoint, equalTo(maxSeqNo)); + }); + }, 60, TimeUnit.SECONDS); } } diff --git a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java index 6de7952e4d84..a877159b079c 100644 --- a/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java +++ b/x-pack/plugin/frozen-indices/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java @@ -57,11 +57,7 @@ public class FrozenIndexRecoveryTests extends ESIntegTestCase { indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, randomIntBetween(0, 50)) .mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList())); ensureGreen(indexName); - if (randomBoolean()) { - client().admin().indices().prepareFlush(indexName).get(); - } else { - client().admin().indices().prepareSyncedFlush(indexName).get(); - } + client().admin().indices().prepareFlush(indexName).get(); // index more documents while one shard copy is offline 
internalCluster().restartNode(dataNodes.get(1), new InternalTestCluster.RestartCallback() { @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index f0ef71c0a6a4..cd58db91466d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -435,7 +435,7 @@ public class MlDistributedFailureIT extends BaseMlIntegTestCase { setupJobAndDatafeed(jobId, "data_feed_id", TimeValue.timeValueSeconds(1)); waitForDatafeed(jobId, numDocs1); - client().admin().indices().prepareSyncedFlush().get(); + client().admin().indices().prepareFlush().get(); disrupt.run();