Save 400 LoC in tests by using indexSettings shortcut (#111573)

It's in the title: I randomly saw a bunch of spots where we're
not using the shortcut, so I figured I'd clean this up quickly to save ~400 lines.
This commit is contained in:
Armin Braun 2024-08-05 10:21:13 +02:00 committed by GitHub
parent e58678da4e
commit bf7be8e23a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
69 changed files with 132 additions and 498 deletions

View file

@ -17,7 +17,6 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.aggregations.AggregationIntegTestCase; import org.elasticsearch.aggregations.AggregationIntegTestCase;
import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries; import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries;
import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder;
@ -102,15 +101,11 @@ public class TimeSeriesAggregationsUnlimitedDimensionsIT extends AggregationInte
final String[] routingDimensions final String[] routingDimensions
) { ) {
return prepareCreate("index").setSettings( return prepareCreate("index").setSettings(
Settings.builder() indexSettings(randomIntBetween(1, 3), randomIntBetween(1, 3)).put("mode", "time_series")
.put("mode", "time_series")
.put("routing_path", String.join(",", routingDimensions)) .put("routing_path", String.join(",", routingDimensions))
.put("index.number_of_shards", randomIntBetween(1, 3))
.put("index.number_of_replicas", randomIntBetween(1, 3))
.put("time_series.start_time", startMillis) .put("time_series.start_time", startMillis)
.put("time_series.end_time", endMillis) .put("time_series.end_time", endMillis)
.put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192) .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192)
.build()
).setMapping(mapping).get(); ).setMapping(mapping).get();
} }

View file

@ -16,7 +16,6 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.aggregations.AggregationIntegTestCase; import org.elasticsearch.aggregations.AggregationIntegTestCase;
import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries; import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries;
import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder;
@ -103,15 +102,11 @@ public class TimeSeriesNestedAggregationsIT extends AggregationIntegTestCase {
final String[] routingDimensions final String[] routingDimensions
) { ) {
return prepareCreate("index").setSettings( return prepareCreate("index").setSettings(
Settings.builder() indexSettings(randomIntBetween(1, 3), randomIntBetween(1, 3)).put("mode", "time_series")
.put("mode", "time_series")
.put("routing_path", String.join(",", routingDimensions)) .put("routing_path", String.join(",", routingDimensions))
.put("index.number_of_shards", randomIntBetween(1, 3))
.put("index.number_of_replicas", randomIntBetween(1, 3))
.put("time_series.start_time", startMillis) .put("time_series.start_time", startMillis)
.put("time_series.end_time", endMillis) .put("time_series.end_time", endMillis)
.put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192) .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192)
.build()
).setMapping(mapping).get(); ).setMapping(mapping).get();
} }

View file

@ -112,11 +112,7 @@ public class DataStreamAutoshardingIT extends ESIntegTestCase {
public void testRolloverOnAutoShardCondition() throws Exception { public void testRolloverOnAutoShardCondition() throws Exception {
final String dataStreamName = "logs-es"; final String dataStreamName = "logs-es";
putComposableIndexTemplate( putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build());
"my-template",
List.of("logs-*"),
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
);
final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet());
@ -277,11 +273,7 @@ public class DataStreamAutoshardingIT extends ESIntegTestCase {
final String dataStreamName = "logs-es"; final String dataStreamName = "logs-es";
// start with 3 shards // start with 3 shards
putComposableIndexTemplate( putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build());
"my-template",
List.of("logs-*"),
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
);
final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet());
@ -391,11 +383,7 @@ public class DataStreamAutoshardingIT extends ESIntegTestCase {
public void testLazyRolloverKeepsPreviousAutoshardingDecision() throws IOException { public void testLazyRolloverKeepsPreviousAutoshardingDecision() throws IOException {
final String dataStreamName = "logs-es"; final String dataStreamName = "logs-es";
putComposableIndexTemplate( putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build());
"my-template",
List.of("logs-*"),
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
);
final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet()); assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet());

View file

@ -142,10 +142,7 @@ public class TSDBPassthroughIndexingIT extends ESSingleNodeTestCase {
} }
public void testIndexingGettingAndSearching() throws Exception { public void testIndexingGettingAndSearching() throws Exception {
var templateSettings = Settings.builder() var templateSettings = indexSettings(randomIntBetween(2, 10), 0).put("index.mode", "time_series");
.put("index.mode", "time_series")
.put("index.number_of_shards", randomIntBetween(2, 10))
.put("index.number_of_replicas", 0);
var request = new TransportPutComposableIndexTemplateAction.Request("id"); var request = new TransportPutComposableIndexTemplateAction.Request("id");
request.indexTemplate( request.indexTemplate(
@ -218,10 +215,7 @@ public class TSDBPassthroughIndexingIT extends ESSingleNodeTestCase {
public void testIndexingGettingAndSearchingShrunkIndex() throws Exception { public void testIndexingGettingAndSearchingShrunkIndex() throws Exception {
String dataStreamName = "k8s"; String dataStreamName = "k8s";
var templateSettings = Settings.builder() var templateSettings = indexSettings(8, 0).put("index.mode", "time_series");
.put("index.mode", "time_series")
.put("index.number_of_shards", 8)
.put("index.number_of_replicas", 0);
var request = new TransportPutComposableIndexTemplateAction.Request("id"); var request = new TransportPutComposableIndexTemplateAction.Request("id");
request.indexTemplate( request.indexTemplate(

View file

@ -416,10 +416,7 @@ public class DataStreamLifecycleServiceIT extends ESIntegTestCase {
"id1", "id1",
null, null,
List.of(dataStreamName + "*"), List.of(dataStreamName + "*"),
Settings.builder() indexSettings(1, 1).put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), ONE_HUNDRED_MB)
.put("index.number_of_replicas", 1)
.put("index.number_of_shards", 1)
.put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), ONE_HUNDRED_MB)
.put(MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), TARGET_MERGE_FACTOR_VALUE) .put(MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), TARGET_MERGE_FACTOR_VALUE)
.build(), .build(),
null, null,

View file

@ -1392,13 +1392,7 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
{ {
// non time_series indices are not within time bounds (they don't have any) // non time_series indices are not within time bounds (they don't have any)
IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30)) IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30))
.settings( .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()))
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.build()
)
.build(); .build();
Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build(); Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build();
@ -1596,12 +1590,14 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
var routingTableBuilder = RoutingTable.builder(); var routingTableBuilder = RoutingTable.builder();
Metadata.Builder metadataBuilder = Metadata.builder(); Metadata.Builder metadataBuilder = Metadata.builder();
Map<String, IndexMetadata> indices = new HashMap<>(); Map<String, IndexMetadata> indices = new HashMap<>();
Settings indexSettings = Settings.builder() IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), randomIntBetween(1, 10)) .version(randomLong())
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), randomIntBetween(0, 3)) .settings(
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) indexSettings(randomIntBetween(1, 10), randomIntBetween(0, 3)).put(
.build(); IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(),
IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName).version(randomLong()).settings(indexSettings); IndexVersion.current()
)
);
if (customDataStreamLifecycleMetadata != null) { if (customDataStreamLifecycleMetadata != null) {
indexMetadataBuilder.putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, customDataStreamLifecycleMetadata); indexMetadataBuilder.putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, customDataStreamLifecycleMetadata);
} }

View file

@ -920,9 +920,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
final String indexName = "test_empty_shard"; final String indexName = "test_empty_shard";
if (isRunningAgainstOldCluster()) { if (isRunningAgainstOldCluster()) {
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 1)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
// if the node with the replica is the first to be restarted, while a replica is still recovering // if the node with the replica is the first to be restarted, while a replica is still recovering
// then delayed allocation will kick in. When the node comes back, the master will search for a copy // then delayed allocation will kick in. When the node comes back, the master will search for a copy
// but the recovering copy will be seen as invalid and the cluster health won't return to GREEN // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@ -1522,14 +1520,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
*/ */
public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception { public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception {
if (isRunningAgainstOldCluster()) { if (isRunningAgainstOldCluster()) {
createIndex( createIndex(index, indexSettings(1, 1).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build());
index,
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
.build()
);
ensureGreen(index); ensureGreen(index);
int numDocs = randomIntBetween(10, 100); int numDocs = randomIntBetween(10, 100);
for (int i = 0; i < numDocs; i++) { for (int i = 0; i < numDocs; i++) {
@ -1549,9 +1540,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
public void testResize() throws Exception { public void testResize() throws Exception {
int numDocs; int numDocs;
if (isRunningAgainstOldCluster()) { if (isRunningAgainstOldCluster()) {
final Settings.Builder settings = Settings.builder() final Settings.Builder settings = indexSettings(3, 1);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1);
if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false); settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false);
} }

View file

@ -13,7 +13,6 @@ import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.common.xcontent.support.XContentMapValues;
@ -75,10 +74,7 @@ public class IndexingIT extends ESRestTestCase {
logger.info("cluster discovered: {}", nodes.toString()); logger.info("cluster discovered: {}", nodes.toString());
final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList()); final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList());
final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(","));
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 2).put("index.routing.allocation.include._name", bwcNames);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
.put("index.routing.allocation.include._name", bwcNames);
final String index = "indexversionprop"; final String index = "indexversionprop";
final int minUpdates = 5; final int minUpdates = 5;
final int maxUpdates = 10; final int maxUpdates = 10;
@ -165,10 +161,7 @@ public class IndexingIT extends ESRestTestCase {
logger.info("cluster discovered: {}", nodes.toString()); logger.info("cluster discovered: {}", nodes.toString());
final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList()); final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList());
final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(",")); final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(","));
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 2).put("index.routing.allocation.include._name", bwcNames);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
.put("index.routing.allocation.include._name", bwcNames);
final String index = "test"; final String index = "test";
createIndex(index, settings.build()); createIndex(index, settings.build());
@ -251,10 +244,7 @@ public class IndexingIT extends ESRestTestCase {
String bwcNames = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.joining(",")); String bwcNames = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.joining(","));
// Allocating shards on the BWC nodes to makes sure that taking snapshot happens on those nodes. // Allocating shards on the BWC nodes to makes sure that taking snapshot happens on those nodes.
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(between(5, 10), 1).put("index.routing.allocation.include._name", bwcNames);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(5, 10))
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put("index.routing.allocation.include._name", bwcNames);
final String index = "test-snapshot-index"; final String index = "test-snapshot-index";
createIndex(index, settings.build()); createIndex(index, settings.build());
@ -315,14 +305,7 @@ public class IndexingIT extends ESRestTestCase {
int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1);
int totalShards = numShards * (numOfReplicas + 1); int totalShards = numShards * (numOfReplicas + 1);
final String index = "test_synced_flush"; final String index = "test_synced_flush";
createIndex( createIndex(index, indexSettings(numShards, numOfReplicas).put("index.routing.allocation.include._name", newNodes).build());
index,
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas)
.put("index.routing.allocation.include._name", newNodes)
.build()
);
ensureGreen(index); ensureGreen(index);
indexDocs(index, randomIntBetween(0, 100), between(1, 100)); indexDocs(index, randomIntBetween(0, 100), between(1, 100));
try ( try (
@ -394,14 +377,7 @@ public class IndexingIT extends ESRestTestCase {
int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1); int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1);
int totalShards = numShards * (numOfReplicas + 1); int totalShards = numShards * (numOfReplicas + 1);
final String index = "test_flush"; final String index = "test_flush";
createIndex( createIndex(index, indexSettings(numShards, numOfReplicas).put("index.routing.allocation.include._name", newNodes).build());
index,
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas)
.put("index.routing.allocation.include._name", newNodes)
.build()
);
ensureGreen(index); ensureGreen(index);
indexDocs(index, randomIntBetween(0, 100), between(1, 100)); indexDocs(index, randomIntBetween(0, 100), between(1, 100));
try ( try (

View file

@ -10,7 +10,6 @@ package org.elasticsearch.backwards;
import org.elasticsearch.client.Request; import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.Strings; import org.elasticsearch.core.Strings;
@ -43,9 +42,7 @@ public class RareTermsIT extends ESRestTestCase {
} }
public void testSingleValuedString() throws Exception { public void testSingleValuedString() throws Exception {
final Settings.Builder settings = Settings.builder() final Settings.Builder settings = indexSettings(2, 0);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
createIndex(index, settings.build()); createIndex(index, settings.build());
// We want to trigger the usage of cuckoo filters that happen only when there are // We want to trigger the usage of cuckoo filters that happen only when there are
// more than 10k distinct values in one shard. // more than 10k distinct values in one shard.

View file

@ -12,8 +12,6 @@ import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Strings; import org.elasticsearch.core.Strings;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ESRestTestCase;
@ -50,13 +48,7 @@ public class SearchWithMinCompatibleSearchNodeIT extends ESRestTestCase {
allNodes.addAll(nodes.getNewNodes()); allNodes.addAll(nodes.getNewNodes());
if (client().performRequest(new Request("HEAD", "/" + index)).getStatusLine().getStatusCode() == 404) { if (client().performRequest(new Request("HEAD", "/" + index)).getStatusLine().getStatusCode() == 404) {
createIndex( createIndex(index, indexSettings(numShards, numReplicas).build());
index,
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
.build()
);
for (int i = 0; i < numDocs; i++) { for (int i = 0; i < numDocs; i++) {
Request request = new Request("PUT", index + "/_doc/" + i); Request request = new Request("PUT", index + "/_doc/" + i);
request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}");

View file

@ -60,9 +60,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
public void testHistoryUUIDIsGenerated() throws Exception { public void testHistoryUUIDIsGenerated() throws Exception {
final String index = "index_history_uuid"; final String index = "index_history_uuid";
if (CLUSTER_TYPE == ClusterType.OLD) { if (CLUSTER_TYPE == ClusterType.OLD) {
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 1)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
// if the node with the replica is the first to be restarted, while a replica is still recovering // if the node with the replica is the first to be restarted, while a replica is still recovering
// then delayed allocation will kick in. When the node comes back, the master will search for a copy // then delayed allocation will kick in. When the node comes back, the master will search for a copy
// but the recovering copy will be seen as invalid and the cluster health won't return to GREEN // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@ -128,9 +126,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
switch (CLUSTER_TYPE) { switch (CLUSTER_TYPE) {
case OLD -> { case OLD -> {
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 2)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
// if the node with the replica is the first to be restarted, while a replica is still recovering // if the node with the replica is the first to be restarted, while a replica is still recovering
// then delayed allocation will kick in. When the node comes back, the master will search for a copy // then delayed allocation will kick in. When the node comes back, the master will search for a copy
// but the recovering copy will be seen as invalid and the cluster health won't return to GREEN // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@ -217,9 +213,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
final String index = "relocation_with_concurrent_indexing"; final String index = "relocation_with_concurrent_indexing";
switch (CLUSTER_TYPE) { switch (CLUSTER_TYPE) {
case OLD -> { case OLD -> {
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 2)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
// if the node with the replica is the first to be restarted, while a replica is still recovering // if the node with the replica is the first to be restarted, while a replica is still recovering
// then delayed allocation will kick in. When the node comes back, the master will search for a copy // then delayed allocation will kick in. When the node comes back, the master will search for a copy
// but the recovering copy will be seen as invalid and the cluster health won't return to GREEN // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@ -296,9 +290,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
public void testRecovery() throws Exception { public void testRecovery() throws Exception {
final String index = "test_recovery"; final String index = "test_recovery";
if (CLUSTER_TYPE == ClusterType.OLD) { if (CLUSTER_TYPE == ClusterType.OLD) {
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 1)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
// if the node with the replica is the first to be restarted, while a replica is still recovering // if the node with the replica is the first to be restarted, while a replica is still recovering
// then delayed allocation will kick in. When the node comes back, the master will search for a copy // then delayed allocation will kick in. When the node comes back, the master will search for a copy
// but the recovering copy will be seen as invalid and the cluster health won't return to GREEN // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@ -413,9 +405,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
if (CLUSTER_TYPE == ClusterType.OLD) { if (CLUSTER_TYPE == ClusterType.OLD) {
createIndex( createIndex(
indexName, indexName,
Settings.builder() indexSettings(1, 1)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
// if the node with the replica is the first to be restarted, while a replica is still recovering // if the node with the replica is the first to be restarted, while a replica is still recovering
// then delayed allocation will kick in. When the node comes back, the master will search for a copy // then delayed allocation will kick in. When the node comes back, the master will search for a copy
// but the recovering copy will be seen as invalid and the cluster health won't return to GREEN // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@ -453,13 +443,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
final String indexName = String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(id)).toLowerCase(Locale.ROOT); final String indexName = String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(id)).toLowerCase(Locale.ROOT);
if (indexExists(indexName) == false) { if (indexExists(indexName) == false) {
createIndex( createIndex(indexName, indexSettings(1, 0).build());
indexName,
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build()
);
ensureGreen(indexName); ensureGreen(indexName);
closeIndex(indexName); closeIndex(indexName);
} }
@ -482,10 +466,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
if (CLUSTER_TYPE == ClusterType.OLD) { if (CLUSTER_TYPE == ClusterType.OLD) {
createIndex( createIndex(
indexName, indexName,
Settings.builder() indexSettings(1, 0).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
.put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h") .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h")
.put("index.routing.allocation.include._name", CLUSTER_NAME + "-0") .put("index.routing.allocation.include._name", CLUSTER_NAME + "-0")
.build() .build()
@ -578,9 +559,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
public void testUpdateDoc() throws Exception { public void testUpdateDoc() throws Exception {
final String index = "test_update_doc"; final String index = "test_update_doc";
if (CLUSTER_TYPE == ClusterType.OLD) { if (CLUSTER_TYPE == ClusterType.OLD) {
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 2);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2);
createIndex(index, settings.build()); createIndex(index, settings.build());
indexDocs(index, 0, 100); indexDocs(index, 0, 100);
} }
@ -648,9 +627,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
public void testOperationBasedRecovery() throws Exception { public void testOperationBasedRecovery() throws Exception {
final String index = "test_operation_based_recovery"; final String index = "test_operation_based_recovery";
if (CLUSTER_TYPE == ClusterType.OLD) { if (CLUSTER_TYPE == ClusterType.OLD) {
final Settings.Builder settings = Settings.builder() final Settings.Builder settings = indexSettings(1, 2);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2);
if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) { if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean()); settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
} }

View file

@ -59,10 +59,7 @@ public class SnapshotBasedRecoveryIT extends AbstractRollingUpgradeTestCase {
final String repositoryName = "snapshot_based_recovery_repo"; final String repositoryName = "snapshot_based_recovery_repo";
final int numDocs = 200; final int numDocs = 200;
if (isOldCluster()) { if (isOldCluster()) {
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 0).put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
.put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
createIndex(indexName, settings.build()); createIndex(indexName, settings.build());
ensureGreen(indexName); ensureGreen(indexName);

View file

@ -161,7 +161,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get(); ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
prepareCreate("test1").setSettings(Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 1)).get(); prepareCreate("test1").setSettings(indexSettings(2, 1)).get();
response = clusterAdmin().prepareClusterStats().get(); response = clusterAdmin().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW)); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW));
@ -179,7 +179,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L)); assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L));
assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0); assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0);
prepareCreate("test2").setSettings(Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0)).get(); prepareCreate("test2").setSettings(indexSettings(3, 0)).get();
ensureGreen(); ensureGreen();
response = clusterAdmin().prepareClusterStats().get(); response = clusterAdmin().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN)); assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));

View file

@ -306,10 +306,7 @@ public class CreateIndexIT extends ESIntegTestCase {
public void testFailureToCreateIndexCleansUpIndicesService() { public void testFailureToCreateIndexCleansUpIndicesService() {
final int numReplicas = internalCluster().numDataNodes(); final int numReplicas = internalCluster().numDataNodes();
Settings settings = Settings.builder() Settings settings = indexSettings(1, numReplicas).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), numReplicas)
.build();
assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).addAlias(new Alias("alias1").writeIndex(true)).get()); assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).addAlias(new Alias("alias1").writeIndex(true)).get());
ActionRequestBuilder<?, ?> builder = indicesAdmin().prepareCreate("test-idx-2") ActionRequestBuilder<?, ?> builder = indicesAdmin().prepareCreate("test-idx-2")
@ -328,10 +325,7 @@ public class CreateIndexIT extends ESIntegTestCase {
*/ */
public void testDefaultWaitForActiveShardsUsesIndexSetting() throws Exception { public void testDefaultWaitForActiveShardsUsesIndexSetting() throws Exception {
final int numReplicas = internalCluster().numDataNodes(); final int numReplicas = internalCluster().numDataNodes();
Settings settings = Settings.builder() Settings settings = indexSettings(1, numReplicas).put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas))
.put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas))
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), numReplicas)
.build(); .build();
assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).get()); assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).get());

View file

@ -211,9 +211,8 @@ public class RolloverIT extends ESIntegTestCase {
assertAcked(prepareCreate("test_index-2").addAlias(testAlias).get()); assertAcked(prepareCreate("test_index-2").addAlias(testAlias).get());
indexDoc("test_index-2", "1", "field", "value"); indexDoc("test_index-2", "1", "field", "value");
flush("test_index-2"); flush("test_index-2");
final Settings settings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build();
final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias") final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
.settings(settings) .settings(indexSettings(1, 0).build())
.alias(new Alias("extra_alias")) .alias(new Alias("extra_alias"))
.get(); .get();
assertThat(response.getOldIndex(), equalTo("test_index-2")); assertThat(response.getOldIndex(), equalTo("test_index-2"));

View file

@ -10,7 +10,6 @@ package org.elasticsearch.action.bulk;
import org.apache.lucene.tests.mockfile.FilterFileChannel; import org.apache.lucene.tests.mockfile.FilterFileChannel;
import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; import org.apache.lucene.tests.mockfile.FilterFileSystemProvider;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtils;
import org.elasticsearch.core.PathUtilsForTesting; import org.elasticsearch.core.PathUtilsForTesting;
@ -59,13 +58,7 @@ public class BulkAfterWriteFsyncFailureIT extends ESSingleNodeTestCase {
client().admin() client().admin()
.indices() .indices()
.prepareCreate(indexName) .prepareCreate(indexName)
.setSettings( .setSettings(indexSettings(1, 0).put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1))
Settings.builder()
.put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.build()
)
.setMapping("key", "type=keyword", "val", "type=long") .setMapping("key", "type=keyword", "val", "type=long")
.get(); .get();
ensureGreen(indexName); ensureGreen(indexName);

View file

@ -19,7 +19,6 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.ShardRoutingState;
@ -395,14 +394,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.isTimedOut(), equalTo(false));
final String indexName = "test_index"; final String indexName = "test_index";
indicesAdmin().prepareCreate(indexName) indicesAdmin().prepareCreate(indexName).setWaitForActiveShards(ActiveShardCount.NONE).setSettings(indexSettings(2, 1)).get();
.setWaitForActiveShards(ActiveShardCount.NONE)
.setSettings(
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
)
.get();
try (var dryRunMockLog = MockLog.capture(TransportClusterRerouteAction.class)) { try (var dryRunMockLog = MockLog.capture(TransportClusterRerouteAction.class)) {
dryRunMockLog.addExpectation( dryRunMockLog.addExpectation(

View file

@ -106,7 +106,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
} }
updateClusterSettings(settings); updateClusterSettings(settings);
// Create an index with 10 shards so we can check allocation for it // Create an index with 10 shards so we can check allocation for it
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 10).put("number_of_replicas", 0))); assertAcked(prepareCreate("test").setSettings(indexSettings(10, 0)));
ensureGreen("test"); ensureGreen("test");
assertBusy(() -> { assertBusy(() -> {
@ -184,7 +184,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
updateClusterSettings(builder); updateClusterSettings(builder);
// Create an index with 6 shards so we can check allocation for it // Create an index with 6 shards so we can check allocation for it
prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0)).get(); prepareCreate("test").setSettings(indexSettings(6, 0)).get();
ensureGreen("test"); ensureGreen("test");
{ {
@ -269,7 +269,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
.map(RoutingNode::nodeId) .map(RoutingNode::nodeId)
.toList(); .toList();
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0))); assertAcked(prepareCreate("test").setSettings(indexSettings(6, 0)));
ensureGreen("test"); ensureGreen("test");
@ -355,10 +355,10 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
assertAcked( assertAcked(
prepareCreate("test").setSettings( prepareCreate("test").setSettings(
Settings.builder() indexSettings(6, 0).put(
.put("number_of_shards", 6) IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(),
.put("number_of_replicas", 0) nodeIds.get(2)
.put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(), nodeIds.get(2)) )
) )
); );
ensureGreen("test"); ensureGreen("test");
@ -422,7 +422,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
.map(RoutingNode::nodeId) .map(RoutingNode::nodeId)
.toList(); .toList();
assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0))); assertAcked(prepareCreate("test").setSettings(indexSettings(6, 0)));
ensureGreen("test"); ensureGreen("test");

View file

@ -17,7 +17,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption;
import org.elasticsearch.test.disruption.ServiceDisruptionScheme; import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
@ -31,9 +30,6 @@ import java.util.Set;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier; import java.util.concurrent.CyclicBarrier;
import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING;
import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING;
/** /**
* Tests for discovery during disruptions. * Tests for discovery during disruptions.
*/ */
@ -136,13 +132,7 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
internalCluster().setDisruptionScheme(isolatePreferredMaster); internalCluster().setDisruptionScheme(isolatePreferredMaster);
isolatePreferredMaster.startDisrupting(); isolatePreferredMaster.startDisrupting();
client(randomFrom(nonPreferredNodes)).admin() client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings(indexSettings(1, 0)).get();
.indices()
.prepareCreate("test")
.setSettings(
Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
)
.get();
internalCluster().clearDisruptionScheme(false); internalCluster().clearDisruptionScheme(false);
internalCluster().setDisruptionScheme(isolateAllNodes); internalCluster().setDisruptionScheme(isolateAllNodes);

View file

@ -441,13 +441,9 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
.indices() .indices()
.prepareCreate("test") .prepareCreate("test")
.setSettings( .setSettings(
Settings.builder() indexSettings(1, 1)
.put("number_of_shards", 1)
.put("number_of_replicas", 1)
// disable merges to keep segments the same // disable merges to keep segments the same
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
// expire retention leases quickly // expire retention leases quickly
.put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms") .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")
) )

View file

@ -22,7 +22,6 @@ import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
@ -849,13 +848,7 @@ public class GetActionIT extends ESIntegTestCase {
SearcherWrapperPlugin.enabled = true; SearcherWrapperPlugin.enabled = true;
assertAcked( assertAcked(
prepareCreate("test").setMapping("f", "type=keyword") prepareCreate("test").setMapping("f", "type=keyword")
.setSettings( .setSettings(indexSettings(1, 0).put("index.refresh_interval", "-1").put("index.routing.rebalance.enable", "none"))
Settings.builder()
.put("index.refresh_interval", "-1")
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put("index.routing.rebalance.enable", "none")
)
); );
// start tracking translog locations in the live version map // start tracking translog locations in the live version map
{ {

View file

@ -13,9 +13,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.get.TransportGetFromTranslogAction; import org.elasticsearch.action.get.TransportGetFromTranslogAction;
import org.elasticsearch.action.get.TransportGetFromTranslogAction.Response; import org.elasticsearch.action.get.TransportGetFromTranslogAction.Response;
import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -34,11 +32,8 @@ public class GetFromTranslogActionIT extends ESIntegTestCase {
assertAcked( assertAcked(
prepareCreate(INDEX).setMapping("field1", "type=keyword,store=true") prepareCreate(INDEX).setMapping("field1", "type=keyword,store=true")
.setSettings( .setSettings(
Settings.builder() // A GetFromTranslogAction runs only Stateless where there is only one active indexing shard.
.put("index.refresh_interval", -1) indexSettings(1, 0).put("index.refresh_interval", -1)
// A GetFromTranslogAction runs only Stateless where there is only one active indexing shard.
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
) )
.addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null))) .addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null)))
); );

View file

@ -15,9 +15,7 @@ import org.elasticsearch.action.get.TransportShardMultiGetFomTranslogAction;
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -42,11 +40,8 @@ public class ShardMultiGetFomTranslogActionIT extends ESIntegTestCase {
public void testShardMultiGetFromTranslog() throws Exception { public void testShardMultiGetFromTranslog() throws Exception {
assertAcked( assertAcked(
prepareCreate(INDEX).setSettings( prepareCreate(INDEX).setSettings(
Settings.builder() // A ShardMultiGetFromTranslogAction runs only Stateless where there is only one active indexing shard.
.put("index.refresh_interval", -1) indexSettings(1, 0).put("index.refresh_interval", -1)
// A ShardMultiGetFromTranslogAction runs only Stateless where there is only one active indexing shard.
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
).addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null))) ).addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null)))
); );
ensureGreen(); ensureGreen();

View file

@ -914,10 +914,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
prepareCreate( prepareCreate(
name, name,
nodeCount, nodeCount,
Settings.builder() indexSettings(shardCount, replicaCount).put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0)
.put("number_of_shards", shardCount)
.put("number_of_replicas", replicaCount)
.put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0)
) )
); );
ensureGreen(); ensureGreen();

View file

@ -227,10 +227,7 @@ public class OpenCloseIndexIT extends ESIntegTestCase {
public void testOpenWaitingForActiveShardsFailed() throws Exception { public void testOpenWaitingForActiveShardsFailed() throws Exception {
Client client = client(); Client client = client();
Settings settings = Settings.builder() Settings settings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
assertAcked(client.admin().indices().prepareCreate("test").setSettings(settings).get()); assertAcked(client.admin().indices().prepareCreate("test").setSettings(settings).get());
assertAcked(client.admin().indices().prepareClose("test").get()); assertAcked(client.admin().indices().prepareClose("test").get());

View file

@ -1549,8 +1549,7 @@ public class DateHistogramIT extends ESIntegTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=date") prepareCreate("cache_test_idx").setMapping("d", "type=date").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1)); String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1));
String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1));

View file

@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.ScriptType;
@ -616,7 +615,7 @@ public class DateRangeIT extends ESIntegTestCase {
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("date", "type=date") prepareCreate("cache_test_idx").setMapping("date", "type=date")
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
); );
indexRandom( indexRandom(
true, true,

View file

@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Strings; import org.elasticsearch.core.Strings;
import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryBuilders;
@ -942,7 +941,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase {
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=float") prepareCreate("cache_test_idx").setMapping("d", "type=float")
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
); );
indexRandom( indexRandom(
true, true,

View file

@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.MockScriptPlugin;
@ -1115,7 +1114,7 @@ public class HistogramIT extends ESIntegTestCase {
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=float") prepareCreate("cache_test_idx").setMapping("d", "type=float")
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
); );
indexRandom( indexRandom(
true, true,

View file

@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Strings; import org.elasticsearch.core.Strings;
import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryBuilders;
@ -899,8 +898,7 @@ public class LongTermsIT extends AbstractTermsTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
@ -896,7 +895,7 @@ public class RangeIT extends ESIntegTestCase {
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("i", "type=integer") prepareCreate("cache_test_idx").setMapping("i", "type=integer")
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
); );
indexRandom( indexRandom(
true, true,

View file

@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryBuilders;
@ -548,7 +547,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("s", "type=long", "t", "type=text") prepareCreate("cache_test_idx").setMapping("s", "type=long", "t", "type=text")
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
); );
indexRandom( indexRandom(
true, true,

View file

@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.bucket.terms;
import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
@ -46,9 +45,7 @@ public class RareTermsIT extends ESSingleNodeTestCase {
} }
public void testSingleValuedString() { public void testSingleValuedString() {
final Settings.Builder settings = Settings.builder() final Settings.Builder settings = indexSettings(2, 0);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
createIndex(index, settings.build()); createIndex(index, settings.build());
// We want to trigger the usage of cuckoo filters that happen only when there are // We want to trigger the usage of cuckoo filters that happen only when there are
// more than 10k distinct values in one shard. // more than 10k distinct values in one shard.

View file

@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.mapper.IndexFieldMapper; import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryBuilders;
@ -1198,7 +1197,7 @@ public class StringTermsIT extends AbstractTermsTestCase {
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=keyword") prepareCreate("cache_test_idx").setMapping("d", "type=keyword")
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
); );
indexRandom( indexRandom(
true, true,

View file

@ -7,7 +7,6 @@
*/ */
package org.elasticsearch.search.aggregations.metrics; package org.elasticsearch.search.aggregations.metrics;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.ScriptType;
@ -905,8 +904,7 @@ public class ExtendedStatsIT extends AbstractNumericTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics; package org.elasticsearch.search.aggregations.metrics;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.ScriptType;
@ -568,8 +567,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics; package org.elasticsearch.search.aggregations.metrics;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
@ -541,8 +540,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -9,7 +9,6 @@
package org.elasticsearch.search.aggregations.metrics; package org.elasticsearch.search.aggregations.metrics;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.ScriptType;
@ -494,8 +493,7 @@ public class MedianAbsoluteDeviationIT extends AbstractNumericTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(

View file

@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.metrics;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.Strings; import org.elasticsearch.core.Strings;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
@ -1138,8 +1137,7 @@ public class ScriptedMetricIT extends ESIntegTestCase {
Script ndRandom = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return Math.random()", Collections.emptyMap()); Script ndRandom = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return Math.random()", Collections.emptyMap());
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.metrics;
import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.ScriptType;
@ -234,8 +233,7 @@ public class StatsIT extends AbstractNumericTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics; package org.elasticsearch.search.aggregations.metrics;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.ScriptType;
@ -208,8 +207,7 @@ public class SumIT extends AbstractNumericTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics; package org.elasticsearch.search.aggregations.metrics;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
@ -485,8 +484,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics; package org.elasticsearch.search.aggregations.metrics;
import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
@ -457,8 +456,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -1082,9 +1082,7 @@ public class TopHitsIT extends ESIntegTestCase {
try { try {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long")
.setSettings( .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)
)
); );
indexRandom( indexRandom(
true, true,

View file

@ -7,7 +7,6 @@
*/ */
package org.elasticsearch.search.aggregations.metrics; package org.elasticsearch.search.aggregations.metrics;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script; import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType; import org.elasticsearch.script.ScriptType;
@ -215,8 +214,7 @@ public class ValueCountIT extends ESIntegTestCase {
*/ */
public void testScriptCaching() throws Exception { public void testScriptCaching() throws Exception {
assertAcked( assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long") prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
); );
indexRandom( indexRandom(
true, true,

View file

@ -93,7 +93,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
protected void setupSuiteScopeCluster() throws Exception { protected void setupSuiteScopeCluster() throws Exception {
assertAcked( assertAcked(
indicesAdmin().prepareCreate("idx") indicesAdmin().prepareCreate("idx")
.setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) .setSettings(indexSettings(1, 0))
.setMapping(STRING_FIELD, "type=keyword", NUMBER_FIELD, "type=integer", TAG_FIELD, "type=keyword") .setMapping(STRING_FIELD, "type=keyword", NUMBER_FIELD, "type=integer", TAG_FIELD, "type=keyword")
); );
List<IndexRequestBuilder> builders = new ArrayList<>(); List<IndexRequestBuilder> builders = new ArrayList<>();
@ -634,11 +634,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
* documents and that is hard to express in yaml. * documents and that is hard to express in yaml.
*/ */
public void testFilterByFilter() throws InterruptedException, IOException { public void testFilterByFilter() throws InterruptedException, IOException {
assertAcked( assertAcked(indicesAdmin().prepareCreate("dateidx").setSettings(indexSettings(1, 0)).setMapping("date", "type=date"));
indicesAdmin().prepareCreate("dateidx")
.setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0))
.setMapping("date", "type=date")
);
List<IndexRequestBuilder> builders = new ArrayList<>(); List<IndexRequestBuilder> builders = new ArrayList<>();
for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2; i++) { for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2; i++) {
String date = Instant.ofEpochSecond(i).toString(); String date = Instant.ofEpochSecond(i).toString();
@ -713,7 +709,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
try { try {
assertAcked( assertAcked(
indicesAdmin().prepareCreate("date_filter_by_filter_disabled") indicesAdmin().prepareCreate("date_filter_by_filter_disabled")
.setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) .setSettings(indexSettings(1, 0))
.setMapping("date", "type=date", "keyword", "type=keyword") .setMapping("date", "type=date", "keyword", "type=keyword")
); );
List<IndexRequestBuilder> builders = new ArrayList<>(); List<IndexRequestBuilder> builders = new ArrayList<>();

View file

@ -298,7 +298,7 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase {
logger.info("--> snapshot"); logger.info("--> snapshot");
final String index = "test-idx"; final String index = "test-idx";
assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0))); assertAcked(prepareCreate(index, 1, indexSettings(1, 0)));
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
indexDoc(index, Integer.toString(i), "foo", "bar" + i); indexDoc(index, Integer.toString(i), "foo", "bar" + i);
} }

View file

@ -1788,9 +1788,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
final String index = "test-idx"; final String index = "test-idx";
final String snapshot = "test-snap"; final String snapshot = "test-snap";
assertAcked( assertAcked(prepareCreate(index, 1, indexSettings(numPrimaries, numReplicas)));
prepareCreate(index, 1, Settings.builder().put("number_of_shards", numPrimaries).put("number_of_replicas", numReplicas))
);
indexRandomDocs(index, 100); indexRandomDocs(index, 100);

View file

@ -29,7 +29,6 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.NodesShutdownMetadata;
import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
@ -1461,11 +1460,7 @@ public class SnapshotStressTestsIT extends AbstractSnapshotIntegTestCase {
docPermits = new Semaphore(between(1000, 3000)); docPermits = new Semaphore(between(1000, 3000));
logger.info("--> create index [{}] with max [{}] docs", indexName, docPermits.availablePermits()); logger.info("--> create index [{}] with max [{}] docs", indexName, docPermits.availablePermits());
indicesAdmin().prepareCreate(indexName) indicesAdmin().prepareCreate(indexName)
.setSettings( .setSettings(indexSettings(shardCount, between(0, cluster.numDataNodes() - 1)))
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shardCount)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(0, cluster.numDataNodes() - 1))
)
.execute(mustSucceed(response -> { .execute(mustSucceed(response -> {
assertTrue(response.isAcknowledged()); assertTrue(response.isAcknowledged());
logger.info("--> finished create index [{}]", indexName); logger.info("--> finished create index [{}]", indexName);

View file

@ -1039,11 +1039,7 @@ public class DataStreamTests extends AbstractXContentSerializingTestCase<DataStr
dataStream.validate( dataStream.validate(
(index) -> IndexMetadata.builder(index) (index) -> IndexMetadata.builder(index)
.settings( .settings(
Settings.builder() indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()).build()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.build()
) )
.build() .build()
); );
@ -1058,10 +1054,7 @@ public class DataStreamTests extends AbstractXContentSerializingTestCase<DataStr
() -> dataStream.validate( () -> dataStream.validate(
(index) -> IndexMetadata.builder(index) (index) -> IndexMetadata.builder(index)
.settings( .settings(
Settings.builder() indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES)
.put(IndexSettings.TIME_SERIES_START_TIME.getKey(), start3.toEpochMilli()) .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), start3.toEpochMilli())
.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), end3.toEpochMilli()) .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), end3.toEpochMilli())

View file

@ -2109,10 +2109,7 @@ public class ShardsAvailabilityHealthIndicatorServiceTests extends ESTestCase {
for (Map.Entry<String, Integer> indexNameToPriority : indexNameToPriorityMap.entrySet()) { for (Map.Entry<String, Integer> indexNameToPriority : indexNameToPriorityMap.entrySet()) {
String indexName = indexNameToPriority.getKey(); String indexName = indexNameToPriority.getKey();
IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexName); IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexName);
Settings settings = Settings.builder() Settings settings = indexSettings(1, 1).put(IndexMetadata.SETTING_PRIORITY, indexNameToPriority.getValue())
.put(IndexMetadata.SETTING_PRIORITY, indexNameToPriority.getValue())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.build(); .build();
indexMetadataBuilder.settings(settings); indexMetadataBuilder.settings(settings);

View file

@ -726,10 +726,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
.version(1L) .version(1L)
.putMapping(randomMappingMetadataOrNull()) .putMapping(randomMappingMetadataOrNull())
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, indexUUID) .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID)
) )
) )
@ -797,10 +794,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
.version(indexMetadataVersion - 1) // -1 because it's incremented in .put() .version(indexMetadataVersion - 1) // -1 because it's incremented in .put()
.putMapping(randomMappingMetadataOrNull()) .putMapping(randomMappingMetadataOrNull())
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, indexUUID) .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID)
) )
) )
@ -931,10 +925,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
.putMapping(randomMappingMetadataOrNull()) .putMapping(randomMappingMetadataOrNull())
.version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put() .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
.settings( .settings(
Settings.builder() indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, updatedIndexUuid) .put(IndexMetadata.SETTING_INDEX_UUID, updatedIndexUuid)
) )
) )
@ -943,10 +934,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
.putMapping(randomMappingMetadataOrNull()) .putMapping(randomMappingMetadataOrNull())
.version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put() .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
.settings( .settings(
Settings.builder() indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, deletedIndexUuid) .put(IndexMetadata.SETTING_INDEX_UUID, deletedIndexUuid)
) )
) )
@ -990,10 +978,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
.version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put() .version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
.putMapping(randomMappingMetadataOrNull()) .putMapping(randomMappingMetadataOrNull())
.settings( .settings(
Settings.builder() indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, addedIndexUuid) .put(IndexMetadata.SETTING_INDEX_UUID, addedIndexUuid)
) )
) )
@ -1040,10 +1025,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
IndexMetadata.builder(index.getName()) IndexMetadata.builder(index.getName())
.putMapping(randomMappingMetadataOrNull()) .putMapping(randomMappingMetadataOrNull())
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
) )
) )
@ -1074,10 +1056,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
IndexMetadata.builder("test-" + i) IndexMetadata.builder("test-" + i)
.putMapping(randomMappingMetadataOrNull()) .putMapping(randomMappingMetadataOrNull())
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
) )
); );
@ -1254,10 +1233,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
IndexMetadata.builder("test") IndexMetadata.builder("test")
.putMapping(randomMappingMetadata()) .putMapping(randomMappingMetadata())
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, "test-uuid") .put(IndexMetadata.SETTING_INDEX_UUID, "test-uuid")
) )
) )
@ -1369,10 +1345,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
IndexMetadata.builder("index-" + i) IndexMetadata.builder("index-" + i)
.putMapping(randomMappingMetadataOrNull()) .putMapping(randomMappingMetadataOrNull())
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
) )
) )
@ -1592,10 +1565,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
IndexMetadata.builder("test-1") IndexMetadata.builder("test-1")
.putMapping(randomMappingMetadata()) .putMapping(randomMappingMetadata())
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
) )
) )
@ -1647,10 +1617,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
IndexMetadata.builder("test-1") IndexMetadata.builder("test-1")
.putMapping(randomMappingMetadata()) .putMapping(randomMappingMetadata())
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
) )
) )
@ -1713,10 +1680,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
IndexMetadata.builder("test-" + i) IndexMetadata.builder("test-" + i)
.putMapping(mapping1) .putMapping(mapping1)
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
) )
); );
@ -1740,10 +1704,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
IndexMetadata.builder("test-" + 99) IndexMetadata.builder("test-" + 99)
.putMapping(mapping2) .putMapping(mapping2)
.settings( .settings(
Settings.builder() indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
) )
); );

View file

@ -1816,10 +1816,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
private static Settings defaultIndexSettings(int shards) { private static Settings defaultIndexSettings(int shards) {
// TODO: randomize replica count settings once recovery operations aren't blocking anymore // TODO: randomize replica count settings once recovery operations aren't blocking anymore
return Settings.builder() return indexSettings(shards, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
} }
private static <T> void continueOrDie(SubscribableListener<T> listener, CheckedConsumer<T, Exception> onResponse) { private static <T> void continueOrDie(SubscribableListener<T> listener, CheckedConsumer<T, Exception> onResponse) {

View file

@ -1400,12 +1400,7 @@ public abstract class EngineTestCase extends ESTestCase {
public static MapperService createMapperService() throws IOException { public static MapperService createMapperService() throws IOException {
IndexMetadata indexMetadata = IndexMetadata.builder("test") IndexMetadata indexMetadata = IndexMetadata.builder("test")
.settings( .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()))
Settings.builder()
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
)
.putMapping("{\"properties\": {}}") .putMapping("{\"properties\": {}}")
.build(); .build();
MapperService mapperService = MapperTestUtils.newMapperService( MapperService mapperService = MapperTestUtils.newMapperService(

View file

@ -128,10 +128,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
} }
protected IndexMetadata buildIndexMetadata(int replicas, Settings indexSettings, String mappings) { protected IndexMetadata buildIndexMetadata(int replicas, Settings indexSettings, String mappings) {
Settings settings = Settings.builder() Settings settings = indexSettings(1, replicas).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000))
.put(indexSettings) .put(indexSettings)
.build(); .build();

View file

@ -301,10 +301,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
final IndexingOperationListener... listeners final IndexingOperationListener... listeners
) throws IOException { ) throws IOException {
assert shardRouting.initializing() : shardRouting; assert shardRouting.initializing() : shardRouting;
Settings indexSettings = Settings.builder() Settings indexSettings = indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put( .put(
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(),
randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000) randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)

View file

@ -107,12 +107,7 @@ public abstract class AbstractIndexRecoveryIntegTestCase extends ESIntegTestCase
assertThat(response.isTimedOut(), is(false)); assertThat(response.isTimedOut(), is(false));
indicesAdmin().prepareCreate(indexName) indicesAdmin().prepareCreate(indexName)
.setSettings( .setSettings(indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue"))
Settings.builder()
.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
)
.get(); .get();
List<IndexRequestBuilder> requests = new ArrayList<>(); List<IndexRequestBuilder> requests = new ArrayList<>();
@ -213,12 +208,7 @@ public abstract class AbstractIndexRecoveryIntegTestCase extends ESIntegTestCase
assertThat(response.isTimedOut(), is(false)); assertThat(response.isTimedOut(), is(false));
indicesAdmin().prepareCreate(indexName) indicesAdmin().prepareCreate(indexName)
.setSettings( .setSettings(indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue"))
Settings.builder()
.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
)
.get(); .get();
List<IndexRequestBuilder> requests = new ArrayList<>(); List<IndexRequestBuilder> requests = new ArrayList<>();
@ -314,12 +304,7 @@ public abstract class AbstractIndexRecoveryIntegTestCase extends ESIntegTestCase
final String redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build()); final String redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build());
indicesAdmin().prepareCreate(indexName) indicesAdmin().prepareCreate(indexName)
.setSettings( .setSettings(indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue"))
Settings.builder()
.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
)
.get(); .get();
List<IndexRequestBuilder> requests = new ArrayList<>(); List<IndexRequestBuilder> requests = new ArrayList<>();

View file

@ -99,7 +99,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
indicesAdmin().preparePutTemplate("one_shard_index_template") indicesAdmin().preparePutTemplate("one_shard_index_template")
.setPatterns(Collections.singletonList("*")) .setPatterns(Collections.singletonList("*"))
.setOrder(0) .setOrder(0)
.setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) .setSettings(indexSettings(1, 0))
.get(); .get();
indicesAdmin().preparePutTemplate("random-soft-deletes-template") indicesAdmin().preparePutTemplate("random-soft-deletes-template")
.setPatterns(Collections.singletonList("*")) .setPatterns(Collections.singletonList("*"))

View file

@ -49,10 +49,8 @@ public class IndexSettingsModule extends AbstractModule {
} }
public static IndexSettings newIndexSettings(Index index, Settings indexSetting, Settings nodeSettings, Setting<?>... setting) { public static IndexSettings newIndexSettings(Index index, Settings indexSetting, Settings nodeSettings, Setting<?>... setting) {
Settings build = Settings.builder() Settings build = ESTestCase.indexSettings(1, 1)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(indexSetting) .put(indexSetting)
.build(); .build();
IndexMetadata metadata = IndexMetadata.builder(index.getName()) IndexMetadata metadata = IndexMetadata.builder(index.getName())
@ -67,10 +65,8 @@ public class IndexSettingsModule extends AbstractModule {
} }
public static IndexSettings newIndexSettings(Index index, Settings settings, IndexScopedSettings indexScopedSettings) { public static IndexSettings newIndexSettings(Index index, Settings settings, IndexScopedSettings indexScopedSettings) {
Settings build = Settings.builder() Settings build = ESTestCase.indexSettings(1, 1)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(settings) .put(settings)
.build(); .build();
IndexMetadata metadata = IndexMetadata.builder(index.getName()) IndexMetadata metadata = IndexMetadata.builder(index.getName())

View file

@ -103,10 +103,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
} }
public void testAutoFollow() throws Exception { public void testAutoFollow() throws Exception {
Settings leaderIndexSettings = Settings.builder() Settings leaderIndexSettings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
createLeaderIndex("logs-201812", leaderIndexSettings); createLeaderIndex("logs-201812", leaderIndexSettings);
@ -139,10 +136,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
// Trigger system index creation // Trigger system index creation
leaderClient().prepareIndex(FakeSystemIndex.SYSTEM_INDEX_NAME).setSource(Map.of("a", "b")).get(); leaderClient().prepareIndex(FakeSystemIndex.SYSTEM_INDEX_NAME).setSource(Map.of("a", "b")).get();
Settings leaderIndexSettings = Settings.builder() Settings leaderIndexSettings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
createLeaderIndex("logs-201901", leaderIndexSettings); createLeaderIndex("logs-201901", leaderIndexSettings);
assertLongBusy(() -> { assertLongBusy(() -> {
AutoFollowStats autoFollowStats = getAutoFollowStats(); AutoFollowStats autoFollowStats = getAutoFollowStats();
@ -153,10 +147,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
} }
public void testCleanFollowedLeaderIndexUUIDs() throws Exception { public void testCleanFollowedLeaderIndexUUIDs() throws Exception {
Settings leaderIndexSettings = Settings.builder() Settings leaderIndexSettings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
putAutoFollowPatterns("my-pattern", new String[] { "logs-*" }); putAutoFollowPatterns("my-pattern", new String[] { "logs-*" });
createLeaderIndex("logs-201901", leaderIndexSettings); createLeaderIndex("logs-201901", leaderIndexSettings);
@ -192,10 +183,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
} }
public void testAutoFollowManyIndices() throws Exception { public void testAutoFollowManyIndices() throws Exception {
Settings leaderIndexSettings = Settings.builder() Settings leaderIndexSettings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
putAutoFollowPatterns("my-pattern", new String[] { "logs-*" }); putAutoFollowPatterns("my-pattern", new String[] { "logs-*" });
long numIndices = randomIntBetween(4, 8); long numIndices = randomIntBetween(4, 8);
@ -267,10 +255,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
} }
public void testAutoFollowParameterAreDelegated() throws Exception { public void testAutoFollowParameterAreDelegated() throws Exception {
Settings leaderIndexSettings = Settings.builder() Settings leaderIndexSettings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
// Enabling auto following: // Enabling auto following:
PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT); PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
@ -377,10 +362,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
} }
public void testConflictingPatterns() throws Exception { public void testConflictingPatterns() throws Exception {
Settings leaderIndexSettings = Settings.builder() Settings leaderIndexSettings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
// Enabling auto following: // Enabling auto following:
putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" }); putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" });
@ -422,10 +404,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
} }
public void testPauseAndResumeAutoFollowPattern() throws Exception { public void testPauseAndResumeAutoFollowPattern() throws Exception {
final Settings leaderIndexSettings = Settings.builder() final Settings leaderIndexSettings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
// index created in the remote cluster before the auto follow pattern exists won't be auto followed // index created in the remote cluster before the auto follow pattern exists won't be auto followed
createLeaderIndex("test-existing-index-is-ignored", leaderIndexSettings); createLeaderIndex("test-existing-index-is-ignored", leaderIndexSettings);
@ -504,10 +483,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
} }
public void testPauseAndResumeWithMultipleAutoFollowPatterns() throws Exception { public void testPauseAndResumeWithMultipleAutoFollowPatterns() throws Exception {
final Settings leaderIndexSettings = Settings.builder() final Settings leaderIndexSettings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
final String[] prefixes = { "logs-", "users-", "docs-", "monitoring-", "data-", "system-", "events-", "files-" }; final String[] prefixes = { "logs-", "users-", "docs-", "monitoring-", "data-", "system-", "events-", "files-" };
@ -609,10 +585,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
} }
public void testAutoFollowExclusion() throws Exception { public void testAutoFollowExclusion() throws Exception {
Settings leaderIndexSettings = Settings.builder() Settings leaderIndexSettings = indexSettings(1, 0).build();
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.build();
putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" }, Collections.singletonList("logs-2018*")); putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" }, Collections.singletonList("logs-2018*"));

View file

@ -12,7 +12,6 @@ import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Tuple; import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersion;
@ -124,13 +123,7 @@ public class WaitUntilTimeSeriesEndTimePassesStepTests extends AbstractStepTestC
{ {
// regular indices (non-ts) meet the step condition // regular indices (non-ts) meet the step condition
IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30)) IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30))
.settings( .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()).build())
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.build()
)
.build(); .build();
Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build(); Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build();

View file

@ -11,8 +11,6 @@ import org.apache.http.HttpHost;
import org.elasticsearch.client.Request; import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.NotEqualMessageBuilder;
import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ESRestTestCase;
@ -68,14 +66,7 @@ public class EqlSearchIT extends ESRestTestCase {
bwcNodes = new ArrayList<>(nodes.getBWCNodes()); bwcNodes = new ArrayList<>(nodes.getBWCNodes());
String mappings = readResource(EqlSearchIT.class.getResourceAsStream("/eql_mapping.json")); String mappings = readResource(EqlSearchIT.class.getResourceAsStream("/eql_mapping.json"));
createIndex( createIndex(index, indexSettings(numShards, numReplicas).build(), mappings);
index,
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
.build(),
mappings
);
} }
@After @After

View file

@ -82,12 +82,7 @@ public abstract class AbstractPausableIntegTestCase extends AbstractEsqlIntegTes
mapping.endObject(); mapping.endObject();
} }
mapping.endObject(); mapping.endObject();
client().admin() client().admin().indices().prepareCreate("test").setSettings(indexSettings(1, 0)).setMapping(mapping.endObject()).get();
.indices()
.prepareCreate("test")
.setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0))
.setMapping(mapping.endObject())
.get();
BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
for (int i = 0; i < numberOfDocs(); i++) { for (int i = 0; i < numberOfDocs(); i++) {

View file

@ -9,7 +9,6 @@ package org.elasticsearch.xpack.ml.transforms;
import org.apache.http.util.EntityUtils; import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request; import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ESRestTestCase;
@ -186,9 +185,7 @@ public class PainlessDomainSplitIT extends ESRestTestCase {
} }
public void testIsolated() throws Exception { public void testIsolated() throws Exception {
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 0);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
createIndex("painless", settings.build()); createIndex("painless", settings.build());
Request createDoc = new Request("PUT", "/painless/_doc/1"); Request createDoc = new Request("PUT", "/painless/_doc/1");
@ -282,9 +279,7 @@ public class PainlessDomainSplitIT extends ESRestTestCase {
client().performRequest(new Request("POST", BASE_PATH + "anomaly_detectors/hrd-split-job/_open")); client().performRequest(new Request("POST", BASE_PATH + "anomaly_detectors/hrd-split-job/_open"));
// Create index to hold data // Create index to hold data
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 0);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
createIndex("painless", settings.build(), """ createIndex("painless", settings.build(), """
"properties": { "domain": { "type": "keyword" },"time": { "type": "date" } }"""); "properties": { "domain": { "type": "keyword" },"time": { "type": "date" } }""");

View file

@ -20,7 +20,6 @@ import java.util.List;
import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING;
import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING;
import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING;
import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest.Storage; import static org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest.Storage;
@ -37,11 +36,7 @@ public class SearchableSnapshotsResizeIntegTests extends BaseFrozenSearchableSna
assertAcked( assertAcked(
prepareCreate( prepareCreate(
"index", "index",
Settings.builder() indexSettings(2, 0).put(INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey(), 4).put(INDEX_SOFT_DELETES_SETTING.getKey(), true)
.put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
.put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
.put(INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey(), 4)
.put(INDEX_SOFT_DELETES_SETTING.getKey(), true)
) )
); );
indexRandomDocs("index", scaledRandomIntBetween(0, 1_000)); indexRandomDocs("index", scaledRandomIntBetween(0, 1_000));

View file

@ -54,7 +54,7 @@ public class SnapshotUserRoleIntegTests extends NativeRealmIntegTestCase {
logger.info("--> creating ordinary index"); logger.info("--> creating ordinary index");
final int shards = between(1, 10); final int shards = between(1, 10);
ordinaryIndex = randomAlphaOfLength(4).toLowerCase(Locale.ROOT); ordinaryIndex = randomAlphaOfLength(4).toLowerCase(Locale.ROOT);
assertAcked(prepareCreate(ordinaryIndex, 0, Settings.builder().put("number_of_shards", shards).put("number_of_replicas", 0))); assertAcked(prepareCreate(ordinaryIndex, 0, indexSettings(shards, 0)));
ensureGreen(); ensureGreen();
logger.info("--> creating snapshot_user user"); logger.info("--> creating snapshot_user user");

View file

@ -13,8 +13,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.client.Request; import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.test.NotEqualMessageBuilder;
import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ESRestTestCase;
@ -63,14 +61,7 @@ public class SqlSearchIT extends ESRestTestCase {
bwcNodes = new ArrayList<>(nodes.getBWCNodes()); bwcNodes = new ArrayList<>(nodes.getBWCNodes());
String mappings = readResource(SqlSearchIT.class.getResourceAsStream("/all_field_types.json")); String mappings = readResource(SqlSearchIT.class.getResourceAsStream("/all_field_types.json"));
createIndex( createIndex(index, indexSettings(numShards, numReplicas).build(), mappings);
index,
Settings.builder()
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
.build(),
mappings
);
} }
@After @After

View file

@ -9,7 +9,6 @@ package org.elasticsearch.xpack.transform.integration;
import org.elasticsearch.client.Request; import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants; import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants;
@ -95,9 +94,7 @@ public class TransformAuditorIT extends TransformRestTestCase {
} }
public void testAliasCreatedforBWCIndexes() throws Exception { public void testAliasCreatedforBWCIndexes() throws Exception {
Settings.Builder settings = Settings.builder() Settings.Builder settings = indexSettings(1, 0);
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
// These indices should only exist if created in previous versions, ignore the deprecation warning for this test // These indices should only exist if created in previous versions, ignore the deprecation warning for this test
RequestOptions options = expectWarnings( RequestOptions options = expectWarnings(

View file

@ -133,10 +133,7 @@ public class WatchStoreUtilsTests extends ESTestCase {
private IndexMetadata createIndexMetaData(String indexName, AliasMetadata aliasMetadata) { private IndexMetadata createIndexMetaData(String indexName, AliasMetadata aliasMetadata) {
IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexName); IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexName);
Settings settings = Settings.builder() Settings settings = indexSettings(1, 1).put(IndexMetadata.SETTING_PRIORITY, 5)
.put(IndexMetadata.SETTING_PRIORITY, 5)
.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.build(); .build();
indexMetadataBuilder.settings(settings); indexMetadataBuilder.settings(settings);