Mirror of https://github.com/elastic/elasticsearch.git, synced 2025-06-28 09:28:55 -04:00
Save 400 LoC in tests by using indexSettings shortcut (#111573)
It's in the title: I randomly saw a bunch of spots where we're not using the shortcut, so I figured I'd clean them up quickly and save ~400 lines.
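For context, indexSettings(shards, replicas) is a helper from the shared test infrastructure. The hunks below only show call sites, but judging from the lines it replaces, it returns a Settings.Builder with index.number_of_shards and index.number_of_replicas already set, so callers only chain whatever else differs. A rough sketch of the idea, plus a typical call-site change taken from one of the hunks below (bwcNames is a variable local to that test):

    // Sketch only: inferred from the replacements in this diff, not copied from the real helper.
    static Settings.Builder indexSettings(int shards, int replicas) {
        return Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shards)      // "index.number_of_shards"
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas); // "index.number_of_replicas"
    }

    // Typical call-site change made throughout this commit:
    // before
    Settings.Builder settings = Settings.builder()
        .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
        .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
        .put("index.routing.allocation.include._name", bwcNames);
    // after
    Settings.Builder settings = indexSettings(1, 2).put("index.routing.allocation.include._name", bwcNames);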
Parent: e58678da4e
Commit: bf7be8e23a
69 changed files with 132 additions and 498 deletions
@@ -17,7 +17,6 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.aggregations.AggregationIntegTestCase;
import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries;
import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.MatchAllQueryBuilder;

@@ -102,15 +101,11 @@ public class TimeSeriesAggregationsUnlimitedDimensionsIT extends AggregationInte
        final String[] routingDimensions
    ) {
        return prepareCreate("index").setSettings(
-           Settings.builder()
-               .put("mode", "time_series")
+           indexSettings(randomIntBetween(1, 3), randomIntBetween(1, 3)).put("mode", "time_series")
                .put("routing_path", String.join(",", routingDimensions))
-               .put("index.number_of_shards", randomIntBetween(1, 3))
-               .put("index.number_of_replicas", randomIntBetween(1, 3))
                .put("time_series.start_time", startMillis)
                .put("time_series.end_time", endMillis)
                .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192)
                .build()
        ).setMapping(mapping).get();
    }
@@ -16,7 +16,6 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.aggregations.AggregationIntegTestCase;
import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries;
import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.MatchAllQueryBuilder;

@@ -103,15 +102,11 @@ public class TimeSeriesNestedAggregationsIT extends AggregationIntegTestCase {
        final String[] routingDimensions
    ) {
        return prepareCreate("index").setSettings(
-           Settings.builder()
-               .put("mode", "time_series")
+           indexSettings(randomIntBetween(1, 3), randomIntBetween(1, 3)).put("mode", "time_series")
                .put("routing_path", String.join(",", routingDimensions))
-               .put("index.number_of_shards", randomIntBetween(1, 3))
-               .put("index.number_of_replicas", randomIntBetween(1, 3))
                .put("time_series.start_time", startMillis)
                .put("time_series.end_time", endMillis)
                .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192)
                .build()
        ).setMapping(mapping).get();
    }
@@ -112,11 +112,7 @@ public class DataStreamAutoshardingIT extends ESIntegTestCase {
    public void testRolloverOnAutoShardCondition() throws Exception {
        final String dataStreamName = "logs-es";

-       putComposableIndexTemplate(
-           "my-template",
-           List.of("logs-*"),
-           Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
-       );
+       putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build());
        final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
        assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet());

@@ -277,11 +273,7 @@ public class DataStreamAutoshardingIT extends ESIntegTestCase {
        final String dataStreamName = "logs-es";

        // start with 3 shards
-       putComposableIndexTemplate(
-           "my-template",
-           List.of("logs-*"),
-           Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
-       );
+       putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build());
        final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
        assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet());

@@ -391,11 +383,7 @@ public class DataStreamAutoshardingIT extends ESIntegTestCase {
    public void testLazyRolloverKeepsPreviousAutoshardingDecision() throws IOException {
        final String dataStreamName = "logs-es";

-       putComposableIndexTemplate(
-           "my-template",
-           List.of("logs-*"),
-           Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).build()
-       );
+       putComposableIndexTemplate("my-template", List.of("logs-*"), indexSettings(3, 0).build());
        final var createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
        assertAcked(client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet());
@@ -142,10 +142,7 @@ public class TSDBPassthroughIndexingIT extends ESSingleNodeTestCase {
    }

    public void testIndexingGettingAndSearching() throws Exception {
-       var templateSettings = Settings.builder()
-           .put("index.mode", "time_series")
-           .put("index.number_of_shards", randomIntBetween(2, 10))
-           .put("index.number_of_replicas", 0);
+       var templateSettings = indexSettings(randomIntBetween(2, 10), 0).put("index.mode", "time_series");

        var request = new TransportPutComposableIndexTemplateAction.Request("id");
        request.indexTemplate(

@@ -218,10 +215,7 @@ public class TSDBPassthroughIndexingIT extends ESSingleNodeTestCase {

    public void testIndexingGettingAndSearchingShrunkIndex() throws Exception {
        String dataStreamName = "k8s";
-       var templateSettings = Settings.builder()
-           .put("index.mode", "time_series")
-           .put("index.number_of_shards", 8)
-           .put("index.number_of_replicas", 0);
+       var templateSettings = indexSettings(8, 0).put("index.mode", "time_series");

        var request = new TransportPutComposableIndexTemplateAction.Request("id");
        request.indexTemplate(
@@ -416,10 +416,7 @@ public class DataStreamLifecycleServiceIT extends ESIntegTestCase {
            "id1",
            null,
            List.of(dataStreamName + "*"),
-           Settings.builder()
-               .put("index.number_of_replicas", 1)
-               .put("index.number_of_shards", 1)
-               .put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), ONE_HUNDRED_MB)
+           indexSettings(1, 1).put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), ONE_HUNDRED_MB)
                .put(MergePolicyConfig.INDEX_MERGE_POLICY_MERGE_FACTOR_SETTING.getKey(), TARGET_MERGE_FACTOR_VALUE)
                .build(),
            null,
@@ -1392,13 +1392,7 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
        {
            // non time_series indices are not within time bounds (they don't have any)
            IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30))
-               .settings(
-                   Settings.builder()
-                       .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-                       .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-                       .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
-                       .build()
-               )
+               .settings(indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()))
                .build();

            Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build();

@@ -1596,12 +1590,14 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
        var routingTableBuilder = RoutingTable.builder();
        Metadata.Builder metadataBuilder = Metadata.builder();
        Map<String, IndexMetadata> indices = new HashMap<>();
-       Settings indexSettings = Settings.builder()
-           .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), randomIntBetween(1, 10))
-           .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), randomIntBetween(0, 3))
-           .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
-           .build();
-       IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName).version(randomLong()).settings(indexSettings);
+       IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName)
+           .version(randomLong())
+           .settings(
+               indexSettings(randomIntBetween(1, 10), randomIntBetween(0, 3)).put(
+                   IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(),
+                   IndexVersion.current()
+               )
+           );
        if (customDataStreamLifecycleMetadata != null) {
            indexMetadataBuilder.putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, customDataStreamLifecycleMetadata);
        }
@@ -920,9 +920,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
        final String indexName = "test_empty_shard";

        if (isRunningAgainstOldCluster()) {
-           Settings.Builder settings = Settings.builder()
-               .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-               .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+           Settings.Builder settings = indexSettings(1, 1)
                // if the node with the replica is the first to be restarted, while a replica is still recovering
                // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN

@@ -1522,14 +1520,7 @@
     */
    public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception {
        if (isRunningAgainstOldCluster()) {
-           createIndex(
-               index,
-               Settings.builder()
-                   .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-                   .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-                   .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
-                   .build()
-           );
+           createIndex(index, indexSettings(1, 1).put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true).build());
            ensureGreen(index);
            int numDocs = randomIntBetween(10, 100);
            for (int i = 0; i < numDocs; i++) {

@@ -1549,9 +1540,7 @@
    public void testResize() throws Exception {
        int numDocs;
        if (isRunningAgainstOldCluster()) {
-           final Settings.Builder settings = Settings.builder()
-               .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 3)
-               .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1);
+           final Settings.Builder settings = indexSettings(3, 1);
            if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) {
                settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), false);
            }
@@ -13,7 +13,6 @@ import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.support.XContentMapValues;

@@ -75,10 +74,7 @@ public class IndexingIT extends ESRestTestCase {
        logger.info("cluster discovered: {}", nodes.toString());
        final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList());
        final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(","));
-       Settings.Builder settings = Settings.builder()
-           .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-           .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
-           .put("index.routing.allocation.include._name", bwcNames);
+       Settings.Builder settings = indexSettings(1, 2).put("index.routing.allocation.include._name", bwcNames);
        final String index = "indexversionprop";
        final int minUpdates = 5;
        final int maxUpdates = 10;

@@ -165,10 +161,7 @@ public class IndexingIT extends ESRestTestCase {
        logger.info("cluster discovered: {}", nodes.toString());
        final List<String> bwcNamesList = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.toList());
        final String bwcNames = bwcNamesList.stream().collect(Collectors.joining(","));
-       Settings.Builder settings = Settings.builder()
-           .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-           .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
-           .put("index.routing.allocation.include._name", bwcNames);
+       Settings.Builder settings = indexSettings(1, 2).put("index.routing.allocation.include._name", bwcNames);

        final String index = "test";
        createIndex(index, settings.build());

@@ -251,10 +244,7 @@ public class IndexingIT extends ESRestTestCase {
        String bwcNames = nodes.getBWCNodes().stream().map(MixedClusterTestNode::nodeName).collect(Collectors.joining(","));

        // Allocating shards on the BWC nodes to makes sure that taking snapshot happens on those nodes.
-       Settings.Builder settings = Settings.builder()
-           .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(5, 10))
-           .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-           .put("index.routing.allocation.include._name", bwcNames);
+       Settings.Builder settings = indexSettings(between(5, 10), 1).put("index.routing.allocation.include._name", bwcNames);

        final String index = "test-snapshot-index";
        createIndex(index, settings.build());

@@ -315,14 +305,7 @@ public class IndexingIT extends ESRestTestCase {
        int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1);
        int totalShards = numShards * (numOfReplicas + 1);
        final String index = "test_synced_flush";
-       createIndex(
-           index,
-           Settings.builder()
-               .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
-               .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas)
-               .put("index.routing.allocation.include._name", newNodes)
-               .build()
-       );
+       createIndex(index, indexSettings(numShards, numOfReplicas).put("index.routing.allocation.include._name", newNodes).build());
        ensureGreen(index);
        indexDocs(index, randomIntBetween(0, 100), between(1, 100));
        try (

@@ -394,14 +377,7 @@ public class IndexingIT extends ESRestTestCase {
        int numOfReplicas = randomIntBetween(0, nodes.getNewNodes().size() - 1);
        int totalShards = numShards * (numOfReplicas + 1);
        final String index = "test_flush";
-       createIndex(
-           index,
-           Settings.builder()
-               .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
-               .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas)
-               .put("index.routing.allocation.include._name", newNodes)
-               .build()
-       );
+       createIndex(index, indexSettings(numShards, numOfReplicas).put("index.routing.allocation.include._name", newNodes).build());
        ensureGreen(index);
        indexDocs(index, randomIntBetween(0, 100), between(1, 100));
        try (
@@ -10,7 +10,6 @@ package org.elasticsearch.backwards;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.Strings;

@@ -43,9 +42,7 @@ public class RareTermsIT extends ESRestTestCase {
    }

    public void testSingleValuedString() throws Exception {
-       final Settings.Builder settings = Settings.builder()
-           .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
-           .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
+       final Settings.Builder settings = indexSettings(2, 0);
        createIndex(index, settings.build());
        // We want to trigger the usage oif cuckoo filters that happen only when there are
        // more than 10k distinct values in one shard.
@@ -12,8 +12,6 @@ import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Strings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.rest.ESRestTestCase;

@@ -50,13 +48,7 @@ public class SearchWithMinCompatibleSearchNodeIT extends ESRestTestCase {
        allNodes.addAll(nodes.getNewNodes());

        if (client().performRequest(new Request("HEAD", "/" + index)).getStatusLine().getStatusCode() == 404) {
-           createIndex(
-               index,
-               Settings.builder()
-                   .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
-                   .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
-                   .build()
-           );
+           createIndex(index, indexSettings(numShards, numReplicas).build());
            for (int i = 0; i < numDocs; i++) {
                Request request = new Request("PUT", index + "/_doc/" + i);
                request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}");
@@ -60,9 +60,7 @@ public class RecoveryIT extends AbstractRollingTestCase {
    public void testHistoryUUIDIsGenerated() throws Exception {
        final String index = "index_history_uuid";
        if (CLUSTER_TYPE == ClusterType.OLD) {
-           Settings.Builder settings = Settings.builder()
-               .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-               .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+           Settings.Builder settings = indexSettings(1, 1)
                // if the node with the replica is the first to be restarted, while a replica is still recovering
                // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN

@@ -128,9 +126,7 @@

        switch (CLUSTER_TYPE) {
            case OLD -> {
-               Settings.Builder settings = Settings.builder()
-                   .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-                   .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
+               Settings.Builder settings = indexSettings(1, 2)
                    // if the node with the replica is the first to be restarted, while a replica is still recovering
                    // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                    // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN

@@ -217,9 +213,7 @@
        final String index = "relocation_with_concurrent_indexing";
        switch (CLUSTER_TYPE) {
            case OLD -> {
-               Settings.Builder settings = Settings.builder()
-                   .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-                   .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)
+               Settings.Builder settings = indexSettings(1, 2)
                    // if the node with the replica is the first to be restarted, while a replica is still recovering
                    // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                    // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN

@@ -296,9 +290,7 @@
    public void testRecovery() throws Exception {
        final String index = "test_recovery";
        if (CLUSTER_TYPE == ClusterType.OLD) {
-           Settings.Builder settings = Settings.builder()
-               .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-               .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+           Settings.Builder settings = indexSettings(1, 1)
                // if the node with the replica is the first to be restarted, while a replica is still recovering
                // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
@@ -413,9 +405,7 @@
        if (CLUSTER_TYPE == ClusterType.OLD) {
            createIndex(
                indexName,
-               Settings.builder()
-                   .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-                   .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+               indexSettings(1, 1)
                    // if the node with the replica is the first to be restarted, while a replica is still recovering
                    // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                    // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN

@@ -453,13 +443,7 @@
        final String indexName = String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(id)).toLowerCase(Locale.ROOT);

        if (indexExists(indexName) == false) {
-           createIndex(
-               indexName,
-               Settings.builder()
-                   .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-                   .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-                   .build()
-           );
+           createIndex(indexName, indexSettings(1, 0).build());
            ensureGreen(indexName);
            closeIndex(indexName);
        }

@@ -482,10 +466,7 @@
        if (CLUSTER_TYPE == ClusterType.OLD) {
            createIndex(
                indexName,
-               Settings.builder()
-                   .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-                   .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-                   .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
+               indexSettings(1, 0).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
                    .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "24h")
                    .put("index.routing.allocation.include._name", CLUSTER_NAME + "-0")
                    .build()

@@ -578,9 +559,7 @@
    public void testUpdateDoc() throws Exception {
        final String index = "test_update_doc";
        if (CLUSTER_TYPE == ClusterType.OLD) {
-           Settings.Builder settings = Settings.builder()
-               .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-               .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2);
+           Settings.Builder settings = indexSettings(1, 2);
            createIndex(index, settings.build());
            indexDocs(index, 0, 100);
        }

@@ -648,9 +627,7 @@
    public void testOperationBasedRecovery() throws Exception {
        final String index = "test_operation_based_recovery";
        if (CLUSTER_TYPE == ClusterType.OLD) {
-           final Settings.Builder settings = Settings.builder()
-               .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-               .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2);
+           final Settings.Builder settings = indexSettings(1, 2);
            if (minimumIndexVersion().before(IndexVersions.V_8_0_0) && randomBoolean()) {
                settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
            }
@@ -59,10 +59,7 @@ public class SnapshotBasedRecoveryIT extends AbstractRollingUpgradeTestCase {
        final String repositoryName = "snapshot_based_recovery_repo";
        final int numDocs = 200;
        if (isOldCluster()) {
-           Settings.Builder settings = Settings.builder()
-               .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-               .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-               .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
+           Settings.Builder settings = indexSettings(1, 0).put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
                .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
            createIndex(indexName, settings.build());
            ensureGreen(indexName);
@@ -161,7 +161,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
        ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get();
        assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));

-       prepareCreate("test1").setSettings(Settings.builder().put("number_of_shards", 2).put("number_of_replicas", 1)).get();
+       prepareCreate("test1").setSettings(indexSettings(2, 1)).get();

        response = clusterAdmin().prepareClusterStats().get();
        assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW));

@@ -179,7 +179,7 @@
        assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L));
        assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0);

-       prepareCreate("test2").setSettings(Settings.builder().put("number_of_shards", 3).put("number_of_replicas", 0)).get();
+       prepareCreate("test2").setSettings(indexSettings(3, 0)).get();
        ensureGreen();
        response = clusterAdmin().prepareClusterStats().get();
        assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
@@ -306,10 +306,7 @@ public class CreateIndexIT extends ESIntegTestCase {

    public void testFailureToCreateIndexCleansUpIndicesService() {
        final int numReplicas = internalCluster().numDataNodes();
-       Settings settings = Settings.builder()
-           .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-           .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), numReplicas)
-           .build();
+       Settings settings = indexSettings(1, numReplicas).build();
        assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).addAlias(new Alias("alias1").writeIndex(true)).get());

        ActionRequestBuilder<?, ?> builder = indicesAdmin().prepareCreate("test-idx-2")

@@ -328,10 +325,7 @@
     */
    public void testDefaultWaitForActiveShardsUsesIndexSetting() throws Exception {
        final int numReplicas = internalCluster().numDataNodes();
-       Settings settings = Settings.builder()
-           .put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas))
-           .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-           .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), numReplicas)
+       Settings settings = indexSettings(1, numReplicas).put(SETTING_WAIT_FOR_ACTIVE_SHARDS.getKey(), Integer.toString(numReplicas))
            .build();
        assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).get());
@@ -211,9 +211,8 @@ public class RolloverIT extends ESIntegTestCase {
        assertAcked(prepareCreate("test_index-2").addAlias(testAlias).get());
        indexDoc("test_index-2", "1", "field", "value");
        flush("test_index-2");
-       final Settings settings = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build();
        final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
-           .settings(settings)
+           .settings(indexSettings(1, 0).build())
            .alias(new Alias("extra_alias"))
            .get();
        assertThat(response.getOldIndex(), equalTo("test_index-2"));
@@ -10,7 +10,6 @@ package org.elasticsearch.action.bulk;

import org.apache.lucene.tests.mockfile.FilterFileChannel;
import org.apache.lucene.tests.mockfile.FilterFileSystemProvider;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.PathUtils;
import org.elasticsearch.core.PathUtilsForTesting;

@@ -59,13 +58,7 @@ public class BulkAfterWriteFsyncFailureIT extends ESSingleNodeTestCase {
        client().admin()
            .indices()
            .prepareCreate(indexName)
-           .setSettings(
-               Settings.builder()
-                   .put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1)
-                   .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-                   .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-                   .build()
-           )
+           .setSettings(indexSettings(1, 0).put(INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1))
            .setMapping("key", "type=keyword", "val", "type=long")
            .get();
        ensureGreen(indexName);
@@ -19,7 +19,6 @@ import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;

@@ -395,14 +394,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
        assertThat(healthResponse.isTimedOut(), equalTo(false));

        final String indexName = "test_index";
-       indicesAdmin().prepareCreate(indexName)
-           .setWaitForActiveShards(ActiveShardCount.NONE)
-           .setSettings(
-               Settings.builder()
-                   .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
-                   .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-           )
-           .get();
+       indicesAdmin().prepareCreate(indexName).setWaitForActiveShards(ActiveShardCount.NONE).setSettings(indexSettings(2, 1)).get();

        try (var dryRunMockLog = MockLog.capture(TransportClusterRerouteAction.class)) {
            dryRunMockLog.addExpectation(
@@ -106,7 +106,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
        }
        updateClusterSettings(settings);
        // Create an index with 10 shards so we can check allocation for it
-       assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 10).put("number_of_replicas", 0)));
+       assertAcked(prepareCreate("test").setSettings(indexSettings(10, 0)));
        ensureGreen("test");

        assertBusy(() -> {

@@ -184,7 +184,7 @@
        updateClusterSettings(builder);

        // Create an index with 6 shards so we can check allocation for it
-       prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0)).get();
+       prepareCreate("test").setSettings(indexSettings(6, 0)).get();
        ensureGreen("test");

        {

@@ -269,7 +269,7 @@
            .map(RoutingNode::nodeId)
            .toList();

-       assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0)));
+       assertAcked(prepareCreate("test").setSettings(indexSettings(6, 0)));

        ensureGreen("test");

@@ -355,10 +355,10 @@

        assertAcked(
            prepareCreate("test").setSettings(
-               Settings.builder()
-                   .put("number_of_shards", 6)
-                   .put("number_of_replicas", 0)
-                   .put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(), nodeIds.get(2))
+               indexSettings(6, 0).put(
+                   IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_id").getKey(),
+                   nodeIds.get(2)
+               )
            )
        );
        ensureGreen("test");

@@ -422,7 +422,7 @@
            .map(RoutingNode::nodeId)
            .toList();

-       assertAcked(prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 6).put("number_of_replicas", 0)));
+       assertAcked(prepareCreate("test").setSettings(indexSettings(6, 0)));

        ensureGreen("test");
@@ -17,7 +17,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.disruption.NetworkDisruption;
import org.elasticsearch.test.disruption.ServiceDisruptionScheme;

@@ -31,9 +30,6 @@ import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;

-import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING;
-import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING;

/**
 * Tests for discovery during disruptions.
 */

@@ -136,13 +132,7 @@ public class DiscoveryDisruptionIT extends AbstractDisruptionTestCase {
        internalCluster().setDisruptionScheme(isolatePreferredMaster);
        isolatePreferredMaster.startDisrupting();

-       client(randomFrom(nonPreferredNodes)).admin()
-           .indices()
-           .prepareCreate("test")
-           .setSettings(
-               Settings.builder().put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-           )
-           .get();
+       client(randomFrom(nonPreferredNodes)).admin().indices().prepareCreate("test").setSettings(indexSettings(1, 0)).get();

        internalCluster().clearDisruptionScheme(false);
        internalCluster().setDisruptionScheme(isolateAllNodes);
@@ -441,13 +441,9 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
            .indices()
            .prepareCreate("test")
            .setSettings(
-               Settings.builder()
-                   .put("number_of_shards", 1)
-                   .put("number_of_replicas", 1)
+               indexSettings(1, 1)
                    // disable merges to keep segments the same
                    .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)

                    // expire retention leases quickly
                    .put(IndexService.RETENTION_LEASE_SYNC_INTERVAL_SETTING.getKey(), "100ms")
            )
@@ -22,7 +22,6 @@ import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;

@@ -849,13 +848,7 @@ public class GetActionIT extends ESIntegTestCase {
        SearcherWrapperPlugin.enabled = true;
        assertAcked(
            prepareCreate("test").setMapping("f", "type=keyword")
-               .setSettings(
-                   Settings.builder()
-                       .put("index.refresh_interval", "-1")
-                       .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-                       .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-                       .put("index.routing.rebalance.enable", "none")
-               )
+               .setSettings(indexSettings(1, 0).put("index.refresh_interval", "-1").put("index.routing.rebalance.enable", "none"))
        );
        // start tracking translog locations in the live version map
        {
@@ -13,9 +13,7 @@ import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.get.TransportGetFromTranslogAction;
import org.elasticsearch.action.get.TransportGetFromTranslogAction.Response;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.threadpool.ThreadPool;

@@ -34,11 +32,8 @@ public class GetFromTranslogActionIT extends ESIntegTestCase {
        assertAcked(
            prepareCreate(INDEX).setMapping("field1", "type=keyword,store=true")
                .setSettings(
-                   Settings.builder()
-                       .put("index.refresh_interval", -1)
-                       // A GetFromTranslogAction runs only Stateless where there is only one active indexing shard.
-                       .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-                       .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                   // A GetFromTranslogAction runs only Stateless where there is only one active indexing shard.
+                   indexSettings(1, 0).put("index.refresh_interval", -1)
                )
                .addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null)))
        );
@@ -15,9 +15,7 @@ import org.elasticsearch.action.get.TransportShardMultiGetFomTranslogAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.threadpool.ThreadPool;

@@ -42,11 +40,8 @@ public class ShardMultiGetFomTranslogActionIT extends ESIntegTestCase {
    public void testShardMultiGetFromTranslog() throws Exception {
        assertAcked(
            prepareCreate(INDEX).setSettings(
-               Settings.builder()
-                   .put("index.refresh_interval", -1)
-                   // A ShardMultiGetFromTranslogAction runs only Stateless where there is only one active indexing shard.
-                   .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-                   .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+               // A ShardMultiGetFromTranslogAction runs only Stateless where there is only one active indexing shard.
+               indexSettings(1, 0).put("index.refresh_interval", -1)
            ).addAlias(new Alias(ALIAS).writeIndex(randomFrom(true, false, null)))
        );
        ensureGreen();
@@ -914,10 +914,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
            prepareCreate(
                name,
                nodeCount,
-               Settings.builder()
-                   .put("number_of_shards", shardCount)
-                   .put("number_of_replicas", replicaCount)
-                   .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0)
+               indexSettings(shardCount, replicaCount).put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), 0)
            )
        );
        ensureGreen();
@@ -227,10 +227,7 @@ public class OpenCloseIndexIT extends ESIntegTestCase {

    public void testOpenWaitingForActiveShardsFailed() throws Exception {
        Client client = client();
-       Settings settings = Settings.builder()
-           .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-           .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-           .build();
+       Settings settings = indexSettings(1, 0).build();
        assertAcked(client.admin().indices().prepareCreate("test").setSettings(settings).get());
        assertAcked(client.admin().indices().prepareClose("test").get());
@@ -1549,8 +1549,7 @@ public class DateHistogramIT extends ESIntegTestCase {
     */
    public void testScriptCaching() throws Exception {
        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=date")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=date").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1));
        String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1));
@@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

@@ -616,7 +615,7 @@ public class DateRangeIT extends ESIntegTestCase {
    public void testScriptCaching() throws Exception {
        assertAcked(
            prepareCreate("cache_test_idx").setMapping("date", "type=date")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+               .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.bucket;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Strings;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.query.QueryBuilders;

@@ -942,7 +941,7 @@ public class DoubleTermsIT extends AbstractTermsTestCase {
    public void testScriptCaching() throws Exception {
        assertAcked(
            prepareCreate("cache_test_idx").setMapping("d", "type=float")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+               .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.MockScriptPlugin;

@@ -1115,7 +1114,7 @@ public class HistogramIT extends ESIntegTestCase {
    public void testScriptCaching() throws Exception {
        assertAcked(
            prepareCreate("cache_test_idx").setMapping("d", "type=float")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+               .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.bucket;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Strings;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.query.QueryBuilders;

@@ -899,8 +898,7 @@ public class LongTermsIT extends AbstractTermsTestCase {
     */
    public void testScriptCaching() throws Exception {
        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=long")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.bucket;

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;

@@ -896,7 +895,7 @@ public class RangeIT extends ESIntegTestCase {
    public void testScriptCaching() throws Exception {
        assertAcked(
            prepareCreate("cache_test_idx").setMapping("i", "type=integer")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+               .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

@@ -548,7 +547,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
    public void testScriptCaching() throws Exception {
        assertAcked(
            prepareCreate("cache_test_idx").setMapping("s", "type=long", "t", "type=text")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+               .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.bucket.terms;

import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;

@@ -46,9 +45,7 @@ public class RareTermsIT extends ESSingleNodeTestCase {
    }

    public void testSingleValuedString() {
-       final Settings.Builder settings = Settings.builder()
-           .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
-           .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
+       final Settings.Builder settings = indexSettings(2, 0);
        createIndex(index, settings.build());
        // We want to trigger the usage of cuckoo filters that happen only when there are
        // more than 10k distinct values in one shard.
@@ -11,7 +11,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.mapper.IndexFieldMapper;
import org.elasticsearch.index.query.QueryBuilders;

@@ -1198,7 +1197,7 @@ public class StringTermsIT extends AbstractTermsTestCase {
    public void testScriptCaching() throws Exception {
        assertAcked(
            prepareCreate("cache_test_idx").setMapping("d", "type=keyword")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+               .setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -7,7 +7,6 @@
 */
package org.elasticsearch.search.aggregations.metrics;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

@@ -905,8 +904,7 @@ public class ExtendedStatsIT extends AbstractNumericTestCase {
     */
    public void testScriptCaching() throws Exception {
        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=long")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics;

import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

@@ -568,8 +567,7 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase {
     */
    public void testScriptCaching() throws Exception {
        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=long")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics;

import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;

@@ -541,8 +540,7 @@ public class HDRPercentilesIT extends AbstractNumericTestCase {
     */
    public void testScriptCaching() throws Exception {
        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=long")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -9,7 +9,6 @@
package org.elasticsearch.search.aggregations.metrics;

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

@@ -494,8 +493,7 @@ public class MedianAbsoluteDeviationIT extends AbstractNumericTestCase {
     */
    public void testScriptCaching() throws Exception {
        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=long")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );

        indexRandom(
@@ -10,7 +10,6 @@ package org.elasticsearch.search.aggregations.metrics;

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.Strings;
import org.elasticsearch.plugins.Plugin;

@@ -1138,8 +1137,7 @@ public class ScriptedMetricIT extends ESIntegTestCase {
        Script ndRandom = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "return Math.random()", Collections.emptyMap());

        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=long")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -9,7 +9,6 @@ package org.elasticsearch.search.aggregations.metrics;

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

@@ -234,8 +233,7 @@ public class StatsIT extends AbstractNumericTestCase {
     */
    public void testScriptCaching() throws Exception {
        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=long")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics;

import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

@@ -208,8 +207,7 @@ public class SumIT extends AbstractNumericTestCase {
     */
    public void testScriptCaching() throws Exception {
        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=long")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics;

import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;

@@ -485,8 +484,7 @@ public class TDigestPercentileRanksIT extends AbstractNumericTestCase {
     */
    public void testScriptCaching() throws Exception {
        assertAcked(
-           prepareCreate("cache_test_idx").setMapping("d", "type=long")
-               .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+           prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
        );
        indexRandom(
            true,
@@ -8,7 +8,6 @@
package org.elasticsearch.search.aggregations.metrics;

import org.apache.logging.log4j.LogManager;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;

@@ -457,8 +456,7 @@ public class TDigestPercentilesIT extends AbstractNumericTestCase {
*/
public void testScriptCaching() throws Exception {
assertAcked(
-prepareCreate("cache_test_idx").setMapping("d", "type=long")
-.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
);
indexRandom(
true,

@@ -1082,9 +1082,7 @@ public class TopHitsIT extends ESIntegTestCase {
try {
assertAcked(
prepareCreate("cache_test_idx").setMapping("d", "type=long")
-.setSettings(
-Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)
-)
+.setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
);
indexRandom(
true,

@@ -7,7 +7,6 @@
*/
package org.elasticsearch.search.aggregations.metrics;

-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

@@ -215,8 +214,7 @@ public class ValueCountIT extends ESIntegTestCase {
*/
public void testScriptCaching() throws Exception {
assertAcked(
-prepareCreate("cache_test_idx").setMapping("d", "type=long")
-.setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1))
+prepareCreate("cache_test_idx").setMapping("d", "type=long").setSettings(indexSettings(1, 1).put("requests.cache.enable", true))
);
indexRandom(
true,

@@ -93,7 +93,7 @@ public class AggregationProfilerIT extends ESIntegTestCase {
protected void setupSuiteScopeCluster() throws Exception {
assertAcked(
indicesAdmin().prepareCreate("idx")
-.setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0))
+.setSettings(indexSettings(1, 0))
.setMapping(STRING_FIELD, "type=keyword", NUMBER_FIELD, "type=integer", TAG_FIELD, "type=keyword")
);
List<IndexRequestBuilder> builders = new ArrayList<>();

@@ -634,11 +634,7 @@
* documents and that is hard to express in yaml.
*/
public void testFilterByFilter() throws InterruptedException, IOException {
-assertAcked(
-indicesAdmin().prepareCreate("dateidx")
-.setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0))
-.setMapping("date", "type=date")
-);
+assertAcked(indicesAdmin().prepareCreate("dateidx").setSettings(indexSettings(1, 0)).setMapping("date", "type=date"));
List<IndexRequestBuilder> builders = new ArrayList<>();
for (int i = 0; i < RangeAggregator.DOCS_PER_RANGE_TO_USE_FILTERS * 2; i++) {
String date = Instant.ofEpochSecond(i).toString();

@@ -713,7 +709,7 @@
try {
assertAcked(
indicesAdmin().prepareCreate("date_filter_by_filter_disabled")
-.setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0))
+.setSettings(indexSettings(1, 0))
.setMapping("date", "type=date", "keyword", "type=keyword")
);
List<IndexRequestBuilder> builders = new ArrayList<>();

@@ -298,7 +298,7 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase {

logger.info("--> snapshot");
final String index = "test-idx";
-assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0)));
+assertAcked(prepareCreate(index, 1, indexSettings(1, 0)));
for (int i = 0; i < 10; i++) {
indexDoc(index, Integer.toString(i), "foo", "bar" + i);
}

@@ -1788,9 +1788,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
final String index = "test-idx";
final String snapshot = "test-snap";

-assertAcked(
-prepareCreate(index, 1, Settings.builder().put("number_of_shards", numPrimaries).put("number_of_replicas", numReplicas))
-);
+assertAcked(prepareCreate(index, 1, indexSettings(numPrimaries, numReplicas)));

indexRandomDocs(index, 100);

@@ -29,7 +29,6 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.NodesShutdownMetadata;
import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -1461,11 +1460,7 @@ public class SnapshotStressTestsIT extends AbstractSnapshotIntegTestCase {
docPermits = new Semaphore(between(1000, 3000));
logger.info("--> create index [{}] with max [{}] docs", indexName, docPermits.availablePermits());
indicesAdmin().prepareCreate(indexName)
-.setSettings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shardCount)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), between(0, cluster.numDataNodes() - 1))
-)
+.setSettings(indexSettings(shardCount, between(0, cluster.numDataNodes() - 1)))
.execute(mustSucceed(response -> {
assertTrue(response.isAcknowledged());
logger.info("--> finished create index [{}]", indexName);

@@ -1039,11 +1039,7 @@ public class DataStreamTests extends AbstractXContentSerializingTestCase<DataStr
dataStream.validate(
(index) -> IndexMetadata.builder(index)
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
-.build()
+indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()).build()
)
.build()
);

@@ -1058,10 +1054,7 @@
() -> dataStream.validate(
(index) -> IndexMetadata.builder(index)
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
+indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES)
.put(IndexSettings.TIME_SERIES_START_TIME.getKey(), start3.toEpochMilli())
.put(IndexSettings.TIME_SERIES_END_TIME.getKey(), end3.toEpochMilli())

@@ -2109,10 +2109,7 @@ public class ShardsAvailabilityHealthIndicatorServiceTests extends ESTestCase {
for (Map.Entry<String, Integer> indexNameToPriority : indexNameToPriorityMap.entrySet()) {
String indexName = indexNameToPriority.getKey();
IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexName);
-Settings settings = Settings.builder()
-.put(IndexMetadata.SETTING_PRIORITY, indexNameToPriority.getValue())
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+Settings settings = indexSettings(1, 1).put(IndexMetadata.SETTING_PRIORITY, indexNameToPriority.getValue())
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.build();
indexMetadataBuilder.settings(settings);

@@ -726,10 +726,7 @@ public class PersistedClusterStateServiceTests extends ESTestCase {
.version(1L)
.putMapping(randomMappingMetadataOrNull())
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, indexUUID)
)
)

@@ -797,10 +794,7 @@
.version(indexMetadataVersion - 1) // -1 because it's incremented in .put()
.putMapping(randomMappingMetadataOrNull())
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, indexUUID)
)
)

@@ -931,10 +925,7 @@
.putMapping(randomMappingMetadataOrNull())
.version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, updatedIndexUuid)
)
)

@@ -943,10 +934,7 @@
.putMapping(randomMappingMetadataOrNull())
.version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, deletedIndexUuid)
)
)

@@ -990,10 +978,7 @@
.version(randomLongBetween(0L, Long.MAX_VALUE - 1) - 1) // -1 because it's incremented in .put()
.putMapping(randomMappingMetadataOrNull())
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, addedIndexUuid)
)
)

@@ -1040,10 +1025,7 @@
IndexMetadata.builder(index.getName())
.putMapping(randomMappingMetadataOrNull())
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
)
)

@@ -1074,10 +1056,7 @@
IndexMetadata.builder("test-" + i)
.putMapping(randomMappingMetadataOrNull())
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
);

@@ -1254,10 +1233,7 @@
IndexMetadata.builder("test")
.putMapping(randomMappingMetadata())
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, "test-uuid")
)
)

@@ -1369,10 +1345,7 @@
IndexMetadata.builder("index-" + i)
.putMapping(randomMappingMetadataOrNull())
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
)

@@ -1592,10 +1565,7 @@
IndexMetadata.builder("test-1")
.putMapping(randomMappingMetadata())
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
)

@@ -1647,10 +1617,7 @@
IndexMetadata.builder("test-1")
.putMapping(randomMappingMetadata())
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
)

@@ -1713,10 +1680,7 @@
IndexMetadata.builder("test-" + i)
.putMapping(mapping1)
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
);

@@ -1740,10 +1704,7 @@
IndexMetadata.builder("test-" + 99)
.putMapping(mapping2)
.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
+indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random()))
)
);

@@ -1816,10 +1816,7 @@ public class SnapshotResiliencyTests extends ESTestCase {

private static Settings defaultIndexSettings(int shards) {
// TODO: randomize replica count settings once recovery operations aren't blocking anymore
-return Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), shards)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+return indexSettings(shards, 0).build();
}

private static <T> void continueOrDie(SubscribableListener<T> listener, CheckedConsumer<T, Exception> onResponse) {

@@ -1400,12 +1400,7 @@ public abstract class EngineTestCase extends ESTestCase {

public static MapperService createMapperService() throws IOException {
IndexMetadata indexMetadata = IndexMetadata.builder("test")
-.settings(
-Settings.builder()
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
-)
+.settings(indexSettings(1, 1).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()))
.putMapping("{\"properties\": {}}")
.build();
MapperService mapperService = MapperTestUtils.newMapperService(

@@ -128,10 +128,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
}

protected IndexMetadata buildIndexMetadata(int replicas, Settings indexSettings, String mappings) {
-Settings settings = Settings.builder()
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas)
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+Settings settings = indexSettings(1, replicas).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000))
.put(indexSettings)
.build();

@@ -301,10 +301,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
final IndexingOperationListener... listeners
) throws IOException {
assert shardRouting.initializing() : shardRouting;
-Settings indexSettings = Settings.builder()
-.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+Settings indexSettings = indexSettings(1, 0).put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
.put(
IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(),
randomBoolean() ? IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.get(Settings.EMPTY) : between(0, 1000)

@@ -107,12 +107,7 @@ public abstract class AbstractIndexRecoveryIntegTestCase extends ESIntegTestCase
assertThat(response.isTimedOut(), is(false));

indicesAdmin().prepareCreate(indexName)
-.setSettings(
-Settings.builder()
-.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-)
+.setSettings(indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue"))
.get();

List<IndexRequestBuilder> requests = new ArrayList<>();

@@ -213,12 +208,7 @@
assertThat(response.isTimedOut(), is(false));

indicesAdmin().prepareCreate(indexName)
-.setSettings(
-Settings.builder()
-.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-)
+.setSettings(indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue"))
.get();

List<IndexRequestBuilder> requests = new ArrayList<>();

@@ -314,12 +304,7 @@
final String redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build());

indicesAdmin().prepareCreate(indexName)
-.setSettings(
-Settings.builder()
-.put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-)
+.setSettings(indexSettings(1, 0).put(IndexMetadata.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue"))
.get();

List<IndexRequestBuilder> requests = new ArrayList<>();

@@ -99,7 +99,7 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
indicesAdmin().preparePutTemplate("one_shard_index_template")
.setPatterns(Collections.singletonList("*"))
.setOrder(0)
-.setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0))
+.setSettings(indexSettings(1, 0))
.get();
indicesAdmin().preparePutTemplate("random-soft-deletes-template")
.setPatterns(Collections.singletonList("*"))

@@ -49,10 +49,8 @@ public class IndexSettingsModule extends AbstractModule {
}

public static IndexSettings newIndexSettings(Index index, Settings indexSetting, Settings nodeSettings, Setting<?>... setting) {
-Settings build = Settings.builder()
+Settings build = ESTestCase.indexSettings(1, 1)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(indexSetting)
.build();
IndexMetadata metadata = IndexMetadata.builder(index.getName())

@@ -67,10 +65,8 @@
}

public static IndexSettings newIndexSettings(Index index, Settings settings, IndexScopedSettings indexScopedSettings) {
-Settings build = Settings.builder()
+Settings build = ESTestCase.indexSettings(1, 1)
.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current())
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
-.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(settings)
.build();
IndexMetadata metadata = IndexMetadata.builder(index.getName())

@@ -103,10 +103,7 @@ public class AutoFollowIT extends CcrIntegTestCase {
}

public void testAutoFollow() throws Exception {
-Settings leaderIndexSettings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+Settings leaderIndexSettings = indexSettings(1, 0).build();

createLeaderIndex("logs-201812", leaderIndexSettings);

@@ -139,10 +136,7 @@
// Trigger system index creation
leaderClient().prepareIndex(FakeSystemIndex.SYSTEM_INDEX_NAME).setSource(Map.of("a", "b")).get();

-Settings leaderIndexSettings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+Settings leaderIndexSettings = indexSettings(1, 0).build();
createLeaderIndex("logs-201901", leaderIndexSettings);
assertLongBusy(() -> {
AutoFollowStats autoFollowStats = getAutoFollowStats();

@@ -153,10 +147,7 @@
}

public void testCleanFollowedLeaderIndexUUIDs() throws Exception {
-Settings leaderIndexSettings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+Settings leaderIndexSettings = indexSettings(1, 0).build();

putAutoFollowPatterns("my-pattern", new String[] { "logs-*" });
createLeaderIndex("logs-201901", leaderIndexSettings);

@@ -192,10 +183,7 @@
}

public void testAutoFollowManyIndices() throws Exception {
-Settings leaderIndexSettings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+Settings leaderIndexSettings = indexSettings(1, 0).build();

putAutoFollowPatterns("my-pattern", new String[] { "logs-*" });
long numIndices = randomIntBetween(4, 8);

@@ -267,10 +255,7 @@
}

public void testAutoFollowParameterAreDelegated() throws Exception {
-Settings leaderIndexSettings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+Settings leaderIndexSettings = indexSettings(1, 0).build();

// Enabling auto following:
PutAutoFollowPatternAction.Request request = new PutAutoFollowPatternAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);

@@ -377,10 +362,7 @@
}

public void testConflictingPatterns() throws Exception {
-Settings leaderIndexSettings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+Settings leaderIndexSettings = indexSettings(1, 0).build();

// Enabling auto following:
putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" });

@@ -422,10 +404,7 @@
}

public void testPauseAndResumeAutoFollowPattern() throws Exception {
-final Settings leaderIndexSettings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+final Settings leaderIndexSettings = indexSettings(1, 0).build();

// index created in the remote cluster before the auto follow pattern exists won't be auto followed
createLeaderIndex("test-existing-index-is-ignored", leaderIndexSettings);

@@ -504,10 +483,7 @@
}

public void testPauseAndResumeWithMultipleAutoFollowPatterns() throws Exception {
-final Settings leaderIndexSettings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+final Settings leaderIndexSettings = indexSettings(1, 0).build();

final String[] prefixes = { "logs-", "users-", "docs-", "monitoring-", "data-", "system-", "events-", "files-" };

@@ -609,10 +585,7 @@
}

public void testAutoFollowExclusion() throws Exception {
-Settings leaderIndexSettings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.build();
+Settings leaderIndexSettings = indexSettings(1, 0).build();

putAutoFollowPatterns("my-pattern1", new String[] { "logs-*" }, Collections.singletonList("logs-2018*"));

@@ -12,7 +12,6 @@ import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexVersion;

@@ -124,13 +123,7 @@ public class WaitUntilTimeSeriesEndTimePassesStepTests extends AbstractStepTestC
{
// regular indices (non-ts) meet the step condition
IndexMetadata indexMeta = IndexMetadata.builder(randomAlphaOfLengthBetween(10, 30))
-.settings(
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
-.build()
-)
+.settings(indexSettings(1, 1).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()).build())
.build();

Metadata newMetadata = Metadata.builder(clusterState.metadata()).put(indexMeta, true).build();

@@ -11,8 +11,6 @@ import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.test.NotEqualMessageBuilder;
import org.elasticsearch.test.rest.ESRestTestCase;

@@ -68,14 +66,7 @@ public class EqlSearchIT extends ESRestTestCase {
bwcNodes = new ArrayList<>(nodes.getBWCNodes());

String mappings = readResource(EqlSearchIT.class.getResourceAsStream("/eql_mapping.json"));
-createIndex(
-index,
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
-.build(),
-mappings
-);
+createIndex(index, indexSettings(numShards, numReplicas).build(), mappings);
}

@After

@@ -82,12 +82,7 @@ public abstract class AbstractPausableIntegTestCase extends AbstractEsqlIntegTes
mapping.endObject();
}
mapping.endObject();
-client().admin()
-.indices()
-.prepareCreate("test")
-.setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0))
-.setMapping(mapping.endObject())
-.get();
+client().admin().indices().prepareCreate("test").setSettings(indexSettings(1, 0)).setMapping(mapping.endObject()).get();

BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
for (int i = 0; i < numberOfDocs(); i++) {

@@ -9,7 +9,6 @@ package org.elasticsearch.xpack.ml.transforms;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.rest.ESRestTestCase;

@@ -186,9 +185,7 @@ public class PainlessDomainSplitIT extends ESRestTestCase {
}

public void testIsolated() throws Exception {
-Settings.Builder settings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
+Settings.Builder settings = indexSettings(1, 0);

createIndex("painless", settings.build());
Request createDoc = new Request("PUT", "/painless/_doc/1");

@@ -282,9 +279,7 @@
client().performRequest(new Request("POST", BASE_PATH + "anomaly_detectors/hrd-split-job/_open"));

// Create index to hold data
-Settings.Builder settings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
+Settings.Builder settings = indexSettings(1, 0);

createIndex("painless", settings.build(), """
"properties": { "domain": { "type": "keyword" },"time": { "type": "date" } }""");

@@ -20,7 +20,6 @@ import java.util.List;

-import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING;
import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING;
import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING;
import static org.elasticsearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest.Storage;

@@ -37,11 +36,7 @@ public class SearchableSnapshotsResizeIntegTests extends BaseFrozenSearchableSna
assertAcked(
prepareCreate(
"index",
-Settings.builder()
-.put(INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
-.put(INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 2)
-.put(INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey(), 4)
-.put(INDEX_SOFT_DELETES_SETTING.getKey(), true)
+indexSettings(2, 0).put(INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey(), 4).put(INDEX_SOFT_DELETES_SETTING.getKey(), true)
)
);
indexRandomDocs("index", scaledRandomIntBetween(0, 1_000));

@@ -54,7 +54,7 @@ public class SnapshotUserRoleIntegTests extends NativeRealmIntegTestCase {
logger.info("--> creating ordinary index");
final int shards = between(1, 10);
ordinaryIndex = randomAlphaOfLength(4).toLowerCase(Locale.ROOT);
-assertAcked(prepareCreate(ordinaryIndex, 0, Settings.builder().put("number_of_shards", shards).put("number_of_replicas", 0)));
+assertAcked(prepareCreate(ordinaryIndex, 0, indexSettings(shards, 0)));
ensureGreen();

logger.info("--> creating snapshot_user user");

@@ -13,8 +13,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
-import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.test.NotEqualMessageBuilder;
import org.elasticsearch.test.rest.ESRestTestCase;

@@ -63,14 +61,7 @@ public class SqlSearchIT extends ESRestTestCase {
bwcNodes = new ArrayList<>(nodes.getBWCNodes());

String mappings = readResource(SqlSearchIT.class.getResourceAsStream("/all_field_types.json"));
-createIndex(
-index,
-Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), numShards)
-.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas)
-.build(),
-mappings
-);
+createIndex(index, indexSettings(numShards, numReplicas).build(), mappings);
}

@After

@@ -9,7 +9,6 @@ package org.elasticsearch.xpack.transform.integration;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.core.transform.transforms.persistence.TransformInternalIndexConstants;

@@ -95,9 +94,7 @@ public class TransformAuditorIT extends TransformRestTestCase {
}

public void testAliasCreatedforBWCIndexes() throws Exception {
-Settings.Builder settings = Settings.builder()
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0);
+Settings.Builder settings = indexSettings(1, 0);

// These indices should only exist if created in previous versions, ignore the deprecation warning for this test
RequestOptions options = expectWarnings(

@@ -133,10 +133,7 @@ public class WatchStoreUtilsTests extends ESTestCase {

private IndexMetadata createIndexMetaData(String indexName, AliasMetadata aliasMetadata) {
IndexMetadata.Builder indexMetadataBuilder = new IndexMetadata.Builder(indexName);
-Settings settings = Settings.builder()
-.put(IndexMetadata.SETTING_PRIORITY, 5)
-.put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-.put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
+Settings settings = indexSettings(1, 1).put(IndexMetadata.SETTING_PRIORITY, 5)
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
.build();
indexMetadataBuilder.settings(settings);

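
All of the hunks above funnel shard and replica counts through the `indexSettings(shards, replicas)` helper that test classes inherit from `ESTestCase`. As a rough sketch only (the real implementation lives in `ESTestCase` and may differ in detail), the helper amounts to a pre-populated `Settings.Builder`, so call sites keep chaining only the settings that are specific to the test:

import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;

// Hedged sketch of the shortcut used throughout this commit; not the authoritative ESTestCase code.
public static Settings.Builder indexSettings(int shards, int replicas) {
    return Settings.builder()
        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shards)      // "index.number_of_shards"
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, replicas); // "index.number_of_replicas"
}

// Typical call site after this change: one primary, one replica, plus a test-specific setting
// taken from the request-cache tests above.
Settings cacheTestSettings = indexSettings(1, 1).put("requests.cache.enable", true).build();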