Mirror of https://github.com/elastic/elasticsearch.git, synced 2025-04-25 07:37:19 -04:00
Migrate to persistent cluster settings in Java tests

We are deprecating transient settings, therefore this PR changes uses of transient cluster settings to persistent cluster settings in the Java tests.
parent 0d8ec896a7
commit a7ae031ce7
80 changed files with 509 additions and 435 deletions
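Every change in this commit follows the same shape: a test that used to apply a cluster setting through the transient section now applies it through the persistent section, and resets it the same way. A minimal sketch of the pattern for an ESIntegTestCase-style test follows (the class and test names are made up for illustration; the client calls are the ones used throughout the diffs below):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

// Hypothetical sketch of the migration pattern, not a file from this commit.
public class PersistentSettingsMigrationSketchIT extends ESIntegTestCase {

    public void testApplyAndResetPersistentSetting() {
        Settings disableAllocation = Settings.builder()
            .put("cluster.routing.allocation.enable", "none")
            .build();

        // Before this commit, tests typically applied such settings transiently:
        //   client().admin().cluster().prepareUpdateSettings().setTransientSettings(disableAllocation).get();

        // After the migration, the same update targets the persistent section instead:
        assertAcked(client().admin().cluster().prepareUpdateSettings()
            .setPersistentSettings(disableAllocation)
            .get());

        // Clean-up also moves to the persistent section: reset the key with putNull.
        assertAcked(client().admin().cluster().prepareUpdateSettings()
            .setPersistentSettings(Settings.builder().putNull("cluster.routing.allocation.enable"))
            .get());
    }
}

The high-level REST client tests follow the equivalent pattern, calling ClusterUpdateSettingsRequest.persistentSettings(...) instead of transientSettings(...).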
@@ -50,6 +50,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;

import static java.util.Collections.emptyMap;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

@@ -76,13 +77,13 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
setRequest.persistentSettings(map);

ClusterUpdateSettingsResponse setResponse = execute(setRequest, highLevelClient().cluster()::putSettings,
highLevelClient().cluster()::putSettingsAsync);

assertAcked(setResponse);
assertThat(setResponse.getTransientSettings().get(transientSettingKey), notNullValue());
assertThat(setResponse.getTransientSettings().get(persistentSettingKey), nullValue());
assertThat(setResponse.getTransientSettings().get(transientSettingKey),
equalTo(transientSettingValue + ByteSizeUnit.BYTES.getSuffix()));
assertThat(setResponse.getPersistentSettings().get(transientSettingKey), nullValue());
assertThat(setResponse.getPersistentSettings().get(persistentSettingKey), notNullValue());
assertThat(setResponse.getPersistentSettings().get(persistentSettingKey), equalTo(persistentSettingValue));

@@ -98,7 +99,7 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
resetRequest.persistentSettings("{\"" + persistentSettingKey + "\": null }", XContentType.JSON);

ClusterUpdateSettingsResponse resetResponse = execute(resetRequest, highLevelClient().cluster()::putSettings,
highLevelClient().cluster()::putSettingsAsync);

assertThat(resetResponse.getTransientSettings().get(transientSettingKey), equalTo(null));
assertThat(resetResponse.getPersistentSettings().get(persistentSettingKey), equalTo(null));

@@ -112,17 +113,28 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
assertThat(persistentResetValue, equalTo(null));
}

public void testClusterUpdateSettingNonExistent() {
public void testClusterUpdateTransientSettingNonExistent() {
testClusterUpdateSettingNonExistent((settings, request) -> request.transientSettings(settings), "transient");
}

public void testClusterUpdatePersistentSettingNonExistent() {
testClusterUpdateSettingNonExistent((settings, request) -> request.persistentSettings(settings), "persistent");
}

private void testClusterUpdateSettingNonExistent(
final BiConsumer<Settings.Builder, ClusterUpdateSettingsRequest> consumer,
String label) {
String setting = "no_idea_what_you_are_talking_about";
int value = 10;
ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest();
clusterUpdateSettingsRequest.transientSettings(Settings.builder().put(setting, value).build());
consumer.accept(Settings.builder().put(setting, value), clusterUpdateSettingsRequest);

ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(clusterUpdateSettingsRequest,
highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync));
assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST));
assertThat(exception.getMessage(), equalTo(
"Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]"));
"Elasticsearch exception [type=illegal_argument_exception, reason="
+ label + " setting [" + setting + "], not recognized]"));
}

public void testClusterGetSettings() throws IOException {

@@ -184,7 +196,7 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);

logger.info("Shard stats\n{}", EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/shards")).getEntity()));
assertThat(response.getIndices().size(), equalTo(0));
}

@@ -205,7 +217,7 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
ClusterHealthResponse response = execute(request, highLevelClient().cluster()::health, highLevelClient().cluster()::healthAsync);

logger.info("Shard stats\n{}", EntityUtils.toString(
client().performRequest(new Request("GET", "/_cat/shards")).getEntity()));
assertYellowShards(response);
assertThat(response.getIndices().size(), equalTo(2));
for (Map.Entry<String, ClusterIndexHealth> entry : response.getIndices().entrySet()) {

@@ -227,7 +239,6 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
assertThat(response.getUnassignedShards(), equalTo(2));
}

public void testClusterHealthYellowSpecificIndex() throws IOException {
createIndex("index", Settings.EMPTY);
createIndex("index2", Settings.EMPTY);

@@ -318,19 +329,19 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
ClusterGetSettingsResponse settingsResponse = highLevelClient().cluster().getSettings(settingsRequest, RequestOptions.DEFAULT);

List<String> seeds = SniffConnectionStrategy.REMOTE_CLUSTER_SEEDS
.getConcreteSettingForNamespace(clusterAlias)
.get(settingsResponse.getTransientSettings());
.getConcreteSettingForNamespace(clusterAlias)
.get(settingsResponse.getPersistentSettings());
int connectionsPerCluster = SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER
.get(settingsResponse.getTransientSettings());
.get(settingsResponse.getPersistentSettings());
TimeValue initialConnectionTimeout = RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING
.get(settingsResponse.getTransientSettings());
.get(settingsResponse.getPersistentSettings());
boolean skipUnavailable = RemoteClusterService.REMOTE_CLUSTER_SKIP_UNAVAILABLE
.getConcreteSettingForNamespace(clusterAlias)
.get(settingsResponse.getTransientSettings());
.getConcreteSettingForNamespace(clusterAlias)
.get(settingsResponse.getPersistentSettings());

RemoteInfoRequest request = new RemoteInfoRequest();
RemoteInfoResponse response = execute(request, highLevelClient().cluster()::remoteInfo,
highLevelClient().cluster()::remoteInfoAsync);

assertThat(response, notNullValue());
assertThat(response.getInfos().size(), equalTo(1));

@@ -297,7 +297,7 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
String transportAddress = (String) nodesResponse.get("transport_address");

ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
updateSettingsRequest.transientSettings(singletonMap("cluster.remote." + remoteClusterName + ".seeds", transportAddress));
updateSettingsRequest.persistentSettings(singletonMap("cluster.remote." + remoteClusterName + ".seeds", transportAddress));
ClusterUpdateSettingsResponse updateSettingsResponse =
restHighLevelClient.cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT);
assertThat(updateSettingsResponse.isAcknowledged(), is(true));

@@ -367,6 +367,6 @@ public class DeleteByQueryBasicTests extends ReindexTestCase {
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey()).build() :
Settings.builder().put(
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), value).build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get());
}
}

@@ -101,7 +101,7 @@ public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase {
List<Tuple<String, CharSequence>> requestUris = new ArrayList<>();
for (int i = 0; i < 1500; i++) {
requestUris.add(Tuple.tuple("/_cluster/settings",
"{ \"transient\": {\"search.default_search_timeout\": \"40s\" } }"));
"{ \"persistent\": {\"search.default_search_timeout\": \"40s\" } }"));
}

HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);

@@ -83,7 +83,7 @@ public class AutoCreateIndexIT extends ESRestTestCase {
private void configureAutoCreateIndex(boolean value) throws IOException {
XContentBuilder builder = JsonXContent.contentBuilder()
.startObject()
.startObject("transient")
.startObject("persistent")
.field(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), value)
.endObject()
.endObject();

@@ -701,7 +701,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
prepareIndex(5, 0);

logger.info("--> setting balancing threshold really high, so it won't be met");
client().admin().cluster().prepareUpdateSettings().setTransientSettings(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(
Settings.builder().put("cluster.routing.allocation.balance.threshold", 1000.0f)).get();

logger.info("--> starting another node, with the rebalance threshold so high, it should not get any shards");

@@ -57,7 +57,7 @@ public class CloneIndexIT extends ESIntegTestCase {

// disable rebalancing to be able to capture the right stats. balancing can move the target primary
// making it hard to pin point the source shards.
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"
)).get();
try {

@@ -105,7 +105,7 @@ public class CloneIndexIT extends ESIntegTestCase {
assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null));
} finally {
// clean up
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null
)).get();
}

@@ -255,7 +255,7 @@ public class ShrinkIndexIT extends ESIntegTestCase {

// disable rebalancing to be able to capture the right stats. balancing can move the target primary
// making it hard to pin point the source shards.
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"
)).get();

@@ -321,7 +321,7 @@ public class ShrinkIndexIT extends ESIntegTestCase {
assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null));

// clean up
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null
)).get();
}

@@ -496,7 +496,7 @@ public class ShrinkIndexIT extends ESIntegTestCase {

// disable rebalancing to be able to capture the right stats. balancing can move the target primary
// making it hard to pin point the source shards.
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"
)).get();

@@ -537,7 +537,7 @@ public class ShrinkIndexIT extends ESIntegTestCase {
});

// clean up
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null
)).get();
}

@@ -369,7 +369,7 @@ public class SplitIndexIT extends ESIntegTestCase {

// disable rebalancing to be able to capture the right stats. balancing can move the target primary
// making it hard to pin point the source shards.
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"
)).get();
try {

@@ -433,7 +433,7 @@ public class SplitIndexIT extends ESIntegTestCase {
assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null));
} finally {
// clean up
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null
)).get();
}

@@ -76,7 +76,7 @@ public class DeleteIndexBlocksIT extends ESIntegTestCase {
refresh();
try {
Settings settings = Settings.builder().put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true).build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get());
assertSearchHits(client().prepareSearch().get(), "1");
assertBlocked(client().prepareIndex().setIndex("test").setType("doc").setId("2").setSource("foo", "bar"),
Metadata.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK);

@@ -86,7 +86,7 @@ public class DeleteIndexBlocksIT extends ESIntegTestCase {
assertAcked(client().admin().indices().prepareDelete("test"));
} finally {
Settings settings = Settings.builder().putNull(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey()).build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).get());
}
}
}

@@ -47,7 +47,7 @@ public class BulkProcessorClusterSettingsIT extends ESIntegTestCase {
}

public void testIndexWithDisabledAutoCreateIndex() {
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), randomFrom("-*", "+.*")).build()).get());
final BulkItemResponse itemResponse =
client().prepareBulk().add(client().prepareIndex("test-index", "type1").setSource("foo", "bar")).get().getItems()[0];

@@ -269,7 +269,7 @@ public class TransportSearchIT extends ESIntegTestCase {
client().prepareSearch("test1").get();

assertAcked(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Collections.singletonMap(
.setPersistentSettings(Collections.singletonMap(
TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey(), numPrimaries1 - 1)));

IllegalArgumentException e = expectThrows(IllegalArgumentException.class,

@@ -278,7 +278,7 @@ public class TransportSearchIT extends ESIntegTestCase {
+ " shards, which is over the limit of " + (numPrimaries1 - 1)));

assertAcked(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Collections.singletonMap(
.setPersistentSettings(Collections.singletonMap(
TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey(), numPrimaries1)));

// no exception

@@ -291,7 +291,7 @@ public class TransportSearchIT extends ESIntegTestCase {

} finally {
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Collections.singletonMap(
.setPersistentSettings(Collections.singletonMap(
TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey(), null)));
}
}

@@ -364,7 +364,7 @@ public class TransportSearchIT extends ESIntegTestCase {
Settings settings = Settings.builder()
.put("indices.breaker.request.limit", "1b")
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
final Client client = client();
assertBusy(() -> {
SearchPhaseExecutionException exc = expectThrows(SearchPhaseExecutionException.class, () -> client.prepareSearch("test")

@@ -405,7 +405,7 @@ public class TransportSearchIT extends ESIntegTestCase {
Settings settings = Settings.builder()
.putNull("indices.breaker.request.limit")
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}
}

@@ -124,7 +124,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
}

private void setClusterInfoTimeout(String timeValue) {
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), timeValue).build()));
}

@@ -450,14 +450,10 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
}

/**
* Randomly updates persistent or transient settings of the given metadata
* Updates persistent cluster settings of the given metadata
*/
private Metadata randomMetadataSettings(Metadata metadata) {
if (randomBoolean()) {
return Metadata.builder(metadata).persistentSettings(randomSettings(metadata.persistentSettings())).build();
} else {
return Metadata.builder(metadata).transientSettings(randomSettings(metadata.transientSettings())).build();
}
return Metadata.builder(metadata).persistentSettings(randomSettings(metadata.persistentSettings())).build();
}

/**

@@ -281,7 +281,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
assertThat(counts.get(B_1), equalTo(2));
assertThat(counts.containsKey(noZoneNode), equalTo(false));
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();
.setPersistentSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();

health = client().admin().cluster().prepareHealth()
.setIndices("test")

@@ -328,7 +328,7 @@ public class ClusterRerouteIT extends ESIntegTestCase {
Settings newSettings = Settings.builder()
.put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name())
.build();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet();
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(newSettings).execute().actionGet();

logger.info("--> starting a second node");
String node_2 = internalCluster().startNode(commonSettings);

@@ -64,7 +64,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {

logger.info("--> decommission the second node");
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put("cluster.routing.allocation.exclude._name", node_1))
.setPersistentSettings(Settings.builder().put("cluster.routing.allocation.exclude._name", node_1))
.execute().actionGet();
ensureGreen("test");

@@ -105,7 +105,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
logger.info("--> filter out the second node");
if (randomBoolean()) {
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put("cluster.routing.allocation.exclude._name", node_1))
.setPersistentSettings(Settings.builder().put("cluster.routing.allocation.exclude._name", node_1))
.execute().actionGet();
} else {
client().admin().indices().prepareUpdateSettings("test")

@@ -167,7 +167,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {

if (numShardsOnNode1 > ThrottlingAllocationDecider.DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES) {
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
.put("cluster.routing.allocation.node_concurrent_recoveries", numShardsOnNode1)).execute().actionGet();
// make sure we can recover all the nodes at once otherwise we might run into a state where
// one of the shards has not yet started relocating but we already fired up the request to wait for 0 relocating shards.

@@ -205,7 +205,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
Setting<String> filterSetting = randomFrom(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1."))
.setPersistentSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1."))
.execute().actionGet());
assertEquals("invalid IP address [192.168.1.1.] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage());
}

@@ -106,7 +106,7 @@ public class TemplateUpgradeServiceIT extends ESIntegTestCase {
assertBusy(() -> {
// the updates only happen on cluster state updates, so we need to make sure that the cluster state updates are happening
// so we need to simulate updates to make sure the template upgrade kicks in
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(
Settings.builder().put(TestPlugin.UPDATE_TEMPLATE_DUMMY_SETTING.getKey(), updateCount.incrementAndGet())
).get());
List<IndexTemplateMetadata> templates = client().admin().indices().prepareGetTemplates("test_*").get().getIndexTemplates();

@@ -153,7 +153,7 @@ public class TemplateUpgradeServiceIT extends ESIntegTestCase {
assertBusy(() -> {
// the updates only happen on cluster state updates, so we need to make sure that the cluster state updates are happening
// so we need to simulate updates to make sure the template upgrade kicks in
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(
Settings.builder().put(TestPlugin.UPDATE_TEMPLATE_DUMMY_SETTING.getKey(), updateCount.incrementAndGet())
).get());

@@ -499,8 +499,10 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
String timeout = randomFrom("0s", "1s", "2s");
assertAcked(
client(master).admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none"))
.setPersistentSettings(Settings.builder().put("indices.replication.retry_timeout", timeout)).get());
.setPersistentSettings(
Settings.builder()
.put("indices.replication.retry_timeout", timeout)
.put("cluster.routing.allocation.enable", "none")).get());
logger.info("--> Indexing with gap in seqno to ensure that some operations will be replayed in resync");
long numDocs = scaledRandomIntBetween(5, 50);
for (int i = 0; i < numDocs; i++) {

@@ -536,7 +538,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
}, 1, TimeUnit.MINUTES);
assertAcked(
client(master).admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "all")).get());
.setPersistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "all")).get());
partition.stopDisrupting();
partition.ensureHealthy(internalCluster());
logger.info("--> stop disrupting network and re-enable allocation");

@@ -135,7 +135,7 @@ public class DiskThresholdDeciderIT extends DiskUsageIntegTestCase {
refreshDiskUsage();

assertAcked(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE.toString())
.build())
.get());

@@ -149,7 +149,7 @@ public class DiskThresholdDeciderIT extends DiskUsageIntegTestCase {
assertBusy(() -> assertThat(getShardRoutings(dataNode0Id, indexName), empty()));

assertAcked(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
.putNull(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey())
.build())
.get());

@@ -87,7 +87,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
clusterInfoService.setDiskUsageFunctionAndRefresh((discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100, between(10, 100)));

final boolean watermarkBytes = randomBoolean(); // we have to consistently use bytes or percentage for the disk watermark settings
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%")
.put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%")
.put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), watermarkBytes ? "0b" : "100%")

@@ -148,7 +148,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
clusterInfoService.setDiskUsageFunctionAndRefresh((discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 100, between(15, 100)));

final boolean watermarkBytes = randomBoolean(); // we have to consistently use bytes or percentage for the disk watermark settings
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%")
.put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), watermarkBytes ? "10b" : "90%")
.put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), watermarkBytes ? "5b" : "95%")

@@ -223,7 +223,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
// start with all nodes below the watermark
clusterInfoService.setDiskUsageFunctionAndRefresh((discoveryNode, fsInfoPath) -> setDiskUsage(fsInfoPath, 1000L, 1000L));

assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "90%")
.put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "90%")
.put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "100%")

@@ -244,7 +244,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
});

// disable rebalancing, or else we might move too many shards away and then rebalance them back again
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));

// node2 suddenly has 99 bytes free, less than 10%, but moving one shard is enough to bring it up to 100 bytes free:

@@ -284,7 +284,7 @@ public class MockDiskUsagesIT extends ESIntegTestCase {
ClusterInfoServiceUtils.refresh(clusterInfoService); // so that subsequent reroutes see disk usage according to current state
});

assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "85%")
.put(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "100%")
.put(CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "100%")));

@@ -31,7 +31,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
*/
public void testEnableRebalance() throws InterruptedException {
final String firstNode = internalCluster().startNode();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE))
.get();
// we test with 2 shards since otherwise it's pretty fragile if there are difference in the num or shards such that

@@ -69,7 +69,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
// flip the cluster wide setting such that we can also balance for index
// test_1 eventually we should have one shard of each index on each node
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
.setPersistentSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(),
randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL))
.get();
logger.info("--> balance index [test_1]");

@@ -89,7 +89,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
internalCluster().startNodes(2);
// same same_host to true, since 2 nodes are started on the same host,
// only primaries should be assigned
client().admin().cluster().prepareUpdateSettings().setTransientSettings(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(
Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), true)
).get();
final String indexName = "idx";

@@ -102,7 +102,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
clusterState.getRoutingTable().index(indexName).shardsWithState(ShardRoutingState.UNASSIGNED).isEmpty());
// now, update the same_host setting to allow shards to be allocated to multiple nodes on
// the same host - the replica should get assigned
client().admin().cluster().prepareUpdateSettings().setTransientSettings(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(
Settings.builder().put(CLUSTER_ROUTING_ALLOCATION_SAME_HOST_SETTING.getKey(), false)
).get();
clusterState = client().admin().cluster().prepareState().get().getState();

@@ -26,6 +26,8 @@ import org.elasticsearch.test.ESIntegTestCase;
import org.junit.After;

import java.util.Arrays;
import java.util.function.BiConsumer;
import java.util.function.Function;

import static org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING;

@@ -45,65 +47,88 @@ public class ClusterSettingsIT extends ESIntegTestCase {
.setTransientSettings(Settings.builder().putNull("*")));
}

public void testClusterNonExistingSettingsUpdate() {
public void testClusterNonExistingPersistentSettingsUpdate() {
testClusterNonExistingSettingsUpdate((settings, builder) -> builder.setPersistentSettings(settings), "persistent");
}

public void testClusterNonExistingTransientSettingsUpdate() {
testClusterNonExistingSettingsUpdate((settings, builder) -> builder.setTransientSettings(settings), "transient");
}

private void testClusterNonExistingSettingsUpdate(
final BiConsumer<Settings.Builder, ClusterUpdateSettingsRequestBuilder> consumer,
String label) {
String key1 = "no_idea_what_you_are_talking_about";
int value1 = 10;
try {
client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(key1, value1).build())
.get();
ClusterUpdateSettingsRequestBuilder builder = client().admin().cluster().prepareUpdateSettings();
consumer.accept(Settings.builder().put(key1, value1), builder);

builder.get();
fail("bogus value");
} catch (IllegalArgumentException ex) {
assertEquals("transient setting [no_idea_what_you_are_talking_about], not recognized", ex.getMessage());
assertEquals(label + " setting [no_idea_what_you_are_talking_about], not recognized", ex.getMessage());
}
}

public void testDeleteIsAppliedFirst() {
public void testDeleteIsAppliedFirstWithPersistentSettings() {
testDeleteIsAppliedFirst(
(settings, builder) -> builder.setPersistentSettings(settings), ClusterUpdateSettingsResponse::getPersistentSettings);
}

public void testDeleteIsAppliedFirstWithTransientSettings() {
testDeleteIsAppliedFirst(
(settings, builder) -> builder.setTransientSettings(settings), ClusterUpdateSettingsResponse::getTransientSettings);
}

private void testDeleteIsAppliedFirst(
final BiConsumer<Settings.Builder, ClusterUpdateSettingsRequestBuilder> consumer,
final Function<ClusterUpdateSettingsResponse, Settings> settingsFunction) {
final Setting<Integer> INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING;
final Setting<TimeValue> REROUTE_INTERVAL = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING;

ClusterUpdateSettingsResponse response = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.put(INITIAL_RECOVERIES.getKey(), 7)
.put(REROUTE_INTERVAL.getKey(), "42s").build())
.get();
ClusterUpdateSettingsRequestBuilder builder = client().admin().cluster().prepareUpdateSettings();
consumer.accept(Settings.builder()
.put(INITIAL_RECOVERIES.getKey(), 7)
.put(REROUTE_INTERVAL.getKey(), "42s"), builder);

ClusterUpdateSettingsResponse response = builder.get();

assertAcked(response);
assertThat(INITIAL_RECOVERIES.get(response.getTransientSettings()), equalTo(7));
assertThat(INITIAL_RECOVERIES.get(settingsFunction.apply(response)), equalTo(7));
assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(7));
assertThat(REROUTE_INTERVAL.get(response.getTransientSettings()), equalTo(TimeValue.timeValueSeconds(42)));
assertThat(REROUTE_INTERVAL.get(settingsFunction.apply(response)), equalTo(TimeValue.timeValueSeconds(42)));
assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(42)));

response = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull((randomBoolean() ? "cluster.routing.*" : "*"))
.put(REROUTE_INTERVAL.getKey(), "43s"))
.get();
assertThat(INITIAL_RECOVERIES.get(response.getTransientSettings()), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));
ClusterUpdateSettingsRequestBuilder undoBuilder = client().admin().cluster().prepareUpdateSettings();
consumer.accept(Settings.builder().putNull((randomBoolean() ? "cluster.routing.*" : "*"))
.put(REROUTE_INTERVAL.getKey(), "43s"), undoBuilder);

response = undoBuilder.get();

assertThat(INITIAL_RECOVERIES.get(settingsFunction.apply(response)), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));
assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));
assertThat(REROUTE_INTERVAL.get(response.getTransientSettings()), equalTo(TimeValue.timeValueSeconds(43)));
assertThat(REROUTE_INTERVAL.get(settingsFunction.apply(response)), equalTo(TimeValue.timeValueSeconds(43)));
assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(43)));
}

public void testResetClusterSetting() {
public void testResetClusterTransientSetting() {
final Setting<Integer> INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING;
final Setting<TimeValue> REROUTE_INTERVAL = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING;

ClusterUpdateSettingsResponse response = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 7).build())
.get();

assertAcked(response);
assertThat(INITIAL_RECOVERIES.get(response.getTransientSettings()), equalTo(7));
assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(7));

response = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull(INITIAL_RECOVERIES.getKey()))
.get();

assertAcked(response);
assertNull(response.getTransientSettings().get(INITIAL_RECOVERIES.getKey()));

@@ -111,11 +136,11 @@ public class ClusterSettingsIT extends ESIntegTestCase {
equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));

response = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.put(INITIAL_RECOVERIES.getKey(), 8)
.put(REROUTE_INTERVAL.getKey(), "43s").build())
.get();

assertAcked(response);
assertThat(INITIAL_RECOVERIES.get(response.getTransientSettings()), equalTo(8));

@@ -123,40 +148,45 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertThat(REROUTE_INTERVAL.get(response.getTransientSettings()), equalTo(TimeValue.timeValueSeconds(43)));
assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(43)));
response = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull((randomBoolean() ? "cluster.routing.*" : "*")))
.get();

assertThat(INITIAL_RECOVERIES.get(response.getTransientSettings()), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));
assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));
assertThat(REROUTE_INTERVAL.get(response.getTransientSettings()), equalTo(REROUTE_INTERVAL.get(Settings.EMPTY)));
assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(REROUTE_INTERVAL.get(Settings.EMPTY)));

// now persistent
response = client().admin().cluster()
.prepareUpdateSettings()
.setPersistentSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 9).build())
.get();
}

public void testResetClusterPersistentSetting() {
final Setting<Integer> INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING;
final Setting<TimeValue> REROUTE_INTERVAL = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING;

ClusterUpdateSettingsResponse response = client().admin().cluster()
.prepareUpdateSettings()
.setPersistentSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 9).build())
.get();

assertAcked(response);
assertThat(INITIAL_RECOVERIES.get(response.getPersistentSettings()), equalTo(9));
assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(9));

response = client().admin().cluster()
.prepareUpdateSettings()
.setPersistentSettings(Settings.builder().putNull(INITIAL_RECOVERIES.getKey()))
.get();

assertAcked(response);
assertThat(INITIAL_RECOVERIES.get(response.getPersistentSettings()), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));
assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));

response = client().admin().cluster()
.prepareUpdateSettings()
.setPersistentSettings(Settings.builder()
.put(INITIAL_RECOVERIES.getKey(), 10)
.put(REROUTE_INTERVAL.getKey(), "44s").build())
.get();

assertAcked(response);
assertThat(INITIAL_RECOVERIES.get(response.getPersistentSettings()), equalTo(10));

@@ -164,9 +194,9 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertThat(REROUTE_INTERVAL.get(response.getPersistentSettings()), equalTo(TimeValue.timeValueSeconds(44)));
assertThat(clusterService().getClusterSettings().get(REROUTE_INTERVAL), equalTo(TimeValue.timeValueSeconds(44)));
response = client().admin().cluster()
.prepareUpdateSettings()
.setPersistentSettings(Settings.builder().putNull((randomBoolean() ? "cluster.routing.*" : "*")))
.get();

assertThat(INITIAL_RECOVERIES.get(response.getPersistentSettings()), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));
assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(INITIAL_RECOVERIES.get(Settings.EMPTY)));

@@ -185,11 +215,11 @@ public class ClusterSettingsIT extends ESIntegTestCase {
Settings persistentSettings1 = Settings.builder().put(key2, value2).build();

ClusterUpdateSettingsResponse response1 = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(transientSettings1)
.setPersistentSettings(persistentSettings1)
.execute()
.actionGet();

assertAcked(response1);
assertThat(response1.getTransientSettings().get(key1), notNullValue());

@@ -201,11 +231,11 @@ public class ClusterSettingsIT extends ESIntegTestCase {
Settings persistentSettings2 = Settings.EMPTY;

ClusterUpdateSettingsResponse response2 = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(transientSettings2)
.setPersistentSettings(persistentSettings2)
.execute()
.actionGet();

assertAcked(response2);
assertThat(response2.getTransientSettings().get(key1), notNullValue());

@@ -217,11 +247,11 @@ public class ClusterSettingsIT extends ESIntegTestCase {
Settings persistentSettings3 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).put(key2, value2).build();

ClusterUpdateSettingsResponse response3 = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(transientSettings3)
.setPersistentSettings(persistentSettings3)
.execute()
.actionGet();

assertAcked(response3);
assertThat(response3.getTransientSettings().get(key1), nullValue());

@@ -230,33 +260,55 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertThat(response3.getPersistentSettings().get(key2), notNullValue());
}

public void testCanUpdateTracerSettings() {
ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().putList("transport.tracer.include", "internal:index/shard/recovery/*",
"internal:gateway/local*"))
.get();
assertEquals(clusterUpdateSettingsResponse.getTransientSettings().getAsList("transport.tracer.include"),

public void testCanUpdateTransientTracerSettings() {
testCanUpdateTracerSettings(
(settings, builder) -> builder.setTransientSettings(settings), ClusterUpdateSettingsResponse::getTransientSettings);
}

public void testCanUpdatePersistentTracerSettings() {
testCanUpdateTracerSettings(
(settings, builder) -> builder.setPersistentSettings(settings), ClusterUpdateSettingsResponse::getPersistentSettings);
}

private void testCanUpdateTracerSettings(final BiConsumer<Settings.Builder, ClusterUpdateSettingsRequestBuilder> consumer,
final Function<ClusterUpdateSettingsResponse, Settings> settingsFunction) {
ClusterUpdateSettingsRequestBuilder builder = client().admin().cluster().prepareUpdateSettings();
consumer.accept(Settings.builder().putList("transport.tracer.include", "internal:index/shard/recovery/*",
"internal:gateway/local*"), builder);

ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = builder.get();
assertEquals(settingsFunction.apply(clusterUpdateSettingsResponse).getAsList("transport.tracer.include"),
Arrays.asList("internal:index/shard/recovery/*", "internal:gateway/local*"));
}

public void testUpdateSettings() {
public void testUpdateTransientSettings() {
testUpdateSettings(
(settings, builder) -> builder.setTransientSettings(settings), ClusterUpdateSettingsResponse::getTransientSettings);
}

public void testUpdatePersistentSettings() {
testUpdateSettings(
(settings, builder) -> builder.setPersistentSettings(settings), ClusterUpdateSettingsResponse::getPersistentSettings);
}

private void testUpdateSettings(final BiConsumer<Settings.Builder, ClusterUpdateSettingsRequestBuilder> consumer,
final Function<ClusterUpdateSettingsResponse, Settings> settingsFunction) {
final Setting<Integer> INITIAL_RECOVERIES = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING;

ClusterUpdateSettingsResponse response = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 42).build())
.get();
ClusterUpdateSettingsRequestBuilder initialBuilder = client().admin().cluster().prepareUpdateSettings();
consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), 42), initialBuilder);

ClusterUpdateSettingsResponse response = initialBuilder.get();

assertAcked(response);
assertThat(INITIAL_RECOVERIES.get(response.getTransientSettings()), equalTo(42));
assertThat(INITIAL_RECOVERIES.get(settingsFunction.apply(response)), equalTo(42));
assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(42));

try {
client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(INITIAL_RECOVERIES.getKey(), "whatever").build())
.get();
ClusterUpdateSettingsRequestBuilder badBuilder = client().admin().cluster().prepareUpdateSettings();
consumer.accept(Settings.builder().put(INITIAL_RECOVERIES.getKey(), "whatever"), badBuilder);
badBuilder.get();
fail("bogus value");
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse value [whatever] for setting [" + INITIAL_RECOVERIES.getKey() + "]");

@@ -265,11 +317,10 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertThat(clusterService().getClusterSettings().get(INITIAL_RECOVERIES), equalTo(42));

try {
client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.put(INITIAL_RECOVERIES.getKey(), -1).build())
.get();
ClusterUpdateSettingsRequestBuilder badBuilder = client().admin().cluster().prepareUpdateSettings();
consumer.accept(Settings.builder()
.put(INITIAL_RECOVERIES.getKey(), -1), badBuilder);
badBuilder.get();
fail("bogus value");
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Failed to parse value [-1] for setting [" + INITIAL_RECOVERIES.getKey() + "] must be >= 0");

@@ -332,8 +383,8 @@ public class ClusterSettingsIT extends ESIntegTestCase {
expectThrows(
ClusterBlockException.class,
() -> assertAcked(client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(Settings.builder().putNull("cluster.routing.allocation.enable"))
.setTransientSettings(Settings.builder().putNull("archived.*")).get()));
if (readOnly) {
assertTrue(e2.getMessage().contains("cluster read-only (api)"));
}

@@ -427,8 +478,8 @@ public class ClusterSettingsIT extends ESIntegTestCase {
Settings persistentSettings = Settings.builder().put(key2, "5").build();

ClusterUpdateSettingsRequestBuilder request = client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(transientSettings)
.setPersistentSettings(persistentSettings);

// Cluster settings updates are blocked when the cluster is read only
try {

@@ -478,30 +529,49 @@ public class ClusterSettingsIT extends ESIntegTestCase {
}
}

public void testLoggerLevelUpdate() {
public void testLoggerLevelUpdateWithPersistentSettings() {
testLoggerLevelUpdate((settings, builder) -> builder.setPersistentSettings(settings));
}

public void testLoggerLevelUpdateWithTransientSettings() {
testLoggerLevelUpdate((settings, builder) -> builder.setTransientSettings(settings));
}

private void testLoggerLevelUpdate(final BiConsumer<Settings.Builder, ClusterUpdateSettingsRequestBuilder> consumer) {
assertAcked(prepareCreate("test"));

final Level level = LogManager.getRootLogger().getLevel();

ClusterUpdateSettingsRequestBuilder throwBuilder = client().admin().cluster().prepareUpdateSettings();
consumer.accept(Settings.builder().put("logger._root", "BOOM"), throwBuilder);

final IllegalArgumentException e =
expectThrows(
IllegalArgumentException.class,
() -> client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put("logger._root", "BOOM")).execute().actionGet());
() -> throwBuilder.execute().actionGet());
assertEquals("Unknown level constant [BOOM].", e.getMessage());

try {
final Settings.Builder testSettings = Settings.builder().put("logger.test", "TRACE").put("logger._root", "trace");
client().admin().cluster().prepareUpdateSettings().setTransientSettings(testSettings).execute().actionGet();
ClusterUpdateSettingsRequestBuilder updateBuilder = client().admin().cluster().prepareUpdateSettings();
consumer.accept(testSettings, updateBuilder);

updateBuilder.execute().actionGet();
assertEquals(Level.TRACE, LogManager.getLogger("test").getLevel());
assertEquals(Level.TRACE, LogManager.getRootLogger().getLevel());
} finally {
ClusterUpdateSettingsRequestBuilder undoBuilder = client().admin().cluster().prepareUpdateSettings();

if (randomBoolean()) {
final Settings.Builder defaultSettings = Settings.builder().putNull("logger.test").putNull("logger._root");
client().admin().cluster().prepareUpdateSettings().setTransientSettings(defaultSettings).execute().actionGet();
consumer.accept(defaultSettings, undoBuilder);

undoBuilder.execute().actionGet();
} else {
final Settings.Builder defaultSettings = Settings.builder().putNull("logger.*");
client().admin().cluster().prepareUpdateSettings().setTransientSettings(defaultSettings).execute().actionGet();
consumer.accept(defaultSettings, undoBuilder);

undoBuilder.execute().actionGet();
}
assertEquals(level, LogManager.getLogger("test").getLevel());
assertEquals(level, LogManager.getRootLogger().getLevel());

@ -516,26 +586,28 @@ public class ClusterSettingsIT extends ESIntegTestCase {
|
|||
|
||||
final Settings settings = Settings.builder().put(key, value).build();
|
||||
final Settings updatedSettings = Settings.builder().put(key, updatedValue).build();
|
||||
if (randomBoolean()) {
|
||||
logger.info("Using persistent settings");
|
||||
|
||||
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings).execute().actionGet();
|
||||
ClusterStateResponse state = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertEquals(value, state.getState().getMetadata().persistentSettings().get(key));
|
||||
boolean persistent = randomBoolean();
|
||||
|
||||
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(updatedSettings).execute().actionGet();
|
||||
ClusterStateResponse updatedState = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertEquals(updatedValue, updatedState.getState().getMetadata().persistentSettings().get(key));
|
||||
} else {
|
||||
logger.info("Using transient settings");
|
||||
client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet();
|
||||
ClusterStateResponse state = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertEquals(value, state.getState().getMetadata().transientSettings().get(key));
|
||||
BiConsumer<Settings, ClusterUpdateSettingsRequestBuilder> consumer =
|
||||
(persistent) ? (s, b) -> b.setPersistentSettings(s) : (s, b) -> b.setTransientSettings(s);
|
||||
Function<Metadata, Settings> getter =
|
||||
(persistent) ? Metadata::persistentSettings : Metadata::transientSettings;
|
||||
|
||||
client().admin().cluster().prepareUpdateSettings().setTransientSettings(updatedSettings).execute().actionGet();
|
||||
ClusterStateResponse updatedState = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertEquals(updatedValue, updatedState.getState().getMetadata().transientSettings().get(key));
|
||||
}
|
||||
logger.info("Using " + ((persistent) ? "persistent" : "transient") + " settings");
|
||||
|
||||
ClusterUpdateSettingsRequestBuilder builder = client().admin().cluster().prepareUpdateSettings();
|
||||
consumer.accept(settings, builder);
|
||||
|
||||
builder.execute().actionGet();
|
||||
ClusterStateResponse state = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertEquals(value, getter.apply(state.getState().getMetadata()).get(key));
|
||||
|
||||
ClusterUpdateSettingsRequestBuilder updateBuilder = client().admin().cluster().prepareUpdateSettings();
|
||||
consumer.accept(updatedSettings, updateBuilder);
|
||||
updateBuilder.execute().actionGet();
|
||||
|
||||
ClusterStateResponse updatedState = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertEquals(updatedValue, getter.apply(updatedState.getState().getMetadata()).get(key));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -56,7 +56,7 @@ public class ClusterShardLimitIT extends ESIntegTestCase {
} else {
client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(shardsPerNodeKey, negativeShardsPerNode).build())
.setPersistentSettings(Settings.builder().put(shardsPerNodeKey, negativeShardsPerNode).build())
.get();
}
fail("should not be able to set negative shards per node");
@ -338,9 +338,9 @@ public class ClusterShardLimitIT extends ESIntegTestCase {
} else {
response = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(shardsPerNodeKey, shardsPerNode).build())
.setPersistentSettings(Settings.builder().put(shardsPerNodeKey, shardsPerNode).build())
.get();
assertEquals(shardsPerNode, response.getTransientSettings().getAsInt(shardsPerNodeKey, -1).intValue());
assertEquals(shardsPerNode, response.getPersistentSettings().getAsInt(shardsPerNodeKey, -1).intValue());
}
} catch (IllegalArgumentException ex) {
fail(ex.getMessage());

@ -30,13 +30,13 @@ public class UpgradeSettingsIT extends ESSingleNodeTestCase {

@After
public void cleanup() throws Exception {
client()
.admin()
.cluster()
.prepareUpdateSettings()
.setPersistentSettings(Settings.builder().putNull("*"))
.setTransientSettings(Settings.builder().putNull("*"))
.get();
}

@Override
@ -89,24 +89,24 @@ public class UpgradeSettingsIT extends ESSingleNodeTestCase {
}

private void runUpgradeSettingsOnUpdateTest(
final BiConsumer<Settings, ClusterUpdateSettingsRequestBuilder> consumer,
final Function<Metadata, Settings> settingsFunction) {
final String value = randomAlphaOfLength(8);
final ClusterUpdateSettingsRequestBuilder builder =
client()
.admin()
.cluster()
.prepareUpdateSettings();
consumer.accept(Settings.builder().put("foo.old", value).build(), builder);
builder.get();

final ClusterStateResponse response = client()
.admin()
.cluster()
.prepareState()
.clear()
.setMetadata(true)
.get();

assertFalse(UpgradeSettingsPlugin.oldSetting.exists(settingsFunction.apply(response.getState().metadata())));
assertTrue(UpgradeSettingsPlugin.newSetting.exists(settingsFunction.apply(response.getState().metadata())));

@ -213,7 +213,7 @@ public class MasterDisruptionIT extends AbstractDisruptionTestCase {

logger.info("Verify no master block with {} set to {}", NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), "all");
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), "all"))
.setPersistentSettings(Settings.builder().put(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), "all"))
.get();

networkDisruption.startDisrupting();
@ -245,7 +245,7 @@ public class MasterDisruptionIT extends AbstractDisruptionTestCase {

ensureGreen();

assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(
Settings.builder().put("indices.mapping.dynamic_timeout", "1ms")));

ServiceDisruptionScheme disruption = new BlockMasterServiceOnMaster(random());

@ -69,7 +69,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE)
).execute().actionGet();
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(
Settings.builder().put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMinutes(5)))
.get();

@ -97,7 +97,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
assertConcreteMappingsOnAll("test", type, fieldName);
}

client().admin().cluster().prepareUpdateSettings().setTransientSettings(
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(
Settings.builder().putNull(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey())).get();
}

@ -81,7 +81,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING).forEach(s -> resetSettings.putNull(s.getKey()));
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(resetSettings));
}

@Before
@ -138,7 +138,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b")
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.05)
.build();
assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client.admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));

// execute a search that loads field data (sorting on the "test" field)
// again, this time it should trip the breaker
@ -192,7 +192,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b")
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.05)
.build();
assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client.admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));

// execute a search that loads field data (sorting on the "test" field)
// again, this time it should trip the breaker
@ -226,7 +226,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
Settings resetSettings = Settings.builder()
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b")
.build();
assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));
assertAcked(client.admin().cluster().prepareUpdateSettings().setPersistentSettings(resetSettings));

// index some different terms so we have some field data for loading
int docCount = scaledRandomIntBetween(300, 1000);
@ -260,7 +260,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
Settings resetSettings = Settings.builder()
.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b")
.build();
assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));
assertAcked(client.admin().cluster().prepareUpdateSettings().setPersistentSettings(resetSettings));

// index some different terms so we have some field data for loading
int docCount = scaledRandomIntBetween(100, 1000);
@ -309,14 +309,14 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
Settings insane = Settings.builder()
.put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "5b")
.build();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(insane).get();
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(insane).get();

// calls updates settings to reset everything to default, checking that the request
// is not blocked by the above inflight circuit breaker
reset();

assertThat(client().admin().cluster().prepareState().get()
.getState().metadata().transientSettings().get(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()),
.getState().metadata().persistentSettings().get(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()),
nullValue());

}
@ -367,7 +367,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
.put(HierarchyCircuitBreakerService.IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), inFlightRequestsLimit)
.build();

assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(limitSettings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(limitSettings));

// can either fail directly with an exception or the response contains exceptions (depending on client)
try {

@ -214,7 +214,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
private void slowDownRecovery(ByteSizeValue shardSize) {
long chunkSize = Math.max(1, shardSize.getBytes() / 10);
assertTrue(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
// one chunk per sec..
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES)
// small chunks
@ -224,7 +224,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {

private void restoreRecoverySpeed() {
assertTrue(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20mb")
.put(CHUNK_SIZE_SETTING.getKey(), RecoverySettings.DEFAULT_CHUNK_SIZE)
).get().isAcknowledged());

@ -75,7 +75,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(
DummySettingPlugin.class, FinalSettingPlugin.class);
}

public static class DummySettingPlugin extends Plugin {
@ -144,7 +144,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
iae = expectThrows(IllegalArgumentException.class, () ->
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
.put("cluster.acc.test.pw", "asdf")).setPersistentSettings(Settings.builder()
.put("cluster.acc.test.user", "asdf")).get());
assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage());

if (randomBoolean()) {
@ -172,7 +172,6 @@ public class UpdateSettingsIT extends ESIntegTestCase {
.putNull("cluster.acc.test.pw")
.putNull("cluster.acc.test.user")).get();
}

}

public void testUpdateDependentIndexSettings() {
@ -291,10 +290,10 @@ public class UpdateSettingsIT extends ESIntegTestCase {
.indices()
.prepareUpdateSettings("test")
.setSettings(
Settings.builder()
.put("index.refresh_interval", -1)
.put("index.translog.flush_threshold_size", "1024b")
.put("index.translog.generation_threshold_size", "4096b"))
.execute()
.actionGet();
IndexMetadata indexMetadata = client().admin().cluster().prepareState().execute().actionGet().getState().metadata().index("test");
@ -493,46 +492,46 @@ public class UpdateSettingsIT extends ESIntegTestCase {

{
final long settingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.refresh_interval", "500ms"))
.get());
final long newSettingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
assertThat(newSettingsVersion, equalTo(1 + settingsVersion));
}

{
final boolean block = randomBoolean();
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.blocks.read_only", block))
.get());
final long settingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.blocks.read_only", block == false))
.get());
final long newSettingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
assertThat(newSettingsVersion, equalTo(1 + settingsVersion));

// if the read-only block is present, remove it
if (block == false) {
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.blocks.read_only", false))
.get());
}
}
}
@ -543,49 +542,49 @@ public class UpdateSettingsIT extends ESIntegTestCase {

{
final long settingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
final String refreshInterval =
client().admin().indices().prepareGetSettings("test").get().getSetting("test", "index.refresh_interval");
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.refresh_interval", refreshInterval))
.get());
final long newSettingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
assertThat(newSettingsVersion, equalTo(settingsVersion));
}

{
final boolean block = randomBoolean();
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.blocks.read_only", block))
.get());
// now put the same block again
final long settingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.blocks.read_only", block))
.get());
final long newSettingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
assertThat(newSettingsVersion, equalTo(settingsVersion));

// if the read-only block is present, remove it
if (block) {
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.blocks.read_only", false))
.get());
}
}
}
@ -599,17 +598,17 @@ public class UpdateSettingsIT extends ESIntegTestCase {
createIndex("test");

final long settingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
final int numberOfReplicas = Integer.valueOf(
client().admin().indices().prepareGetSettings("test").get().getSetting("test", "index.number_of_replicas"));
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.number_of_replicas", numberOfReplicas))
.get());
final long newSettingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
assertThat(newSettingsVersion, equalTo(settingsVersion));
}

@ -622,18 +621,18 @@ public class UpdateSettingsIT extends ESIntegTestCase {
createIndex("test");

final long settingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
final int numberOfReplicas =
Integer.valueOf(
client().admin().indices().prepareGetSettings("test").get().getSetting("test", "index.number_of_replicas"));
assertAcked(client()
.admin()
.indices()
.prepareUpdateSettings("test")
.setSettings(Settings.builder().put("index.number_of_replicas", 1 + numberOfReplicas))
.get());
final long newSettingsVersion =
client().admin().cluster().prepareState().get().getState().metadata().index("test").getSettingsVersion();
assertThat(newSettingsVersion, equalTo(1 + settingsVersion));
}

@ -22,7 +22,7 @@ public class CloseIndexDisableCloseAllIT extends ESIntegTestCase {
public void afterTest() {
Settings settings = Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), (String)null)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

public void testCloseAllRequiresName() {
@ -34,7 +34,7 @@ public class CloseIndexDisableCloseAllIT extends ESIntegTestCase {
// disable closing
createIndex("test_no_close");
Settings settings = Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), false).build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));

IllegalStateException illegalStateException = expectThrows(IllegalStateException.class,
() -> client().admin().indices().prepareClose("test_no_close").get());

@ -110,7 +110,7 @@ public class CloseWhileRelocatingShardsIT extends ESIntegTestCase {

ensureGreen(TimeValue.timeValueSeconds(60L),indices);
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), Rebalance.NONE.toString())));

final String targetNode = internalCluster().startDataOnlyNode();
@ -250,7 +250,7 @@ public class CloseWhileRelocatingShardsIT extends ESIntegTestCase {
}
} finally {
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
.putNull(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey())));
}
}

@ -324,7 +324,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
.setWaitForNodes("5").get().isTimedOut());

// disable allocation to control the situation more easily
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));

logger.debug("--> shutting down two random nodes");
@ -346,7 +346,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
Settings.builder()
.put(IndexMetadata.INDEX_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "_name", "NONE")));

assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")));

logger.debug("--> waiting for shards to recover on [{}]", node4);
@ -360,7 +360,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
assertFalse(client().admin().cluster().prepareHealth().setWaitForActiveShards(4).get().isTimedOut());

// disable allocation again to control concurrency a bit and allow shard active to kick in before allocation
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));

logger.debug("--> starting the two old nodes back");
@ -370,7 +370,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut());


assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")));

logger.debug("--> waiting for the lost shard to be recovered");
@ -410,7 +410,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {

// disable relocations when we do this, to make sure the shards are not relocated from node2
// due to rebalancing, and delete its content
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE))
.get();

@ -25,14 +25,14 @@ public class DestructiveOperationsIT extends ESIntegTestCase {
@After
public void afterTest() {
Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), (String)null).build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

public void testDeleteIndexIsRejected() throws Exception {
Settings settings = Settings.builder()
.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));

createIndex("index1", "1index");

@ -50,7 +50,7 @@ public class DestructiveOperationsIT extends ESIntegTestCase {
Settings settings = Settings.builder()
.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

createIndex("index1", "1index");
@ -68,7 +68,7 @@ public class DestructiveOperationsIT extends ESIntegTestCase {
Settings settings = Settings.builder()
.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));

createIndex("index1", "1index");

@ -86,7 +86,7 @@ public class DestructiveOperationsIT extends ESIntegTestCase {
Settings settings = Settings.builder()
.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

createIndex("index1", "1index");
@ -107,7 +107,7 @@ public class DestructiveOperationsIT extends ESIntegTestCase {
Settings settings = Settings.builder()
.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));

createIndex("index1", "1index");
assertAcked(client().admin().indices().prepareClose("1index", "index1").get());
@ -124,7 +124,7 @@ public class DestructiveOperationsIT extends ESIntegTestCase {
Settings settings = Settings.builder()
.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

createIndex("index1", "1index");
@ -146,7 +146,7 @@ public class DestructiveOperationsIT extends ESIntegTestCase {
Settings settings = Settings.builder()
.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));

createIndex("index1", "1index");

@ -166,7 +166,7 @@ public class DestructiveOperationsIT extends ESIntegTestCase {
Settings settings = Settings.builder()
.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

createIndex("index1", "1index");

@ -406,7 +406,7 @@ public class RelocationIT extends ESIntegTestCase {

logger.info("--> stopping replica assignment");
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));

logger.info("--> wait for all replica shards to be removed, on all nodes");

@ -55,7 +55,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
* Later we allow full recovery to ensure we can still recover and don't run into corruptions.
*/
public void testCancelRecoveryAndResume() throws Exception {
assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
assertTrue(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.put(CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)))
.get().isAcknowledged());

@ -88,13 +88,17 @@ public class EquivalenceIT extends ESIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Collections.singletonMap("search.max_buckets", Integer.MAX_VALUE))
.setPersistentSettings(Collections.singletonMap("search.max_buckets", Integer.MAX_VALUE))
.get();
}

@After
private void cleanupMaxBuckets() {
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Collections.singletonMap("search.max_buckets", null)).get();
client().admin()
.cluster()
.prepareUpdateSettings()
.setPersistentSettings(Collections.singletonMap("search.max_buckets", null))
.get();
}

// Make sure that unordered, reversed, disjoint and/or overlapping ranges are supported

@ -47,7 +47,7 @@ public class CardinalityWithRequestBreakerIT extends ESIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(
.setPersistentSettings(
Settings.builder().put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), requestBreaker)
)
.get();
@ -70,7 +70,9 @@ public class CardinalityWithRequestBreakerIT extends ESIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey()))
.setPersistentSettings(
Settings.builder().putNull(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey())
)
.get();

// validation done by InternalTestCluster.ensureEstimatedStats()

@ -98,15 +98,15 @@ public class SearchRedStateIndexIT extends ESIntegTestCase {
private void setClusterDefaultAllowPartialResults(boolean allowPartialResults) {
String key = SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey();

Settings transientSettings = Settings.builder().put(key, allowPartialResults).build();
Settings persistentSettings = Settings.builder().put(key, allowPartialResults).build();

ClusterUpdateSettingsResponse response1 = client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(transientSettings)
.setPersistentSettings(persistentSettings)
.get();

assertAcked(response1);
assertEquals(response1.getTransientSettings().getAsBoolean(key, null), allowPartialResults);
assertEquals(response1.getPersistentSettings().getAsBoolean(key, null), allowPartialResults);
}

private void buildRedIndex(int numShards) throws Exception {
@ -133,6 +133,6 @@ public class SearchRedStateIndexIT extends ESIntegTestCase {
@After
public void cleanup() throws Exception {
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull(SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey())));
.setPersistentSettings(Settings.builder().putNull(SearchService.DEFAULT_ALLOW_PARTIAL_SEARCH_RESULTS.getKey())));
}
}

@ -1875,7 +1875,7 @@ public class SearchQueryIT extends ESIntegTestCase {
.setSource("field", "value");
indexRandom(true, false, indexRequest);
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey(), true))
.setPersistentSettings(Settings.builder().put(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey(), true))
.get();
try {
SearchResponse searchResponse = client().prepareSearch()
@ -1893,7 +1893,7 @@ public class SearchQueryIT extends ESIntegTestCase {
} finally {
// unset cluster setting
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey()))
.setPersistentSettings(Settings.builder().putNull(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey()))
.get();
}

@ -66,8 +66,7 @@ public class SearchScrollIT extends ESIntegTestCase {
@After
public void cleanup() throws Exception {
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(Settings.builder().putNull("*"))
.setTransientSettings(Settings.builder().putNull("*")));
.setPersistentSettings(Settings.builder().putNull("*")));
}

public void testSimpleScrollQueryThenFetch() throws Exception {

@ -1507,7 +1507,7 @@ public class FieldSortIT extends ESIntegTestCase {

public void testSortMetaField() throws Exception {
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey(), true))
.setPersistentSettings(Settings.builder().put(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey(), true))
.get();
try {
createIndex("test");
@ -1540,7 +1540,7 @@ public class FieldSortIT extends ESIntegTestCase {
} finally {
// unset cluster setting
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey()))
.setPersistentSettings(Settings.builder().putNull(IndicesService.INDICES_ID_FIELD_DATA_ENABLED_SETTING.getKey()))
.get();
}
}

@ -591,7 +591,7 @@ public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase {
client.admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "100b").build())
.setPersistentSettings(Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "100b").build())
.get();
ActionFuture<RestoreSnapshotResponse> restoreSnapshotResponse = client.admin()
.cluster()
@ -613,7 +613,7 @@ public class RestoreSnapshotIT extends AbstractSnapshotIntegTestCase {
client.admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()).build())
.setPersistentSettings(Settings.builder().putNull(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()).build())
.get();

// check that restore now completes quickly (i.e. within 20 seconds)

@ -1119,7 +1119,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
client.admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(
.setPersistentSettings(
Settings.builder()
.put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), throttleRestoreViaRecoverySettings ? "10k" : "0")
.build()
@ -1155,7 +1155,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
client.admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()).build())
.setPersistentSettings(Settings.builder().putNull(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()).build())
.get();
}

@ -210,7 +210,7 @@ public class DiskThresholdSettingsTests extends ESTestCase {
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey(), "99%")
.build();
final Settings.Builder updates = Settings.builder();
assertTrue(clusterSettings.updateSettings(settings, target, updates, "transient"));
assertTrue(clusterSettings.updateSettings(settings, target, updates, "persistent"));
assertNull(target.get(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey()));
assertNull(target.get(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey()));
assertThat(target.get(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_FLOOD_STAGE_WATERMARK_SETTING.getKey()),
@ -222,7 +222,7 @@ public class DiskThresholdSettingsTests extends ESTestCase {
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "97%")
.build();
final Settings.Builder updates = Settings.builder();
assertTrue(clusterSettings.updateSettings(settings, target, updates, "transient"));
assertTrue(clusterSettings.updateSettings(settings, target, updates, "persistent"));
assertNull(target.get(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey()));
assertThat(target.get(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey()),
equalTo("97%"));
@ -235,7 +235,7 @@ public class DiskThresholdSettingsTests extends ESTestCase {
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "95%")
.build();
final Settings.Builder updates = Settings.builder();
assertTrue(clusterSettings.updateSettings(settings, target, updates, "transient"));
assertTrue(clusterSettings.updateSettings(settings, target, updates, "persistent"));
assertThat(target.get(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey()),
equalTo("95%"));
assertThat(target.get(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey()),

@ -1629,7 +1629,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
Settings settings = value ? Settings.builder().put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), value).build() :
Settings.builder().putNull(Metadata.SETTING_READ_ONLY_SETTING.getKey()).build() ;
assertAcked(client().admin().cluster().prepareUpdateSettings()
.setPersistentSettings(settings).setTransientSettings(settings).get());
.setPersistentSettings(settings).get());
}

private static CountDownLatch newLatch(List<CountDownLatch> latches) {

@ -470,7 +470,7 @@ public class AsyncSearchActionIT extends AsyncSearchIntegTestCase {

int limit = 1000; // is not big enough to store the response
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
updateSettingsRequest.transientSettings(Settings.builder().put("search.max_async_search_response_size", limit + "b"));
updateSettingsRequest.persistentSettings(Settings.builder().put("search.max_async_search_response_size", limit + "b"));
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());

ExecutionException e = expectThrows(ExecutionException.class,
@ -480,7 +480,7 @@ public class AsyncSearchActionIT extends AsyncSearchIntegTestCase {
"This limit can be set by changing the [" + MAX_ASYNC_SEARCH_RESPONSE_SIZE_SETTING.getKey() + "] setting."));

updateSettingsRequest = new ClusterUpdateSettingsRequest();
updateSettingsRequest.transientSettings(Settings.builder().put("search.max_async_search_response_size", (String) null));
updateSettingsRequest.persistentSettings(Settings.builder().put("search.max_async_search_response_size", (String) null));
assertAcked(client().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
}
}

@ -142,7 +142,7 @@ public class IndexFollowingIT extends CcrIntegTestCase {
int numberOfReplicas = between(0, 1);

followerClient().admin().cluster().prepareUpdateSettings().setMasterNodeTimeout(TimeValue.MAX_VALUE)
.setTransientSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(),
.setPersistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(),
new ByteSizeValue(randomIntBetween(1, 1000), ByteSizeUnit.KB)))
.get();

@ -859,7 +859,7 @@ public class IndexFollowingIT extends CcrIntegTestCase {
public void testLeaderIndexRed() throws Exception {
try {
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
updateSettingsRequest.transientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none"));
updateSettingsRequest.persistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "none"));
assertAcked(leaderClient().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
assertAcked(leaderClient().admin().indices().prepareCreate("index1")
.setWaitForActiveShards(ActiveShardCount.NONE)
@ -879,7 +879,7 @@ public class IndexFollowingIT extends CcrIntegTestCase {
} finally {
// Always unset allocation enable setting to avoid other assertions from failing too when this test fails:
ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
updateSettingsRequest.transientSettings(Settings.builder().put("cluster.routing.allocation.enable", (String) null));
updateSettingsRequest.persistentSettings(Settings.builder().put("cluster.routing.allocation.enable", (String) null));
assertAcked(leaderClient().admin().cluster().updateSettings(updateSettingsRequest).actionGet());
}
}

@ -69,7 +69,7 @@ public class AutoCreateDataStreamIT extends ESRestTestCase {
private void configureAutoCreateIndex(boolean value) throws IOException {
XContentBuilder builder = JsonXContent.contentBuilder()
.startObject()
.startObject("transient")
.startObject("persistent")
.field(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), value)
.endObject()
.endObject();

@ -124,7 +124,7 @@ public class TransportDeleteEnrichPolicyActionTests extends AbstractEnrichTestCa
Settings settings = Settings.builder()
.put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), destructiveRequiresName)
.build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

createIndex(EnrichPolicy.getBaseName(name) + "-foo1");
@ -165,7 +165,7 @@ public class TransportDeleteEnrichPolicyActionTests extends AbstractEnrichTestCa

if (destructiveRequiresName) {
Settings settings = Settings.builder().putNull(DestructiveOperations.REQUIRES_NAME_SETTING.getKey()).build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

EnrichPolicyLocks enrichPolicyLocks = getInstanceFromNode(EnrichPolicyLocks.class);

@ -410,7 +410,7 @@ public class SnapshotLifecycleRestIT extends ESRestTestCase {

// Run retention every second
ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest();
req.transientSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, "*/1 * * * * ?"));
req.persistentSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, "*/1 * * * * ?"));
try (XContentBuilder builder = jsonBuilder()) {
req.toXContent(builder, ToXContent.EMPTY_PARAMS);
Request r = new Request("PUT", "/_cluster/settings");
@ -452,7 +452,7 @@ public class SnapshotLifecycleRestIT extends ESRestTestCase {
} finally {
// Unset retention
ClusterUpdateSettingsRequest unsetRequest = new ClusterUpdateSettingsRequest();
unsetRequest.transientSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, (String) null));
unsetRequest.persistentSettings(Settings.builder().put(LifecycleSettings.SLM_RETENTION_SCHEDULE, (String) null));
try (XContentBuilder builder = jsonBuilder()) {
unsetRequest.toXContent(builder, ToXContent.EMPTY_PARAMS);
Request r = new Request("PUT", "/_cluster/settings");
@ -707,7 +707,7 @@ public class SnapshotLifecycleRestIT extends ESRestTestCase {

private void disableSLMMinimumIntervalValidation() throws IOException {
ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest();
req.transientSettings(Settings.builder().put(LifecycleSettings.SLM_MINIMUM_INTERVAL, "0s"));
req.persistentSettings(Settings.builder().put(LifecycleSettings.SLM_MINIMUM_INTERVAL, "0s"));
try (XContentBuilder builder = jsonBuilder()) {
req.toXContent(builder, ToXContent.EMPTY_PARAMS);
Request r = new Request("PUT", "/_cluster/settings");

@ -95,7 +95,7 @@ public class PermissionsIT extends ESRestTestCase {
Request request = new Request("PUT", "/_cluster/settings");
XContentBuilder pollIntervalEntity = JsonXContent.contentBuilder();
pollIntervalEntity.startObject();
pollIntervalEntity.startObject("transient");
pollIntervalEntity.startObject("persistent");
pollIntervalEntity.field(LifecycleSettings.LIFECYCLE_POLL_INTERVAL, "1s");
pollIntervalEntity.endObject();
pollIntervalEntity.endObject();

@ -440,7 +440,7 @@ public class IndexLifecycleInitialisationTests extends ESIntegTestCase {
TimeValue newPollInterval = TimeValue.timeValueHours(randomLongBetween(6, 1000));
Settings newIntervalSettings = Settings.builder().put(LifecycleSettings.LIFECYCLE_POLL_INTERVAL,
newPollInterval.getStringRep()).build();
assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(newIntervalSettings));
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(newIntervalSettings));
{
TimeValueSchedule schedule = (TimeValueSchedule) indexLifecycleService.getScheduledJob().getSchedule();
assertThat(schedule.getInterval(), equalTo(newPollInterval));

@ -61,7 +61,7 @@ public class BulkFailureRetryIT extends MlNativeAutodetectIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
.putNull("xpack.ml.persist_results_max_retries")
.putNull("logger.org.elasticsearch.xpack.ml.datafeed.DatafeedJob")
.putNull("logger.org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister")
@ -118,7 +118,7 @@ public class BulkFailureRetryIT extends MlNativeAutodetectIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder()
.setPersistentSettings(Settings.builder()
.put("logger.org.elasticsearch.xpack.ml.datafeed.DatafeedJob", "TRACE")
.put("logger.org.elasticsearch.xpack.ml.job.persistence.JobResultsPersister", "TRACE")
.put("logger.org.elasticsearch.xpack.ml.utils.persistence.ResultsPersisterService", "TRACE")

@ -71,7 +71,7 @@ public class ClassificationEvaluationIT extends MlNativeDataFrameAnalyticsIntegT
cleanUp();
client().admin().cluster()
.prepareUpdateSettings()
.setTransientSettings(Settings.builder().putNull("search.max_buckets"))
.setPersistentSettings(Settings.builder().putNull("search.max_buckets"))
.get();
}

@ -589,13 +589,13 @@ public class ClassificationEvaluationIT extends MlNativeDataFrameAnalyticsIntegT
public void testEvaluate_ConfusionMatrixMetricWithDefaultSize() {
evaluateMulticlassConfusionMatrix();

client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("search.max_buckets", 20)).get();
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.max_buckets", 20)).get();
evaluateMulticlassConfusionMatrix();

client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("search.max_buckets", 7)).get();
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.max_buckets", 7)).get();
evaluateMulticlassConfusionMatrix();

client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("search.max_buckets", 6)).get();
client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.max_buckets", 6)).get();
ElasticsearchException e = expectThrows(ElasticsearchException.class, this::evaluateMulticlassConfusionMatrix);

assertThat(e.getCause(), is(instanceOf(TooManyBucketsException.class)));

@@ -77,7 +77,7 @@ public class DataFrameAnalysisCustomFeatureIT extends MlNativeDataFrameAnalytics
public void setupLogging() {
client().admin().cluster()
.prepareUpdateSettings()
- .setTransientSettings(Settings.builder()
+ .setPersistentSettings(Settings.builder()
.put("logger.org.elasticsearch.xpack.ml.dataframe", "DEBUG")
.put("logger.org.elasticsearch.xpack.core.ml.inference", "DEBUG"))
.get();

@@ -88,7 +88,7 @@ public class DataFrameAnalysisCustomFeatureIT extends MlNativeDataFrameAnalytics
cleanUp();
client().admin().cluster()
.prepareUpdateSettings()
- .setTransientSettings(Settings.builder()
+ .setPersistentSettings(Settings.builder()
.putNull("logger.org.elasticsearch.xpack.ml.dataframe")
.putNull("logger.org.elasticsearch.xpack.core.ml.inference"))
.get();

@@ -76,7 +76,7 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
- .setTransientSettings(Settings.builder()
+ .setPersistentSettings(Settings.builder()
.putNull("logger.org.elasticsearch.xpack.ml.datafeed")
.build()).get();
cleanUp();

@@ -343,7 +343,7 @@ public class DatafeedJobsIT extends MlNativeAutodetectIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
- .setTransientSettings(Settings.builder()
+ .setPersistentSettings(Settings.builder()
.put("logger.org.elasticsearch.xpack.ml.datafeed", "TRACE")
.build()).get();
String indexName = "stop-restart-data";

@@ -65,7 +65,7 @@ public class InferenceIngestIT extends ESRestTestCase {
Request loggingSettings = new Request("PUT", "_cluster/settings");
loggingSettings.setJsonEntity("" +
"{" +
- "\"transient\" : {\n" +
+ "\"persistent\" : {\n" +
" \"logger.org.elasticsearch.xpack.ml.inference\" : \"TRACE\"\n" +
" }" +
"}");

@@ -84,7 +84,7 @@ public class InferenceIngestIT extends ESRestTestCase {
Request loggingSettings = new Request("PUT", "_cluster/settings");
loggingSettings.setJsonEntity("" +
"{" +
- "\"transient\" : {\n" +
+ "\"persistent\" : {\n" +
" \"logger.org.elasticsearch.xpack.ml.inference\" : null\n" +
" }" +
"}");

@@ -390,7 +390,7 @@ public class MlJobIT extends ESRestTestCase {

Request disablePersistentTaskAssignmentRequest = new Request("PUT", "_cluster/settings");
disablePersistentTaskAssignmentRequest.setJsonEntity("{\n" +
- " \"transient\": {\n" +
+ " \"persistent\": {\n" +
" \"cluster.persistent_tasks.allocation.enable\": \"none\"\n" +
" }\n" +
"}");

@@ -411,7 +411,7 @@ public class MlJobIT extends ESRestTestCase {
// because otherwise this setting will cause many other tests to fail
Request enablePersistentTaskAssignmentRequest = new Request("PUT", "_cluster/settings");
enablePersistentTaskAssignmentRequest.setJsonEntity("{\n" +
- " \"transient\": {\n" +
+ " \"persistent\": {\n" +
" \"cluster.persistent_tasks.allocation.enable\": \"all\"\n" +
" }\n" +
"}");

@@ -31,7 +31,7 @@ public class InferenceProcessorIT extends InferenceTestCase {
public void enableLogging() throws IOException {
Request setTrace = new Request("PUT", "_cluster/settings");
setTrace.setJsonEntity(
- "{\"transient\": {\"logger.org.elasticsearch.xpack.ml.inference\": \"TRACE\"}}"
+ "{\"persistent\": {\"logger.org.elasticsearch.xpack.ml.inference\": \"TRACE\"}}"
);
assertThat(client().performRequest(setTrace).getStatusLine().getStatusCode(), equalTo(200));
}

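Editor's note, not part of the diff: for the low-level REST client tests above, the change reduces to the top-level key of the JSON body. A minimal sketch, assuming an Elasticsearch 7.x RestClient is available; the class name is hypothetical.

import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

final class MlInferenceLoggingSketch {
    // Raise the logger level via the "persistent" block of the cluster settings API.
    static void enableTrace(RestClient client) throws IOException {
        Request put = new Request("PUT", "/_cluster/settings");
        put.setJsonEntity("{\"persistent\": {\"logger.org.elasticsearch.xpack.ml.inference\": \"TRACE\"}}");
        client.performRequest(put);
    }

    // Setting the same key to null removes it again.
    static void reset(RestClient client) throws IOException {
        Request reset = new Request("PUT", "/_cluster/settings");
        reset.setJsonEntity("{\"persistent\": {\"logger.org.elasticsearch.xpack.ml.inference\": null}}");
        client.performRequest(reset);
    }
}
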
@@ -71,7 +71,7 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
- .setTransientSettings(Settings.builder()
+ .setPersistentSettings(Settings.builder()
.put("logger.org.elasticsearch.xpack.ml.action.TransportCloseJobAction", "TRACE")
.put("logger.org.elasticsearch.xpack.ml.action.TransportOpenJobAction", "TRACE")
.put("logger.org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor", "TRACE")

@@ -84,7 +84,7 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
- .setTransientSettings(Settings.builder()
+ .setPersistentSettings(Settings.builder()
.putNull("logger.org.elasticsearch.xpack.ml.action.TransportCloseJobAction")
.putNull("logger.org.elasticsearch.xpack.ml.action.TransportOpenJobAction")
.putNull("logger.org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor")

@@ -288,7 +288,7 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase {

int maxConcurrentJobAllocations = randomIntBetween(1, 4);
client().admin().cluster().prepareUpdateSettings()
- .setTransientSettings(Settings.builder()
+ .setPersistentSettings(Settings.builder()
.put(MachineLearning.CONCURRENT_JOB_ALLOCATIONS.getKey(), maxConcurrentJobAllocations))
.get();

@@ -115,7 +115,7 @@ public class MlDistributedFailureIT extends BaseMlIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
- .setTransientSettings(Settings.builder()
+ .setPersistentSettings(Settings.builder()
.put("logger.org.elasticsearch.xpack.ml.utils.persistence", "TRACE")
.build()).get();
}

@@ -125,7 +125,7 @@ public class MlDistributedFailureIT extends BaseMlIntegTestCase {
client().admin()
.cluster()
.prepareUpdateSettings()
- .setTransientSettings(Settings.builder()
+ .setPersistentSettings(Settings.builder()
.putNull("logger.org.elasticsearch.xpack.ml.utils.persistence")
.build()).get();
}

@@ -81,7 +81,7 @@ public class TooManyJobsIT extends BaseMlIntegTestCase {
assertTrue(client().admin()
.cluster()
.prepareUpdateSettings()
- .setTransientSettings(
+ .setPersistentSettings(
Settings.builder()
.put(MachineLearning.MAX_LAZY_ML_NODES.getKey(), maxNumberOfLazyNodes))
.get()

@@ -144,7 +144,7 @@ public class TooManyJobsIT extends BaseMlIntegTestCase {
boolean expectMemoryLimitBeforeCountLimit = maxJobsPerNodeDueToMemoryLimit < maxNumberOfJobsPerNode;
for (int i = 1; i <= (clusterWideMaxNumberOfJobs + 1); i++) {
if (i == 2 && testDynamicChange) {
- ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest().transientSettings(
+ ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest().persistentSettings(
Settings.builder().put(MachineLearning.MAX_OPEN_JOBS_PER_NODE.getKey(), maxNumberOfJobsPerNode).build());
client().execute(ClusterUpdateSettingsAction.INSTANCE, clusterUpdateSettingsRequest).actionGet();
}

@@ -140,7 +140,7 @@ public class HttpExporterSslIT extends MonitoringIntegTestCase {
final ActionFuture<ClusterUpdateSettingsResponse> future = setVerificationMode("plaintext", VerificationMode.CERTIFICATE);
final ClusterUpdateSettingsResponse response = future.actionGet();
assertThat(response, notNullValue());
- clearTransientSettings("plaintext");
+ clearPersistentSettings("plaintext");
}

public void testCanAddNewExporterWithSsl() {

@@ -155,13 +155,13 @@ public class HttpExporterSslIT extends MonitoringIntegTestCase {
.put("xpack.monitoring.exporters._new.ssl.truststore.password", "testnode")
.put("xpack.monitoring.exporters._new.ssl.verification_mode", VerificationMode.CERTIFICATE.name())
.build();
- updateSettings.transientSettings(settings);
+ updateSettings.persistentSettings(settings);
final ActionFuture<ClusterUpdateSettingsResponse> future = client().admin().cluster().updateSettings(updateSettings);
final ClusterUpdateSettingsResponse response = future.actionGet();
assertThat(response, notNullValue());

assertExporterExists("_new");
- clearTransientSettings("_new");
+ clearPersistentSettings("_new");
}

private void assertExporterExists(String secure) {

@@ -183,17 +183,17 @@ public class HttpExporterSslIT extends MonitoringIntegTestCase {
.put("xpack.monitoring.exporters." + name + ".host", "https://" + webServer.getHostName() + ":" + webServer.getPort())
.put("xpack.monitoring.exporters." + name + ".ssl.verification_mode", mode.name())
.build();
- updateSettings.transientSettings(settings);
+ updateSettings.persistentSettings(settings);
return client().admin().cluster().updateSettings(updateSettings);
}

- private void clearTransientSettings(String... names) {
+ private void clearPersistentSettings(String... names) {
final ClusterUpdateSettingsRequest updateSettings = new ClusterUpdateSettingsRequest();
final Settings.Builder builder = Settings.builder();
for (String name : names) {
builder.put("xpack.monitoring.exporters." + name + ".*", (String) null);
}
- updateSettings.transientSettings(builder.build());
+ updateSettings.persistentSettings(builder.build());
client().admin().cluster().updateSettings(updateSettings).actionGet();
}

@@ -112,8 +112,8 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
}

private void stopMonitoring() {
- // Clean up any transient settings we have added
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder()
+ // Clean up any persistent settings we have added
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder()
.putNull(MonitoringService.ENABLED.getKey())
.putNull("xpack.monitoring.elasticsearch.collection.enabled")
.putNull("xpack.monitoring.exporters._local.type")

@@ -143,7 +143,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);

// enable local exporter
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

// ensure resources exist
ensureInitialLocalResources();

@@ -180,7 +180,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);

// enable local exporter
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

// ensure resources exist
ensureInitialLocalResources();

@@ -226,7 +226,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);

// enable local exporter
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

// ensure resources exist
ensureInitialLocalResources();

@@ -237,7 +237,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters._local.type", LocalExporter.TYPE)
.put("xpack.monitoring.exporters._local.enabled", false)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(disableSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(disableSettings));

// call migration api
MonitoringMigrateAlertsResponse response = client().execute(MonitoringMigrateAlertsAction.INSTANCE,

@@ -269,7 +269,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", true);

// enable local exporter
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

// ensure resources exist
ensureInitialLocalResources();

@@ -280,7 +280,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters._local.type", LocalExporter.TYPE)
.put("xpack.monitoring.exporters._local.enabled", true)
.put("xpack.monitoring.exporters._local.cluster_alerts.management.enabled", false);
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(disableSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(disableSettings));

// call migration api
MonitoringMigrateAlertsResponse response = client().execute(MonitoringMigrateAlertsAction.INSTANCE,

@@ -312,7 +312,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);

// enable http exporter
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

// enqueue delete request expectations for alerts
enqueueWatcherResponses(webServer, true);

@@ -350,7 +350,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);

// configure disabled http exporter
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

// enqueue delete request expectations for alerts
enqueueWatcherResponses(webServer, true);

@@ -388,7 +388,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);

// create a disabled http exporter
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

// call migration api
MonitoringMigrateAlertsResponse response = client().execute(MonitoringMigrateAlertsAction.INSTANCE,

@@ -421,7 +421,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);

// enable http exporter
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

// enqueue a "watcher available" response, but then a "failure to delete watch" response
enqueueResponse(webServer, 200, "{\"features\":{\"watcher\":{\"available\":true,\"enabled\":true}}}");

@@ -460,7 +460,7 @@ public class TransportMonitoringMigrateAlertsActionTests extends MonitoringInteg
.put("xpack.monitoring.exporters.remoteCluster.cluster_alerts.management.enabled", true);

// enable http exporter
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

// enqueue a "watcher available" response, but then a "failure to delete watch" response
enqueueWatcherResponses(webServer, false);

@@ -65,7 +65,7 @@ public class LocalExporterIntegTests extends LocalExporterIntegTestCase {

private void stopMonitoring() {
// Now disabling the monitoring service, so that no more collection are started
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(
Settings.builder().putNull(MonitoringService.ENABLED.getKey())
.putNull("xpack.monitoring.exporters._local.type")
.putNull("xpack.monitoring.exporters._local.enabled")

@@ -97,7 +97,7 @@ public class LocalExporterIntegTests extends LocalExporterIntegTestCase {
}

// local exporter is now enabled
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(exporterSettings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(exporterSettings));

if (randomBoolean()) {
// export some documents now, before starting the monitoring service

@@ -254,7 +254,7 @@ public class LocalExporterIntegTests extends LocalExporterIntegTestCase {
*/
private void checkMonitoringDocs() {
ClusterStateResponse response = client().admin().cluster().prepareState().get();
- String customTimeFormat = response.getState().getMetadata().transientSettings()
+ String customTimeFormat = response.getState().getMetadata().persistentSettings()
.get("xpack.monitoring.exporters._local.index.name.time_format");
assertEquals(indexTimeFormat, customTimeFormat);
if (customTimeFormat == null) {

@@ -104,7 +104,7 @@ public class SearchableSnapshotAllocationIntegTests extends BaseSearchableSnapsh
client().admin()
.cluster()
.prepareUpdateSettings()
- .setTransientSettings(
+ .setPersistentSettings(
Settings.builder()
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), allocation.name())
.build()

@@ -18,13 +18,13 @@ public class DestructiveOperationsTests extends SecurityIntegTestCase {
@After
public void afterTest() {
Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), (String)null).build();
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

public void testDeleteIndexDestructiveOperationsRequireName() {
createIndex("index1");
Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true).build();
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
{
IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class,
() -> client().admin().indices().prepareDelete("*").get());

@@ -58,7 +58,7 @@ public class DestructiveOperationsTests extends SecurityIntegTestCase {
public void testDestructiveOperationsDefaultBehaviour() {
if (randomBoolean()) {
Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false).build();
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}
createIndex("index1", "index2");

@@ -85,7 +85,7 @@ public class DestructiveOperationsTests extends SecurityIntegTestCase {

public void testOpenCloseIndexDestructiveOperationsRequireName() {
Settings settings = Settings.builder().put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true).build();
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
{
IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class,
() -> client().admin().indices().prepareClose("*").get());

@@ -80,7 +80,7 @@ public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase {
final LoggingAuditTrail auditTrail = new LoggingAuditTrail(settingsBuilder.build(), clusterService, logger, threadContext);
final String expected = auditTrail.eventFilterPolicyRegistry.toString();
// update settings on internal cluster
- updateSettings(updateFilterSettings, randomBoolean());
+ updateSettings(updateFilterSettings);
final String actual = ((LoggingAuditTrail) internalCluster().getInstances(AuditTrailService.class)
.iterator()
.next()

@@ -100,7 +100,7 @@ public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase {
"xpack.security.audit.logfile.events.ignore_filters.invalid.actions"};
settingsBuilder.put(randomFrom(allSettingsKeys), invalidLuceneRegex);
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
- () -> client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder.build()).get());
+ () -> client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settingsBuilder.build()).get());
assertThat(e.getMessage(), containsString("invalid pattern [/invalid]"));
}

@@ -111,7 +111,7 @@ public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase {
settingsBuilder.put(LoggingAuditTrail.EMIT_HOST_NAME_SETTING.getKey(), true);
settingsBuilder.put(LoggingAuditTrail.EMIT_NODE_NAME_SETTING.getKey(), true);
settingsBuilder.put(LoggingAuditTrail.EMIT_NODE_ID_SETTING.getKey(), true);
- updateSettings(settingsBuilder.build(), persistent);
+ updateSettings(settingsBuilder.build());
final LoggingAuditTrail loggingAuditTrail = (LoggingAuditTrail) internalCluster().getInstances(AuditTrailService.class)
.iterator()
.next()

@@ -123,25 +123,25 @@ public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase {
assertThat(loggingAuditTrail.entryCommonFields.commonFields.get(LoggingAuditTrail.HOST_ADDRESS_FIELD_NAME), is("127.0.0.1"));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.get(LoggingAuditTrail.HOST_NAME_FIELD_NAME), is("127.0.0.1"));
settingsBuilder.put(LoggingAuditTrail.EMIT_HOST_ADDRESS_SETTING.getKey(), false);
- updateSettings(settingsBuilder.build(), persistent);
+ updateSettings(settingsBuilder.build());
assertThat(loggingAuditTrail.entryCommonFields.commonFields.get(LoggingAuditTrail.NODE_NAME_FIELD_NAME), startsWith("node_"));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.NODE_ID_FIELD_NAME), is(true));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.HOST_ADDRESS_FIELD_NAME), is(false));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.get(LoggingAuditTrail.HOST_NAME_FIELD_NAME), is("127.0.0.1"));
settingsBuilder.put(LoggingAuditTrail.EMIT_HOST_NAME_SETTING.getKey(), false);
- updateSettings(settingsBuilder.build(), persistent);
+ updateSettings(settingsBuilder.build());
assertThat(loggingAuditTrail.entryCommonFields.commonFields.get(LoggingAuditTrail.NODE_NAME_FIELD_NAME), startsWith("node_"));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.NODE_ID_FIELD_NAME), is(true));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.HOST_ADDRESS_FIELD_NAME), is(false));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.HOST_NAME_FIELD_NAME), is(false));
settingsBuilder.put(LoggingAuditTrail.EMIT_NODE_NAME_SETTING.getKey(), false);
- updateSettings(settingsBuilder.build(), persistent);
+ updateSettings(settingsBuilder.build());
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.NODE_NAME_FIELD_NAME), is(false));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.NODE_ID_FIELD_NAME), is(true));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.HOST_ADDRESS_FIELD_NAME), is(false));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.HOST_NAME_FIELD_NAME), is(false));
settingsBuilder.put(LoggingAuditTrail.EMIT_NODE_ID_SETTING.getKey(), false);
- updateSettings(settingsBuilder.build(), persistent);
+ updateSettings(settingsBuilder.build());
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.NODE_NAME_FIELD_NAME), is(false));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.NODE_ID_FIELD_NAME), is(false));
assertThat(loggingAuditTrail.entryCommonFields.commonFields.containsKey(LoggingAuditTrail.HOST_ADDRESS_FIELD_NAME), is(false));

@@ -149,11 +149,10 @@ public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase {
}

public void testDynamicRequestBodySettings() {
- final boolean persistent = randomBoolean();
final boolean enableRequestBody = randomBoolean();
final Settings.Builder settingsBuilder = Settings.builder();
settingsBuilder.put(LoggingAuditTrail.INCLUDE_REQUEST_BODY.getKey(), enableRequestBody);
- updateSettings(settingsBuilder.build(), persistent);
+ updateSettings(settingsBuilder.build());
final LoggingAuditTrail loggingAuditTrail = (LoggingAuditTrail) internalCluster().getInstances(AuditTrailService.class)
.iterator()
.next()

@@ -162,7 +161,7 @@ public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase {
.next();
assertEquals(enableRequestBody, loggingAuditTrail.includeRequestBody);
settingsBuilder.put(LoggingAuditTrail.INCLUDE_REQUEST_BODY.getKey(), enableRequestBody == false);
- updateSettings(settingsBuilder.build(), persistent);
+ updateSettings(settingsBuilder.build());
assertEquals(enableRequestBody == false, loggingAuditTrail.includeRequestBody);
}

@@ -175,7 +174,7 @@ public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase {
final Settings.Builder settingsBuilder = Settings.builder();
settingsBuilder.putList(LoggingAuditTrail.INCLUDE_EVENT_SETTINGS.getKey(), includedEvents);
settingsBuilder.putList(LoggingAuditTrail.EXCLUDE_EVENT_SETTINGS.getKey(), excludedEvents);
- updateSettings(settingsBuilder.build(), randomBoolean());
+ updateSettings(settingsBuilder.build());
final LoggingAuditTrail loggingAuditTrail = (LoggingAuditTrail) internalCluster().getInstances(AuditTrailService.class)
.iterator()
.next()

@@ -185,12 +184,8 @@ public class AuditTrailSettingsUpdateTests extends SecurityIntegTestCase {
assertEquals(AuditLevel.parse(includedEvents, excludedEvents), loggingAuditTrail.events);
}

- private void updateSettings(Settings settings, boolean persistent) {
- if (persistent) {
- assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
- } else {
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
- }
+ private void updateSettings(Settings settings) {
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

private static List<String> randomNonEmptyListOfFilteredNames(String... namePrefix) {

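Editor's note, not part of the diff: with the randomized transient/persistent branches removed, the internal-cluster tests above converge on one idiom. A sketch under the assumption that it sits inside an ESIntegTestCase subclass, where client() and assertAcked are in scope; the method names are hypothetical.

import org.elasticsearch.common.settings.Settings;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

// Apply a persistent cluster setting for the duration of a test ...
private void putPersistent(String key, String value) {
    assertAcked(client().admin().cluster().prepareUpdateSettings()
        .setPersistentSettings(Settings.builder().put(key, value)));
}

// ... and null it out again (typically from an @After method) so no state leaks between tests.
private void clearPersistent(String key) {
    assertAcked(client().admin().cluster().prepareUpdateSettings()
        .setPersistentSettings(Settings.builder().putNull(key)));
}
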
@@ -215,7 +215,7 @@ public class ApiKeyIntegTests extends SecurityIntegTestCase {
client().filterWithHeader(Collections.singletonMap("Authorization", "ApiKey " + base64ApiKeyKeyValue))
.admin()
.cluster()
- .prepareUpdateSettings().setTransientSettings(Settings.builder().put(IPFilter.IP_FILTER_ENABLED_SETTING.getKey(), true))
+ .prepareUpdateSettings().setPersistentSettings(Settings.builder().put(IPFilter.IP_FILTER_ENABLED_SETTING.getKey(), true))
.get());
assertThat(e.getMessage(), containsString("unauthorized"));
assertThat(e.status(), is(RestStatus.FORBIDDEN));

@@ -117,9 +117,7 @@ public class IpFilteringUpdateTests extends SecurityIntegTestCase {
settings = Settings.builder()
.put(IPFilter.IP_FILTER_ENABLED_HTTP_SETTING.getKey(), false)
.build();
- // as we permanently switch between persistent and transient settings, just set both here to make sure we overwrite
assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
assertConnectionAccepted(".http", "127.0.0.8");
}
}

@@ -190,11 +188,7 @@ public class IpFilteringUpdateTests extends SecurityIntegTestCase {


private void updateSettings(Settings settings) {
- if (randomBoolean()) {
- assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
- } else {
- assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
- }
+ assertAcked(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(settings));
}

private void assertConnectionAccepted(String profile, String host) throws UnknownHostException {

@@ -130,7 +130,7 @@ public class AbstractXPackRestTest extends ESClientYamlSuiteTestCase {
settings.put("xpack.monitoring.exporters._local.enabled", true);

awaitCallApi("cluster.put_settings", emptyMap(),
- singletonList(singletonMap("transient", settings)),
+ singletonList(singletonMap("persistent", settings)),
response -> {
Object acknowledged = response.evaluate("acknowledged");
return acknowledged != null && (Boolean) acknowledged;

@@ -156,7 +156,7 @@ public class AbstractXPackRestTest extends ESClientYamlSuiteTestCase {
settings.put("xpack.monitoring.exporters._local.enabled", null);

awaitCallApi("cluster.put_settings", emptyMap(),
- singletonList(singletonMap("transient", settings)),
+ singletonList(singletonMap("persistent", settings)),
response -> {
Object acknowledged = response.evaluate("acknowledged");
return acknowledged != null && (Boolean) acknowledged;

@@ -39,7 +39,7 @@ public class TestFeatureResetIT extends TransformIntegTestCase {
public void setLogging() throws IOException {
Request settingsRequest = new Request("PUT", "/_cluster/settings");
settingsRequest.setJsonEntity(
- "{\"transient\": {"
+ "{\"persistent\": {"
+ "\"logger.org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer\": \"debug\","
+ "\"logger.org.elasticsearch.xpack.transform\": \"trace\"}}"
);

@@ -70,7 +70,7 @@ public class TransformIT extends TransformIntegTestCase {
public void setClusterSettings() throws IOException {
Request settingsRequest = new Request("PUT", "/_cluster/settings");
settingsRequest.setJsonEntity(
- "{\"transient\": {"
+ "{\"persistent\": {"
+ "\"logger.org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer\": \"debug\","
+ "\"logger.org.elasticsearch.xpack.transform\": \"debug\"}}"
);

@@ -114,7 +114,7 @@ public class TransformContinuousIT extends ESRestTestCase {
// see: https://github.com/elastic/elasticsearch/issues/45562
Request addFailureRetrySetting = new Request("PUT", "/_cluster/settings");
addFailureRetrySetting.setJsonEntity(
- "{\"transient\": {\"xpack.transform.num_transform_failure_retries\": \""
+ "{\"persistent\": {\"xpack.transform.num_transform_failure_retries\": \""
+ 0
+ "\","
+ "\"logger.org.elasticsearch.action.bulk\": \"info\","

@@ -1517,7 +1517,7 @@ public class TransformPivotRestIT extends TransformRestTestCase {
public void testContinuousStopWaitForCheckpoint() throws Exception {
Request updateLoggingLevels = new Request("PUT", "/_cluster/settings");
updateLoggingLevels.setJsonEntity(
- "{\"transient\": {"
+ "{\"persistent\": {"
+ "\"logger.org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer\": \"trace\","
+ "\"logger.org.elasticsearch.xpack.transform\": \"trace\"}}"
);

@@ -38,7 +38,7 @@ public class TransformTaskFailedStateIT extends TransformRestTestCase {
// see: https://github.com/elastic/elasticsearch/issues/45562
Request addFailureRetrySetting = new Request("PUT", "/_cluster/settings");
addFailureRetrySetting.setJsonEntity(
- "{\"transient\": {\"xpack.transform.num_transform_failure_retries\": \""
+ "{\"persistent\": {\"xpack.transform.num_transform_failure_retries\": \""
+ 0
+ "\","
+ "\"logger.org.elasticsearch.action.bulk\": \"info\","

@@ -37,7 +37,7 @@ public class MonitoringWithWatcherRestIT extends ESRestTestCase {
public void cleanExporters() throws Exception {
Request cleanupSettingsRequest = new Request("PUT", "/_cluster/settings");
cleanupSettingsRequest.setJsonEntity(Strings.toString(jsonBuilder().startObject()
- .startObject("transient")
+ .startObject("persistent")
.nullField("xpack.monitoring.exporters.*")
.endObject().endObject()));
adminClient().performRequest(cleanupSettingsRequest);

@@ -50,7 +50,7 @@ public class MonitoringWithWatcherRestIT extends ESRestTestCase {

Request request = new Request("PUT", "/_cluster/settings");
request.setJsonEntity(Strings.toString(jsonBuilder().startObject()
- .startObject("transient")
+ .startObject("persistent")
.field("xpack.monitoring.exporters.my_local_exporter.type", "local")
.field("xpack.monitoring.exporters.my_local_exporter.cluster_alerts.management.enabled", true)
.endObject().endObject()));

@@ -67,7 +67,7 @@ public class MonitoringWithWatcherRestIT extends ESRestTestCase {

Request request = new Request("PUT", "/_cluster/settings");
request.setJsonEntity(Strings.toString(jsonBuilder().startObject()
- .startObject("transient")
+ .startObject("persistent")
.field("xpack.monitoring.exporters.my_http_exporter.type", "http")
.field("xpack.monitoring.exporters.my_http_exporter.host", httpHost)
.field("xpack.monitoring.exporters.my_http_exporter.cluster_alerts.management.enabled", true)

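Editor's note, not part of the diff: where the request body is built with XContent helpers rather than string literals, only the name of the enclosing object changes. A sketch assuming the 7.x jsonBuilder() and Strings utilities used in the hunks above; the class and method names are hypothetical.

import java.io.IOException;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;

final class PersistentSettingsBodySketch {
    // Builds {"persistent":{"xpack.monitoring.exporters.*":null}} as a JSON string.
    static String clearExportersBody() throws IOException {
        XContentBuilder builder = jsonBuilder().startObject()
            .startObject("persistent")
            .nullField("xpack.monitoring.exporters.*")
            .endObject().endObject();
        return Strings.toString(builder);
    }
}
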
@@ -95,7 +95,7 @@ public class MlJobSnapshotUpgradeIT extends AbstractUpgradeTestCase {
hlrc = new HLRC(client()).machineLearning();
Request adjustLoggingLevels = new Request("PUT", "/_cluster/settings");
adjustLoggingLevels.setJsonEntity(
- "{\"transient\": {" +
+ "{\"persistent\": {" +
"\"logger.org.elasticsearch.xpack.ml\": \"trace\"" +
"}}");
client().performRequest(adjustLoggingLevels);

@@ -128,7 +128,7 @@ public class TransformSurvivesUpgradeIT extends AbstractUpgradeTestCase {
assumeTrue("Continuous transform time sync not fixed until 7.4", UPGRADE_FROM_VERSION.onOrAfter(Version.V_7_4_0));
Request adjustLoggingLevels = new Request("PUT", "/_cluster/settings");
adjustLoggingLevels.setJsonEntity(
- "{\"transient\": {" +
+ "{\"persistent\": {" +
"\"logger.org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer\": \"trace\"," +
"\"logger.org.elasticsearch.xpack.dataframe\": \"trace\"," +
"\"logger.org.elasticsearch.xpack.transform\": \"trace\"" +