Mirror of https://github.com/elastic/elasticsearch.git (synced 2025-04-24 23:27:25 -04:00)
This PR replaces uses of transient cluster settings with persistent cluster settings, and deprecates transient settings usage. Relates to #49540
This commit is contained in:
  parent 4a9e95bbc0
  commit 8512037aaa

27 changed files with 225 additions and 90 deletions
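At the REST layer, the migration amounts to moving settings from the `transient` block to the `persistent` block of the cluster update settings request. A minimal before/after sketch using the low-level REST client (`restClient` is an assumed, already-built `RestClient`; the setting key mirrors the docs hunks below):

[source,java]
----
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;

// Deprecated shape: transient settings are lost on a full cluster restart and
// now draw a deprecation warning (see RestClusterUpdateSettingsAction below).
Request deprecated = new Request("PUT", "/_cluster/settings");
deprecated.setJsonEntity("{\"transient\": {\"indices.lifecycle.poll_interval\": \"1m\"}}");

// Preferred shape: persistent settings survive restarts.
Request preferred = new Request("PUT", "/_cluster/settings");
preferred.setJsonEntity("{\"persistent\": {\"indices.lifecycle.poll_interval\": \"1m\"}}");

Response response = restClient.performRequest(preferred);
----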
@@ -75,9 +75,10 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
         ClusterUpdateSettingsRequest setRequest = new ClusterUpdateSettingsRequest();
         setRequest.transientSettings(transientSettings);
         setRequest.persistentSettings(map);
+        RequestOptions options = RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE).build();
 
         ClusterUpdateSettingsResponse setResponse = execute(setRequest, highLevelClient().cluster()::putSettings,
-            highLevelClient().cluster()::putSettingsAsync);
+            highLevelClient().cluster()::putSettingsAsync, options);
 
         assertAcked(setResponse);
         assertThat(setResponse.getTransientSettings().get(transientSettingKey), notNullValue());

@@ -99,7 +100,7 @@ public class ClusterClientIT extends ESRestHighLevelClientTestCase {
         resetRequest.persistentSettings("{\"" + persistentSettingKey + "\": null }", XContentType.JSON);
 
         ClusterUpdateSettingsResponse resetResponse = execute(resetRequest, highLevelClient().cluster()::putSettings,
-            highLevelClient().cluster()::putSettingsAsync);
+            highLevelClient().cluster()::putSettingsAsync, options);
 
         assertThat(resetResponse.getTransientSettings().get(transientSettingKey), equalTo(null));
         assertThat(resetResponse.getPersistentSettings().get(persistentSettingKey), equalTo(null));
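The `options` object added in this and the following test files is built with `WarningsHandler.PERMISSIVE`, which tells the client to never fail a request because of warning headers; a minimal sketch, assuming the 7.x high-level REST client API shown in the hunks:

[source,java]
----
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.WarningsHandler;

// PERMISSIVE never fails a request over warning headers, so the new
// transient-settings deprecation warning does not break these tests.
RequestOptions options = RequestOptions.DEFAULT.toBuilder()
    .setWarningsHandler(WarningsHandler.PERMISSIVE)
    .build();
----

Without it, the client's default strict handling fails any response that carries a warning header.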
@@ -221,8 +221,9 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
         ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
         request.persistentSettings(persistentSettings);
         request.transientSettings(transientSettings);
+        RequestOptions options = RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE).build();
         assertTrue(execute(
-            request, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync).isAcknowledged());
+            request, highLevelClient().cluster()::putSettings, highLevelClient().cluster()::putSettingsAsync, options).isAcknowledged());
     }
 
     protected void putConflictPipeline() throws IOException {

@@ -298,8 +299,9 @@ public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase {
 
         ClusterUpdateSettingsRequest updateSettingsRequest = new ClusterUpdateSettingsRequest();
         updateSettingsRequest.persistentSettings(singletonMap("cluster.remote." + remoteClusterName + ".seeds", transportAddress));
+        RequestOptions options = RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE).build();
         ClusterUpdateSettingsResponse updateSettingsResponse =
-            restHighLevelClient.cluster().putSettings(updateSettingsRequest, RequestOptions.DEFAULT);
+            restHighLevelClient.cluster().putSettings(updateSettingsRequest, options);
         assertThat(updateSettingsResponse.isAcknowledged(), is(true));
 
         assertBusy(() -> {
@@ -21,6 +21,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.WarningsHandler;
 import org.elasticsearch.client.cluster.RemoteConnectionInfo;
 import org.elasticsearch.client.cluster.RemoteInfoRequest;
 import org.elasticsearch.client.cluster.RemoteInfoResponse;

@@ -127,8 +128,9 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
         request.masterNodeTimeout("1m"); // <2>
         // end::put-settings-request-masterTimeout
 
+        RequestOptions options = RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE).build();
         // tag::put-settings-execute
-        ClusterUpdateSettingsResponse response = client.cluster().putSettings(request, RequestOptions.DEFAULT);
+        ClusterUpdateSettingsResponse response = client.cluster().putSettings(request, options);
         // end::put-settings-execute
 
         // tag::put-settings-response

@@ -144,7 +146,7 @@ public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase
         request.transientSettings(Settings.builder().putNull(transientSettingKey).build()); // <1>
         // tag::put-settings-request-reset-transient
         request.persistentSettings(Settings.builder().putNull(persistentSettingKey));
-        ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request, RequestOptions.DEFAULT);
+        ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request, options);
 
         assertTrue(resetResponse.isAcknowledged());
     }
@@ -4,7 +4,7 @@
 <titleabbrev>Cluster update settings</titleabbrev>
 ++++
 
 Updates cluster-wide settings.
 
 
 [[cluster-update-settings-api-request]]

@@ -21,21 +21,21 @@ Updates cluster-wide settings.
 [[cluster-update-settings-api-desc]]
 ==== {api-description-title}
 
 With specifications in the request body, this API call can update cluster
 settings. Updates to settings can be persistent, meaning they apply across
 restarts, or transient, where they don't survive a full cluster restart.
 
 You can reset persistent or transient settings by assigning a `null` value. If a
 transient setting is reset, the first one of these values that is defined is
 applied:
 
 * the persistent setting
 * the setting in the configuration file
 * the default value.
 
 The order of precedence for cluster settings is:
 
 1. transient cluster settings
 2. persistent cluster settings
 3. settings in the `elasticsearch.yml` configuration file.
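The precedence list in the hunk above reads as a simple lookup chain; an illustrative sketch (a hypothetical helper, not Elasticsearch's implementation):

[source,java]
----
import java.util.Map;

// Illustrative only: the lookup order the documentation describes.
class SettingPrecedence {
    static String resolve(String key,
                          Map<String, String> transientSettings,
                          Map<String, String> persistentSettings,
                          Map<String, String> fileSettings,
                          String defaultValue) {
        if (transientSettings.containsKey(key)) return transientSettings.get(key);   // 1. transient
        if (persistentSettings.containsKey(key)) return persistentSettings.get(key); // 2. persistent
        if (fileSettings.containsKey(key)) return fileSettings.get(key);             // 3. elasticsearch.yml
        return defaultValue;                                                         // 4. default
    }
}
----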
@@ -45,6 +45,8 @@ the setting is the same on all nodes. If, on the other hand, you define differen
 settings on different nodes by accident using the configuration file, it is very
 difficult to notice these discrepancies.
 
+NOTE: Transient settings are deprecated and will be removed in a future release.
+Prefer using persistent cluster settings instead.
 
 [[cluster-update-settings-api-query-params]]
 ==== {api-query-parms-title}

@@ -52,7 +54,7 @@ difficult to notice these discrepancies.
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=flat-settings]
 
 `include_defaults`::
 (Optional, Boolean) If `true`, returns all default cluster settings.
 Defaults to `false`.
 
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]

@@ -85,9 +87,9 @@ PUT /_cluster/settings?flat_settings=true
   }
 }
 --------------------------------------------------
+// TEST[warning:[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.]
 
 The response to an update returns the changed setting, as in this response to
 the transient example:
 
 [source,console-result]

@@ -114,6 +116,7 @@ PUT /_cluster/settings
   }
 }
 --------------------------------------------------
+// TEST[warning:[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.]
 
 The response does not include settings that have been reset:

@@ -141,3 +144,4 @@ PUT /_cluster/settings
   }
 }
 --------------------------------------------------
+// TEST[warning:[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.]
@@ -521,7 +521,7 @@ lowers the `indices.lifecycle.poll_interval` setting to `1m` (one minute).
 ----
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "indices.lifecycle.poll_interval": "1m"
   }
 }

@@ -657,7 +657,7 @@ The following update cluster settings API request resets the
 ----
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "indices.lifecycle.poll_interval": null
   }
 }
@@ -48,8 +48,8 @@ watermarks and remove the write block.
 [source,console]
 ----
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "cluster.routing.allocation.disk.watermark.low": "90%",
     "cluster.routing.allocation.disk.watermark.high": "95%",
     "cluster.routing.allocation.disk.watermark.flood_stage": "97%"

@@ -57,7 +57,7 @@ PUT _cluster/settings
 }
 
 PUT */_settings?expand_wildcards=all
 {
   "index.blocks.read_only_allow_delete": null
 }
 ----

@@ -79,8 +79,8 @@ When a long-term solution is in place, reset or reconfigure the disk watermarks.
 [source,console]
 ----
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "cluster.routing.allocation.disk.watermark.low": null,
     "cluster.routing.allocation.disk.watermark.high": null,
     "cluster.routing.allocation.disk.watermark.flood_stage": null

@@ -208,7 +208,7 @@ include::{es-repo-dir}/tab-widgets/cpu-usage-widget.asciidoc[]
 **Check hot threads**
 
 If a node has high CPU usage, use the <<cluster-nodes-hot-threads,nodes hot
 threads API>> to check for resource-intensive threads running on the node.
 
 [source,console]
 ----
@@ -329,7 +329,7 @@ cluster settings API>> and retry the action.
 ----
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "cluster.max_shards_per_node": 1200
   }
 }

@@ -353,7 +353,7 @@ When a long-term solution is in place, we recommend you reset the
 ----
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "cluster.max_shards_per_node": null
   }
 }
@@ -6,51 +6,51 @@
 If you've been using Curator or some other mechanism to manage periodic indices,
 you have a couple options when migrating to {ilm-init}:
 
 * Set up your index templates to use an {ilm-init} policy to manage your new indices.
 Once {ilm-init} is managing your current write index, you can apply an appropriate policy to your old indices.
 
 * Reindex into an {ilm-init}-managed index.
 
 NOTE: Starting in Curator version 5.7, Curator ignores {ilm-init} managed indices.
 
 [discrete]
 [[ilm-existing-indices-apply]]
 === Apply policies to existing time series indices
 
 The simplest way to transition to managing your periodic indices with {ilm-init} is
 to <<apply-policy-template, configure an index template>> to apply a lifecycle policy to new indices.
 Once the index you are writing to is being managed by {ilm-init},
 you can <<apply-policy-multiple, manually apply a policy>> to your older indices.
 
 Define a separate policy for your older indices that omits the rollover action.
 Rollover is used to manage where new data goes, so isn't applicable.
 
 Keep in mind that policies applied to existing indices compare the `min_age` for each phase to
 the original creation date of the index, and might proceed through multiple phases immediately.
 If your policy performs resource-intensive operations like force merge,
 you don't want to have a lot of indices performing those operations all at once
 when you switch over to {ilm-init}.
 
 You can specify different `min_age` values in the policy you use for existing indices,
 or set <<index-lifecycle-origination-date, `index.lifecycle.origination_date`>>
 to control how the index age is calculated.
 
 Once all pre-{ilm-init} indices have been aged out and removed,
 you can delete the policy you used to manage them.
 
 NOTE: If you are using {beats} or {ls}, enabling {ilm-init} in version 7.0 and onward
 sets up {ilm-init} to manage new indices automatically.
 If you are using {beats} through {ls},
 you might need to change your {ls} output configuration and invoke the {beats} setup
 to use {ilm-init} for new data.
 
 [discrete]
 [[ilm-existing-indices-reindex]]
 === Reindex into a managed index
 
 An alternative to <<ilm-with-existing-periodic-indices,applying policies to existing indices>> is to
 reindex your data into an {ilm-init}-managed index.
 You might want to do this if creating periodic indices with very small amounts of data
 has led to excessive shard counts, or if continually indexing into the same index has led to large shards
 and performance issues.
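The `min_age`/`origination_date` guidance in the hunk above ships without a snippet; a minimal sketch of setting the origination date on an existing index with the high-level REST client (`client`, the index name, and the timestamp are all assumptions):

[source,java]
----
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.common.settings.Settings;

// Compute phase ages from the data's original age rather than the index
// creation date, so old indices don't run resource-intensive actions at once.
UpdateSettingsRequest request = new UpdateSettingsRequest("my-old-index-2021.09.01");
request.settings(Settings.builder()
    .put("index.lifecycle.origination_date", 1630454400000L)); // epoch millis, assumed value
AcknowledgedResponse response = client.indices().putSettings(request, RequestOptions.DEFAULT);
----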
@@ -58,24 +58,24 @@ First, you need to set up the new {ilm-init}-managed index:
 
 . Update your index template to include the necessary {ilm-init} settings.
 . Bootstrap an initial index as the write index.
 . Stop writing to the old indices and index new documents using the alias that points to bootstrapped index.
 
 To reindex into the managed index:
 
 . Pause indexing new documents if you do not want to mix new and old data in the {ilm-init}-managed index.
 Mixing old and new data in one index is safe,
 but a combined index needs to be retained until you are ready to delete the new data.
 
 . Reduce the {ilm-init} poll interval to ensure that the index doesn't
 grow too large while waiting for the rollover check.
 By default, {ilm-init} checks to see what actions need to be taken every 10 minutes.
 +
 --
 [source,console]
 -----------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "indices.lifecycle.poll_interval": "1m" <1>
   }
 }

@@ -84,13 +84,13 @@ PUT _cluster/settings
 <1> Check once a minute to see if {ilm-init} actions such as rollover need to be performed.
 --
 
 . Reindex your data using the <<docs-reindex,reindex API>>.
 If you want to partition the data in the order in which it was originally indexed,
 you can run separate reindex requests.
 +
 --
 IMPORTANT: Documents retain their original IDs. If you don't use automatically generated document IDs,
 and are reindexing from multiple source indices, you might need to do additional processing to
 ensure that document IDs don't conflict. One way to do this is to use a
 <<reindex-scripts,script>> in the reindex call to append the original index name
 to the document ID.

@@ -174,19 +174,19 @@ POST _reindex
 <1> Matches your existing indices. Using the prefix for
 the new indices makes using this index pattern much easier.
 <2> The alias that points to your bootstrapped index.
 <3> Halts reindexing if multiple documents have the same ID.
 This is recommended to prevent accidentally overwriting documents
 if documents in different source indices have the same ID.
 --
 
 . When reindexing is complete, set the {ilm-init} poll interval back to its default value to
 prevent unnecessary load on the master node:
 +
 [source,console]
 -----------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "indices.lifecycle.poll_interval": null
   }
 }

@@ -198,5 +198,5 @@ PUT _cluster/settings
 +
 Querying using this alias will now search your new data and all of the reindexed data.
 
 . Once you have verified that all of the reindexed data is available in the new managed indices,
 you can safely remove the old indices.
@@ -331,7 +331,7 @@ server log.
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "logger.org.elasticsearch.ingest.common.GrokProcessor": "debug"
   }
 }
@@ -74,6 +74,7 @@ PUT /_cluster/settings
   }
 }
 --------------------------------------------------
+// TEST[warning:[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.]
 
 
 [discrete]
@@ -166,9 +166,9 @@ each data path.
 
 // tag::mdp-migration[]
 If you currently use multiple data paths in a
 {ref}/high-availability-cluster-design.html[highly available cluster] then you
 can migrate to a setup that uses a single path for each node without downtime
 using a process similar to a
 {ref}/restart-cluster.html#restart-cluster-rolling[rolling restart]: shut each
 node down in turn and replace it with one or more nodes each configured to use
 a single data path. In more detail, for each node that currently has multiple

@@ -188,13 +188,14 @@ PUT _cluster/settings
 }
 }
 --------------------------------------------------
+// TEST[warning:[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.]
 +
 You can use the {ref}/cat-allocation.html[cat allocation API] to track progress
 of this data migration. If some shards do not migrate then the
 {ref}/cluster-allocation-explain.html[cluster allocation explain API] will help
 you to determine why.
 
 3. Follow the steps in the
 {ref}/restart-cluster.html#restart-cluster-rolling[rolling restart process]
 up to and including shutting the target node down.

@@ -212,6 +213,7 @@ PUT _cluster/settings
 }
 }
 --------------------------------------------------
+// TEST[warning:[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.]
 
 6. Discard the data held by the stopped node by deleting the contents of its
 data paths.

@@ -225,7 +227,7 @@ has sufficient space for the data that it will hold.
 `path.data` setting pointing at a separate data path.
 
 9. Start the new nodes and follow the rest of the
 {ref}/restart-cluster.html#restart-cluster-rolling[rolling restart process] for
 them.
 
 10. Ensure your cluster health is `green`, so that every shard has been

@@ -233,9 +235,9 @@ assigned.
 
 You can alternatively add some number of single-data-path nodes to your
 cluster, migrate all your data over to these new nodes using
 {ref}/modules-cluster.html#cluster-shard-allocation-filtering[allocation filters],
 and then remove the old nodes from the cluster. This approach will temporarily
 double the size of your cluster so it will only work if you have the capacity to
 expand your cluster like this.
 
 If you currently use multiple data paths but your cluster is not highly
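The migration steps above point at the cat allocation API for progress tracking; a sketch of polling it from the low-level REST client (`lowLevelClient` is an assumed `RestClient`):

[source,java]
----
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;

// Poll cat allocation while shards drain off the old data paths; the shard
// count for the node being emptied should fall to zero.
Request catAllocation = new Request("GET", "/_cat/allocation?v=true");
Response response = lowLevelClient.performRequest(catAllocation);
System.out.println(EntityUtils.toString(response.getEntity()));
----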
@@ -22,7 +22,7 @@ it down, you could create a filter that excludes the node by its IP address:
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "cluster.routing.allocation.exclude._ip" : "10.0.0.1"
   }
 }

@@ -70,7 +70,7 @@ You can use wildcards when specifying attribute values, for example:
 ------------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "cluster.routing.allocation.exclude._ip": "192.168.2.*"
   }
 }
@@ -157,7 +157,7 @@ gigabytes free, and updating the information about the cluster every minute:
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient": {
+  "persistent": {
     "cluster.routing.allocation.disk.watermark.low": "100gb",
     "cluster.routing.allocation.disk.watermark.high": "50gb",
     "cluster.routing.allocation.disk.watermark.flood_stage": "10gb",
@@ -159,7 +159,7 @@ The settings which control logging can be updated <<dynamic-cluster-setting,dyna
 -------------------------------
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "logger.org.elasticsearch.indices.recovery": "DEBUG"
   }
 }
@@ -16,7 +16,7 @@ the `org.elasticsearch.http.HttpTracer` logger to `TRACE`:
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "logger.org.elasticsearch.http.HttpTracer" : "TRACE"
   }
 }

@@ -29,7 +29,7 @@ exclude wildcard patterns. By default every request will be traced.
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "http.tracer.include" : "*",
     "http.tracer.exclude" : ""
   }

@@ -47,7 +47,7 @@ requests and responses. Activate the tracer by setting the level of the
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "logger.org.elasticsearch.transport.TransportService.tracer" : "TRACE"
   }
 }

@@ -61,7 +61,7 @@ fault detection pings:
 --------------------------------------------------
 PUT _cluster/settings
 {
-  "transient" : {
+  "persistent" : {
     "transport.tracer.include" : "*",
     "transport.tracer.exclude" : "internal:coordination/fault_detection/*"
   }
@@ -140,6 +140,9 @@ settings API and use `elasticsearch.yml` only for local configurations. Using
 the cluster update settings API ensures the setting is the same on all nodes. If
 you accidentally configure different settings in `elasticsearch.yml` on
 different nodes, it can be difficult to notice discrepancies.
 
+NOTE: Transient settings are deprecated and will be removed in a future release.
+Prefer using persistent cluster settings instead.
+
 --
 
 [[static-cluster-setting]]
@@ -155,7 +155,7 @@ only intended for expert use.
 ----
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "logger.org.elasticsearch.discovery": "DEBUG"
   }
 }
@@ -1,6 +1,13 @@
 ---
 "Test put and reset transient settings":
+  - skip:
+      version: " - 7.15.99"
+      reason: "transient settings deprecation"
+      features: "warnings"
+
   - do:
+      warnings:
+        - "[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead."
       cluster.put_settings:
         body:
           transient:

@@ -16,6 +23,8 @@
   - match: {transient: {cluster.routing.allocation.enable: "none"}}
 
   - do:
+      warnings:
+        - "[transient settings removal] Updating cluster settings through transientSettings is deprecated. Use persistent settings instead."
       cluster.put_settings:
         body:
           transient:
@@ -64,6 +64,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
         return validationException;
     }
 
+    /**
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
+     */
+    @Deprecated
     public Settings transientSettings() {
         return transientSettings;
     }

@@ -74,7 +79,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 
     /**
      * Sets the transient settings to be updated. They will not survive a full cluster restart
+     *
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
      */
+    @Deprecated
     public ClusterUpdateSettingsRequest transientSettings(Settings settings) {
         this.transientSettings = settings;
         return this;

@@ -82,7 +91,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 
     /**
      * Sets the transient settings to be updated. They will not survive a full cluster restart
+     *
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
      */
+    @Deprecated
     public ClusterUpdateSettingsRequest transientSettings(Settings.Builder settings) {
         this.transientSettings = settings.build();
         return this;

@@ -90,7 +103,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 
     /**
      * Sets the source containing the transient settings to be updated. They will not survive a full cluster restart
+     *
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
      */
+    @Deprecated
     public ClusterUpdateSettingsRequest transientSettings(String source, XContentType xContentType) {
         this.transientSettings = Settings.builder().loadFromSource(source, xContentType).build();
         return this;

@@ -98,7 +115,11 @@ public class ClusterUpdateSettingsRequest extends AcknowledgedRequest<ClusterUpd
 
     /**
      * Sets the transient settings to be updated. They will not survive a full cluster restart
+     *
+     * @deprecated Transient settings are in the process of being removed. Use
+     * persistent settings to update your cluster settings instead.
      */
+    @Deprecated
     public ClusterUpdateSettingsRequest transientSettings(Map<String, ?> source) {
         this.transientSettings = Settings.builder().loadFromMap(source).build();
         return this;
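With the `@Deprecated` annotations above, callers that still use the transient setters compile with a deprecation warning; a hedged sketch of what call sites look like after this change (the setting key is an arbitrary example):

[source,java]
----
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
import org.elasticsearch.common.settings.Settings;

// Preferred path: no deprecation warning.
ClusterUpdateSettingsRequest preferred = new ClusterUpdateSettingsRequest();
preferred.persistentSettings(Settings.builder().put("cluster.routing.allocation.enable", "none"));

// Legacy path: still compiles, but javac now flags the call as deprecated.
@SuppressWarnings("deprecation")
ClusterUpdateSettingsRequest legacy = new ClusterUpdateSettingsRequest()
    .transientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none"));
----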
@@ -11,6 +11,8 @@ package org.elasticsearch.rest.action.admin.cluster;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.logging.DeprecationCategory;
+import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.xcontent.XContentParser;
 import org.elasticsearch.rest.BaseRestHandler;

@@ -26,6 +28,9 @@ import static java.util.Collections.singletonList;
 import static org.elasticsearch.rest.RestRequest.Method.PUT;
 
 public class RestClusterUpdateSettingsAction extends BaseRestHandler {
+    private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestClusterUpdateSettingsAction.class);
+    static final String TRANSIENT_SETTINGS_DEPRECATION_MESSAGE = "[transient settings removal]" +
+        " Updating cluster settings through transientSettings is deprecated. Use persistent settings instead.";
 
     private static final String PERSISTENT = "persistent";
     private static final String TRANSIENT = "transient";

@@ -52,7 +57,15 @@ public class RestClusterUpdateSettingsAction extends BaseRestHandler {
             source = parser.map();
         }
         if (source.containsKey(TRANSIENT)) {
-            clusterUpdateSettingsRequest.transientSettings((Map<String, ?>) source.get(TRANSIENT));
+            Map<String, ?> transientSettings = (Map<String, ?>) source.get(TRANSIENT);
+
+            // We check for empty transient settings map because ClusterUpdateSettingsRequest initializes
+            // each of the settings to an empty collection. When the RestClient is used, we'll get an empty
+            // transient settings map, even if we never set any transient settings.
+            if (transientSettings.isEmpty() == false) {
+                deprecationLogger.warn(DeprecationCategory.SETTINGS, "transient_settings", TRANSIENT_SETTINGS_DEPRECATION_MESSAGE);
+            }
+            clusterUpdateSettingsRequest.transientSettings(transientSettings);
         }
         if (source.containsKey(PERSISTENT)) {
             clusterUpdateSettingsRequest.persistentSettings((Map<String, ?>) source.get(PERSISTENT));
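The empty-map guard added above matters because of client behavior, not server behavior; an illustrative sketch of the two request shapes it distinguishes (values hypothetical):

[source,java]
----
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// A request that never mentions transient settings can still arrive as
// {"persistent": {...}, "transient": {}} when built through the RestClient,
// because ClusterUpdateSettingsRequest initializes both sections to empty.
Map<String, Object> source = new HashMap<>();
source.put("persistent", Collections.singletonMap("cluster.max_shards_per_node", 1200));
source.put("transient", Collections.emptyMap());

Map<String, ?> transientSettings = (Map<String, ?>) source.get("transient");
if (transientSettings.isEmpty() == false) {
    // only a genuine transient update reaches this point and logs the deprecation warning
}
----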
@@ -189,8 +189,8 @@ public class ShardPathTests extends ESTestCase {
 
     public void testShardPathSelection() throws IOException {
         try (NodeEnvironment env = newNodeEnvironment(Settings.builder().build())) {
-            NodeEnvironment.NodePath path = env.nodePaths()[0];
-            assertEquals(path, ShardPath.getPathWithMostFreeSpace(env));
+            NodeEnvironment.NodePath[] paths = env.nodePaths();
+            assertThat(org.elasticsearch.core.List.of(paths), hasItem(ShardPath.getPathWithMostFreeSpace(env)));
             ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0);
 
             Settings indexSettings = Settings.builder()

@@ -199,7 +199,13 @@ public class ShardPathTests extends ESTestCase {
 
             ShardPath shardPath = ShardPath.selectNewPathForShard(env, shardId, idxSettings, 1L, new HashMap<>());
             assertNotNull(shardPath.getDataPath());
-            assertEquals(path.indicesPath.resolve("0xDEADBEEF").resolve("0"), shardPath.getDataPath());
+
+            List<Path> indexPaths = new ArrayList<>();
+            for (NodeEnvironment.NodePath nodePath : paths) {
+                indexPaths.add(nodePath.indicesPath.resolve("0xDEADBEEF").resolve("0"));
+            }
+
+            assertThat(indexPaths, hasItem(shardPath.getDataPath()));
             assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID());
             assertEquals("foo", shardPath.getShardId().getIndexName());
             assertFalse(shardPath.isCustomDataPath());
@@ -920,6 +920,17 @@ public abstract class ESRestTestCase extends ESTestCase {
 
         if (mustClear) {
             Request request = new Request("PUT", "/_cluster/settings");
+
+            request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> {
+                if (warnings.isEmpty()) {
+                    return false;
+                } else if (warnings.size() > 1) {
+                    return true;
+                } else {
+                    return warnings.get(0).startsWith("[transient settings removal]") == false;
+                }
+            }));
+
             request.setJsonEntity(Strings.toString(clearCommand));
             adminClient().performRequest(request);
         }
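The lambda in the hunk above implements the client's `WarningsHandler` contract, where returning `true` fails the request because of its warnings; restated as a standalone handler (a sketch, assuming the same 7.x client interface):

[source,java]
----
import org.elasticsearch.client.WarningsHandler;

WarningsHandler allowOnlyTransientRemovalWarning = warnings -> {
    if (warnings.isEmpty()) {
        return false; // no warnings: accept the response
    }
    if (warnings.size() > 1) {
        return true; // more than one warning: fail
    }
    // exactly one warning: accept only the expected transient-settings notice
    return warnings.get(0).startsWith("[transient settings removal]") == false;
};
----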
@@ -684,13 +684,13 @@ the `basic` `authProvider` in {kib}. The process is documented in the
 
 If the previous resolutions do not solve your issue, enable additional
 logging for the SAML realm to troubleshoot further. You can enable debug
-logging by configuring the following transient setting:
+logging by configuring the following persistent setting:
 
 [source, console]
 ----
 PUT /_cluster/settings
 {
-  "transient": {
+  "persistent": {
     "logger.org.elasticsearch.xpack.security.authc.saml": "debug"
   }
 }
@@ -361,7 +361,7 @@ public class DeprecationHttpIT extends ESRestTestCase {
         List<Map<String, Object>> documents = getIndexedDeprecations();
 
         logger.warn(documents);
-        assertThat(documents, hasSize(2));
+        assertThat(documents, hasSize(3));
 
         assertThat(
             documents,

@@ -373,6 +373,14 @@ public class DeprecationHttpIT extends ESRestTestCase {
                 allOf(
                     hasEntry("event.code", "deprecated_settings"),
                     hasEntry("message", "[deprecated_settings] usage is deprecated. use [settings] instead")
-                )
+                ),
+                allOf(
+                    hasEntry("event.code", "transient_settings"),
+                    hasEntry(
+                        "message",
+                        "[transient settings removal] Updating cluster settings through transientSettings"
+                            + " is deprecated. Use persistent settings instead."
+                    )
+                )
             )
         );
@@ -413,4 +413,16 @@ public class ClusterDeprecationChecks {
         }
         return null;
     }
+
+    static DeprecationIssue checkTransientSettingsExistence(ClusterState state) {
+        if (state.metadata().transientSettings().isEmpty() == false) {
+            return new DeprecationIssue(DeprecationIssue.Level.WARNING,
+                "Transient cluster settings are in the process of being removed.",
+                "https://ela.st/es-deprecation-7-transient-cluster-settings",
+                "Use persistent settings to define your cluster settings instead.",
+                false,
+                null);
+        }
+        return null;
+    }
 }
@@ -53,7 +53,8 @@ public class DeprecationChecks {
             ClusterDeprecationChecks::checkClusterRoutingAllocationIncludeRelocationsSetting,
             ClusterDeprecationChecks::checkGeoShapeTemplates,
             ClusterDeprecationChecks::checkSparseVectorTemplates,
-            ClusterDeprecationChecks::checkILMFreezeActions
+            ClusterDeprecationChecks::checkILMFreezeActions,
+            ClusterDeprecationChecks::checkTransientSettingsExistence
         ));
 
     static final List<NodeDeprecationCheck<Settings, PluginsAndModules, ClusterState, XPackLicenseState, DeprecationIssue>>
@@ -48,6 +48,7 @@ import static org.elasticsearch.xpack.core.ilm.LifecycleSettings.LIFECYCLE_POLL_
 import static org.elasticsearch.xpack.deprecation.DeprecationChecks.CLUSTER_SETTINGS_CHECKS;
 import static org.elasticsearch.xpack.deprecation.IndexDeprecationChecksTests.addRandomFields;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.hasSize;
 
 public class ClusterDeprecationChecksTests extends ESTestCase {

@@ -355,9 +356,17 @@ public class ClusterDeprecationChecksTests extends ESTestCase {
             null
         );
 
+        final DeprecationIssue otherExpectedIssue = new DeprecationIssue(DeprecationIssue.Level.WARNING,
+            "Transient cluster settings are in the process of being removed.",
+            "https://ela.st/es-deprecation-7-transient-cluster-settings",
+            "Use persistent settings to define your cluster settings instead.",
+            false, null);
+
         List<DeprecationIssue> issues = DeprecationChecks.filterChecks(CLUSTER_SETTINGS_CHECKS, c -> c.apply(clusterState));
-        assertThat(issues, hasSize(1));
-        assertThat(issues.get(0), equalTo(expectedIssue));
+        assertThat(issues, hasSize(2));
+        assertThat(issues, hasItem(expectedIssue));
+        assertThat(issues, hasItem(otherExpectedIssue));
 
         final String expectedWarning = String.format(Locale.ROOT,
             "[%s] setting was deprecated in Elasticsearch and will be removed in a future release! " +

@@ -553,4 +562,34 @@ public class ClusterDeprecationChecksTests extends ESTestCase {
             "remove freeze action from the following ilm policies: [policy1,policy2]", false, null)
         ));
     }
+
+    public void testCheckTransientSettingsExistence() {
+        Settings transientSettings = Settings.builder()
+            .put("indices.recovery.max_bytes_per_sec", "20mb")
+            .build();
+        Metadata metadataWithTransientSettings = Metadata.builder()
+            .transientSettings(transientSettings)
+            .build();
+
+        ClusterState badState = ClusterState.builder(new ClusterName("test")).metadata(metadataWithTransientSettings).build();
+        DeprecationIssue issue = ClusterDeprecationChecks.checkTransientSettingsExistence(badState);
+        assertThat(issue, equalTo(
+            new DeprecationIssue(DeprecationIssue.Level.WARNING,
+                "Transient cluster settings are in the process of being removed.",
+                "https://ela.st/es-deprecation-7-transient-cluster-settings",
+                "Use persistent settings to define your cluster settings instead.",
+                false, null)
+        ));
+
+        Settings persistentSettings = Settings.builder()
+            .put("indices.recovery.max_bytes_per_sec", "20mb")
+            .build();
+        Metadata metadataWithoutTransientSettings = Metadata.builder()
+            .persistentSettings(persistentSettings)
+            .build();
+
+        ClusterState okState = ClusterState.builder(new ClusterName("test")).metadata(metadataWithoutTransientSettings).build();
+        issue = ClusterDeprecationChecks.checkTransientSettingsExistence(okState);
+        assertNull(issue);
+    }
 }