mirror of https://github.com/elastic/elasticsearch.git (synced 2025-04-25 07:37:19 -04:00)

commit d69c03359f (parent ab3f8f5067)
Support max_single_primary_size in Resize Action and expose it in ILM (#67705)

28 changed files with 534 additions and 106 deletions

@@ -18,9 +18,12 @@
  */
 package org.elasticsearch.client.ilm;
 
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;

@@ -31,31 +34,53 @@ import java.util.Objects;
 public class ShrinkAction implements LifecycleAction, ToXContentObject {
     public static final String NAME = "shrink";
     private static final ParseField NUMBER_OF_SHARDS_FIELD = new ParseField("number_of_shards");
+    private static final ParseField MAX_SINGLE_PRIMARY_SIZE = new ParseField("max_single_primary_size");
 
     private static final ConstructingObjectParser<ShrinkAction, Void> PARSER =
-        new ConstructingObjectParser<>(NAME, true, a -> new ShrinkAction((Integer) a[0]));
+        new ConstructingObjectParser<>(NAME, true, a -> new ShrinkAction((Integer) a[0], (ByteSizeValue) a[1]));
 
     static {
-        PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_SHARDS_FIELD);
+        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUMBER_OF_SHARDS_FIELD);
+        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
+            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SINGLE_PRIMARY_SIZE.getPreferredName()),
+            MAX_SINGLE_PRIMARY_SIZE, ObjectParser.ValueType.STRING);
     }
 
-    private int numberOfShards;
+    private Integer numberOfShards;
+    private ByteSizeValue maxSinglePrimarySize;
 
     public static ShrinkAction parse(XContentParser parser) throws IOException {
         return PARSER.parse(parser, null);
     }
 
-    public ShrinkAction(int numberOfShards) {
-        if (numberOfShards <= 0) {
-            throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0");
+    public ShrinkAction(@Nullable Integer numberOfShards, ByteSizeValue maxSinglePrimarySize) {
+        if (numberOfShards != null && maxSinglePrimarySize != null) {
+            throw new IllegalArgumentException("Cannot set both [number_of_shards] and [max_single_primary_size]");
+        }
+        if (numberOfShards == null && maxSinglePrimarySize == null) {
+            throw new IllegalArgumentException("Either [number_of_shards] or [max_single_primary_size] must be set");
+        }
+        if (maxSinglePrimarySize != null) {
+            if (maxSinglePrimarySize.getBytes() <= 0) {
+                throw new IllegalArgumentException("[max_single_primary_size] must be greater than 0");
+            }
+            this.maxSinglePrimarySize = maxSinglePrimarySize;
+        } else {
+            if (numberOfShards <= 0) {
+                throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0");
+            }
+            this.numberOfShards = numberOfShards;
         }
-        this.numberOfShards = numberOfShards;
     }
 
-    int getNumberOfShards() {
+    Integer getNumberOfShards() {
         return numberOfShards;
     }
 
+    ByteSizeValue getMaxSinglePrimarySize() {
+        return maxSinglePrimarySize;
+    }
+
     @Override
     public String getName() {
         return NAME;

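For illustration, a minimal sketch of the new constructor contract, assuming the HLRC classes in this diff are on the classpath (this snippet is not part of the commit):

[source,java]
--------------------------------------------------
import org.elasticsearch.client.ilm.ShrinkAction;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class ShrinkActionContractSketch {
    public static void main(String[] args) {
        // Valid: exactly one sizing strategy per action.
        ShrinkAction byShardCount = new ShrinkAction(1, null);
        ShrinkAction byShardSize = new ShrinkAction(null, new ByteSizeValue(50, ByteSizeUnit.GB));

        // Invalid: both set, or neither set, throws IllegalArgumentException:
        //   "Cannot set both [number_of_shards] and [max_single_primary_size]"
        //   "Either [number_of_shards] or [max_single_primary_size] must be set"
        try {
            new ShrinkAction(1, new ByteSizeValue(50, ByteSizeUnit.GB));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
--------------------------------------------------
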
@@ -64,7 +89,12 @@ public class ShrinkAction implements LifecycleAction, ToXContentObject {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
-        builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards);
+        if (numberOfShards != null) {
+            builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards);
+        }
+        if (maxSinglePrimarySize != null) {
+            builder.field(MAX_SINGLE_PRIMARY_SIZE.getPreferredName(), maxSinglePrimarySize);
+        }
         builder.endObject();
         return builder;
     }

@@ -74,12 +104,14 @@ public class ShrinkAction implements LifecycleAction, ToXContentObject {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         ShrinkAction that = (ShrinkAction) o;
-        return Objects.equals(numberOfShards, that.numberOfShards);
+        return Objects.equals(numberOfShards, that.numberOfShards) &&
+            Objects.equals(maxSinglePrimarySize, that.maxSinglePrimarySize);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(numberOfShards);
+        return Objects.hash(numberOfShards, maxSinglePrimarySize);
     }
 
     @Override

@@ -24,6 +24,7 @@ import org.elasticsearch.client.TimedRequest;
 import org.elasticsearch.client.Validatable;
 import org.elasticsearch.client.ValidationException;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -45,6 +46,7 @@ public class ResizeRequest extends TimedRequest implements Validatable, ToXContentObject {
     private final String targetIndex;
     private Settings settings = Settings.EMPTY;
     private Set<Alias> aliases = new HashSet<>();
+    private ByteSizeValue maxSinglePrimarySize;
 
     /**
      * Creates a new resize request

@@ -87,6 +89,20 @@ public class ResizeRequest extends TimedRequest implements Validatable, ToXContentObject {
         return Collections.unmodifiableSet(this.aliases);
     }
 
+    /**
+     * Sets the max single primary shard size of the target index
+     */
+    public void setMaxSinglePrimarySize(ByteSizeValue maxSinglePrimarySize) {
+        this.maxSinglePrimarySize = maxSinglePrimarySize;
+    }
+
+    /**
+     * Return the max single primary shard size of the target index
+     */
+    public ByteSizeValue getMaxSinglePrimarySize() {
+        return maxSinglePrimarySize;
+    }
+
     @Override
     public Optional<ValidationException> validate() {
         ValidationException validationException = new ValidationException();

@@ -156,7 +156,7 @@ public class IndexLifecycleIT extends ESRestHighLevelClientTestCase {
         Map<String, LifecycleAction> warmActions = new HashMap<>();
         warmActions.put(UnfollowAction.NAME, new UnfollowAction());
         warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, null, Collections.singletonMap("_name", "node-1")));
-        warmActions.put(ShrinkAction.NAME, new ShrinkAction(1));
+        warmActions.put(ShrinkAction.NAME, new ShrinkAction(1, null));
         warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1000));
         lifecyclePhases.put("warm", new Phase("warm", TimeValue.timeValueSeconds(1000), warmActions));

@@ -62,6 +62,7 @@ import org.elasticsearch.client.indices.rollover.RolloverRequest;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.CollectionUtils;
 import org.elasticsearch.common.xcontent.XContentType;

@@ -655,6 +656,9 @@ public class IndicesRequestConvertersTests extends ESTestCase {
         if (resizeType == ResizeType.SPLIT) {
             resizeRequest.setSettings(Settings.builder().put("index.number_of_shards", 2).build());
         }
+        if (resizeType == ResizeType.SHRINK) {
+            resizeRequest.setMaxSinglePrimarySize(new ByteSizeValue(randomIntBetween(1, 100)));
+        }
 
         Request request = function.apply(resizeRequest);
         Assert.assertEquals(HttpPut.METHOD_NAME, request.getMethod());

@@ -264,7 +264,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
         PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(myPolicyAsPut);
 
         Map<String, Phase> otherPolicyPhases = new HashMap<>(phases);
-        Map<String, LifecycleAction> warmActions = Collections.singletonMap(ShrinkAction.NAME, new ShrinkAction(1));
+        Map<String, LifecycleAction> warmActions = Collections.singletonMap(ShrinkAction.NAME, new ShrinkAction(1, null));
         otherPolicyPhases.put("warm", new Phase("warm", new TimeValue(30, TimeUnit.DAYS), warmActions));
         otherPolicyAsPut = new LifecyclePolicy("other_policy", otherPolicyPhases);

@@ -614,7 +614,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
         {
             Map<String, Phase> phases = new HashMap<>();
             Map<String, LifecycleAction> warmActions = new HashMap<>();
-            warmActions.put(ShrinkAction.NAME, new ShrinkAction(3));
+            warmActions.put(ShrinkAction.NAME, new ShrinkAction(3, null));
             phases.put("warm", new Phase("warm", TimeValue.ZERO, warmActions));
 
             LifecyclePolicy policy = new LifecyclePolicy("my_policy",

@@ -1609,11 +1609,19 @@ public class IndicesClientDocumentationIT extends ESRestHighLevelClientTestCase
         request.setWaitForActiveShards(2); // <1>
         request.setWaitForActiveShards(ActiveShardCount.DEFAULT); // <2>
         // end::shrink-index-request-waitForActiveShards
-        // tag::shrink-index-request-settings
-        request.getTargetIndexRequest().settings(Settings.builder()
-            .put("index.number_of_shards", 2) // <1>
-            .putNull("index.routing.allocation.require._name")); // <2>
-        // end::shrink-index-request-settings
+        if (randomBoolean()) {
+            // tag::shrink-index-request-settings
+            request.getTargetIndexRequest().settings(Settings.builder()
+                .put("index.number_of_shards", 2) // <1>
+                .putNull("index.routing.allocation.require._name")); // <2>
+            // end::shrink-index-request-settings
+        } else {
+            request.getTargetIndexRequest().settings(Settings.builder()
+                .putNull("index.routing.allocation.require._name"));
+            // tag::shrink-index-request-maxSinglePrimarySize
+            request.setMaxSinglePrimarySize(new ByteSizeValue(50, ByteSizeUnit.GB)); // <1>
+            // end::shrink-index-request-maxSinglePrimarySize
+        }
         // tag::shrink-index-request-aliases
         request.getTargetIndexRequest().alias(new Alias("target_alias")); // <1>
         // end::shrink-index-request-aliases

@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.client.ilm;
 
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.test.AbstractXContentTestCase;

@@ -38,7 +39,11 @@ public class ShrinkActionTests extends AbstractXContentTestCase<ShrinkAction> {
     }
 
     static ShrinkAction randomInstance() {
-        return new ShrinkAction(randomIntBetween(1, 100));
+        if (randomBoolean()) {
+            return new ShrinkAction(randomIntBetween(1, 100), null);
+        } else {
+            return new ShrinkAction(null, new ByteSizeValue(randomIntBetween(1, 100)));
+        }
     }
 
     @Override

@@ -47,7 +52,17 @@ public class ShrinkActionTests extends AbstractXContentTestCase<ShrinkAction> {
     }
 
     public void testNonPositiveShardNumber() {
-        Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0)));
+        Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0), null));
         assertThat(e.getMessage(), equalTo("[number_of_shards] must be greater than 0"));
     }
+
+    public void testMaxSinglePrimarySize() {
+        ByteSizeValue maxSinglePrimarySize1 = new ByteSizeValue(10);
+        Exception e1 = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(1, 100), maxSinglePrimarySize1));
+        assertThat(e1.getMessage(), equalTo("Cannot set both [number_of_shards] and [max_single_primary_size]"));
+
+        ByteSizeValue maxSinglePrimarySize2 = new ByteSizeValue(0);
+        Exception e2 = expectThrows(Exception.class, () -> new org.elasticsearch.client.ilm.ShrinkAction(null, maxSinglePrimarySize2));
+        assertThat(e2.getMessage(), equalTo("[max_single_primary_size] must be greater than 0"));
+    }
 }

@@ -54,6 +54,12 @@ include-tagged::{doc-tests-file}[{api}-request-settings]
 <1> The number of shards on the target of the shrink index request
 <2> Remove the allocation requirement copied from the source index
 
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request-maxSinglePrimarySize]
+--------------------------------------------------
+<1> The max single primary shard size of the target index
+
 ["source","java",subs="attributes,callouts,macros"]
 --------------------------------------------------
 include-tagged::{doc-tests-file}[{api}-request-aliases]

@@ -75,5 +81,3 @@ include-tagged::{doc-tests-file}[{api}-response]
 <1> Indicates whether all of the nodes have acknowledged the request
 <2> Indicates whether the requisite number of shard copies were started for
 each shard in the index before timing out
-
-

@@ -40,14 +40,30 @@ managed indices.
 [[ilm-shrink-options]]
 ==== Shrink options
 `number_of_shards`::
-(Required, integer)
+(Optional, integer)
 Number of shards to shrink to.
-Must be a factor of the number of shards in the source index.
+Must be a factor of the number of shards in the source index. This parameter
+conflicts with `max_single_primary_size`; only one of them may be set.
+
+`max_single_primary_size`::
+(Optional, <<byte-units, byte units>>)
+The maximum size of a single primary shard in the target index, used to find the
+optimum number of shards for the target index. When this parameter is set, each
+shard's storage in the target index will not be greater than this value. The shard
+count of the target index will still be a factor of the source index's shard count,
+but if the parameter is less than the size of a single shard in the source index,
+the target index's shard count will equal the source index's shard count.
+For example, if this parameter is set to 50gb and the source index has 60 primary
+shards totaling 100gb, the target index will have 2 primary shards of 50gb each;
+with 60 primary shards totaling 1000gb, the target index will have 20 primary
+shards; with 60 primary shards totaling 4000gb, the target index will still have
+60 primary shards. This parameter conflicts with `number_of_shards` in the
+`settings`; only one of them may be set.
 
 [[ilm-shrink-ex]]
 ==== Example
 
+[[ilm-shrink-shards-ex]]
+===== Set the number of shards of the new shrunken index explicitly
+
 [source,console]
 --------------------------------------------------
 PUT _ilm/policy/my_policy

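To make the 50gb example above concrete, the rounding can be checked with a small standalone sketch (plain Java, written for this note rather than taken from the commit; `smallestFactorAtLeast` is an illustrative helper, not the production `calTargetShardsNum`):

[source,java]
--------------------------------------------------
public class ShrinkSizingExample {
    public static void main(String[] args) {
        long maxSinglePrimarySizeGb = 50;
        int sourceShards = 60;
        // Minimum shard count is ceil(total / maxSinglePrimarySize), then rounded up
        // to a factor of the source shard count (capped at the source shard count).
        for (long totalGb : new long[] {100, 1000, 4000}) {
            long min = (totalGb + maxSinglePrimarySizeGb - 1) / maxSinglePrimarySizeGb;
            int target = smallestFactorAtLeast(sourceShards, (int) Math.min(min, sourceShards));
            System.out.println(totalGb + "gb -> " + target + " primary shards");
        }
        // prints: 100gb -> 2, 1000gb -> 20, 4000gb -> 60
    }

    static int smallestFactorAtLeast(int n, int min) {
        for (int i = min; i <= n; i++) {
            if (n % i == 0) {
                return i;
            }
        }
        return n;
    }
}
--------------------------------------------------
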
@@ -65,3 +81,25 @@ PUT _ilm/policy/my_policy
   }
 }
 --------------------------------------------------
+
+[[ilm-shrink-size-ex]]
+===== Calculate the number of shards of the new shrunken index based on the storage of the source index and the `max_single_primary_size` parameter
+
+[source,console]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "actions": {
+          "shrink" : {
+            "max_single_primary_size": "50gb"
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------

@@ -230,3 +230,15 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=target-index-aliases]
 
 include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=target-index-settings]
+
+`max_single_primary_size`::
+(Optional, <<byte-units, byte units>>)
+The maximum size of a single primary shard in the target index, used to find the
+optimum number of shards for the target index. When this parameter is set, each
+shard's storage in the target index will not be greater than this value. The shard
+count of the target index will still be a factor of the source index's shard count,
+but if the parameter is less than the size of a single shard in the source index,
+the target index's shard count will equal the source index's shard count.
+For example, if this parameter is set to 50gb and the source index has 60 primary
+shards totaling 100gb, the target index will have 2 primary shards of 50gb each;
+with 60 primary shards totaling 1000gb, the target index will have 20 primary
+shards; with 60 primary shards totaling 4000gb, the target index will still have
+60 primary shards. This parameter conflicts with `number_of_shards` in the
+`settings`; only one of them may be set.

@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.action.admin.indices.shrink;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.admin.indices.alias.Alias;

@@ -29,6 +30,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -45,17 +47,22 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements IndicesRequest, ToXContentObject {
 
     public static final ObjectParser<ResizeRequest, Void> PARSER = new ObjectParser<>("resize_request");
+    private static final ParseField MAX_SINGLE_PRIMARY_SIZE = new ParseField("max_single_primary_size");
     static {
         PARSER.declareField((parser, request, context) -> request.getTargetIndexRequest().settings(parser.map()),
             new ParseField("settings"), ObjectParser.ValueType.OBJECT);
         PARSER.declareField((parser, request, context) -> request.getTargetIndexRequest().aliases(parser.map()),
             new ParseField("aliases"), ObjectParser.ValueType.OBJECT);
+        PARSER.declareField(ResizeRequest::setMaxSinglePrimarySize,
+            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SINGLE_PRIMARY_SIZE.getPreferredName()),
+            MAX_SINGLE_PRIMARY_SIZE, ObjectParser.ValueType.STRING);
     }
 
     private CreateIndexRequest targetIndexRequest;
     private String sourceIndex;
     private ResizeType type = ResizeType.SHRINK;
     private Boolean copySettings = true;
+    private ByteSizeValue maxSinglePrimarySize;
 
     public ResizeRequest(StreamInput in) throws IOException {
         super(in);

|
@ -63,6 +70,11 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
|
||||||
sourceIndex = in.readString();
|
sourceIndex = in.readString();
|
||||||
type = in.readEnum(ResizeType.class);
|
type = in.readEnum(ResizeType.class);
|
||||||
copySettings = in.readOptionalBoolean();
|
copySettings = in.readOptionalBoolean();
|
||||||
|
if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
|
||||||
|
if (in.readBoolean()) {
|
||||||
|
maxSinglePrimarySize = new ByteSizeValue(in);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ResizeRequest() {}
|
ResizeRequest() {}
|
||||||
|
@@ -87,6 +99,9 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements IndicesRequest, ToXContentObject {
         if (type == ResizeType.SPLIT && IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexRequest.settings()) == false) {
             validationException = addValidationError("index.number_of_shards is required for split operations", validationException);
         }
+        if (maxSinglePrimarySize != null && maxSinglePrimarySize.getBytes() <= 0) {
+            validationException = addValidationError("max_single_primary_size must be greater than 0", validationException);
+        }
         assert copySettings == null || copySettings;
         return validationException;
     }

@@ -102,6 +117,9 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements IndicesRequest, ToXContentObject {
         out.writeString(sourceIndex);
         out.writeEnum(type);
         out.writeOptionalBoolean(copySettings);
+        if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
+            out.writeOptionalWriteable(maxSinglePrimarySize);
+        }
     }
 
     @Override

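The read and write sides above stay symmetric: `writeOptionalWriteable` emits a presence boolean followed by the payload, matching the explicit `readBoolean()` plus `new ByteSizeValue(in)` pair in the stream constructor, and both are gated on `Version.V_8_0_0` so nodes on older versions never see the extra field. A minimal sketch of the same pattern using `readOptionalWriteable` (equivalent behavior, not what the commit does verbatim):

[source,java]
--------------------------------------------------
// Sketch only: version-gated optional field, mirroring the ResizeRequest change.
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
        out.writeOptionalWriteable(maxSinglePrimarySize); // presence flag + payload
    }
}

public ResizeRequest(StreamInput in) throws IOException {
    super(in);
    if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
        // equivalent to: if (in.readBoolean()) { maxSinglePrimarySize = new ByteSizeValue(in); }
        maxSinglePrimarySize = in.readOptionalWriteable(ByteSizeValue::new);
    }
}
--------------------------------------------------
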
@@ -184,6 +202,25 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements IndicesRequest, ToXContentObject {
         return copySettings;
     }
 
+    /**
+     * Sets the max single primary shard size of the target index.
+     * It is used to calculate an optimum number of shards for the target index from the
+     * storage of the source index: each shard's storage in the target index will not be
+     * greater than this value, while the shard count of the target index will still be a
+     * factor of the source index's shard count.
+     *
+     * @param maxSinglePrimarySize the max single primary shard size of the target index
+     */
+    public void setMaxSinglePrimarySize(ByteSizeValue maxSinglePrimarySize) {
+        this.maxSinglePrimarySize = maxSinglePrimarySize;
+    }
+
+    /**
+     * Returns the max single primary shard size of the target index
+     */
+    public ByteSizeValue getMaxSinglePrimarySize() {
+        return maxSinglePrimarySize;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();

@@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 
 public class ResizeRequestBuilder extends AcknowledgedRequestBuilder<ResizeRequest, ResizeResponse,
         ResizeRequestBuilder> {

@@ -79,4 +80,12 @@ public class ResizeRequestBuilder extends AcknowledgedRequestBuilder<ResizeRequest, ResizeResponse,
         this.request.setResizeType(type);
         return this;
     }
+
+    /**
+     * Sets the max single primary shard size of the target index.
+     */
+    public ResizeRequestBuilder setMaxSinglePrimarySize(ByteSizeValue maxSinglePrimarySize) {
+        this.request.setMaxSinglePrimarySize(maxSinglePrimarySize);
+        return this;
+    }
 }

@@ -19,6 +19,8 @@
 
 package org.elasticsearch.action.admin.indices.shrink;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.IndexWriter;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;

@@ -39,10 +41,12 @@ import org.elasticsearch.cluster.metadata.MetadataCreateIndexService;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.shard.DocsStats;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -56,6 +60,8 @@ import java.util.function.IntFunction;
  * Main class to initiate resizing (shrink / split) an index into a new index
  */
 public class TransportResizeAction extends TransportMasterNodeAction<ResizeRequest, ResizeResponse> {
+    private static final Logger logger = LogManager.getLogger(TransportResizeAction.class);
+
     private final MetadataCreateIndexService createIndexService;
     private final Client client;

@@ -95,7 +101,8 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeRequest, ResizeResponse> {
             return;
         }

-        IndicesStatsRequestBuilder statsRequestBuilder = client.admin().indices().prepareStats(sourceIndex).clear().setDocs(true);
+        IndicesStatsRequestBuilder statsRequestBuilder = client.admin().indices().prepareStats(sourceIndex).clear()
+            .setDocs(true).setStore(true);
         IndicesStatsRequest statsRequest = statsRequestBuilder.request();
         statsRequest.setParentTask(clusterService.localNode().getId(), task.getId());
         // TODO: only fetch indices stats for shrink type resize requests

@@ -103,7 +110,8 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeRequest, ResizeResponse> {
             ActionListener.delegateFailure(listener, (delegatedListener, indicesStatsResponse) -> {
                 final CreateIndexClusterStateUpdateRequest updateRequest;
                 try {
-                    updateRequest = prepareCreateIndexRequest(resizeRequest, sourceMetadata, i -> {
+                    StoreStats indexStoreStats = indicesStatsResponse.getPrimaries().store;
+                    updateRequest = prepareCreateIndexRequest(resizeRequest, sourceMetadata, indexStoreStats, i -> {
                         IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
                         return shard == null ? null : shard.getPrimary().getDocs();
                     }, targetIndex);

@@ -121,6 +129,7 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeRequest, ResizeResponse> {
     // static for unittesting this method
     static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ResizeRequest resizeRequest,
                                                                           final IndexMetadata sourceMetadata,
+                                                                          final StoreStats indexStoreStats,
                                                                           final IntFunction<DocsStats> perShardDocStats,
                                                                           final String targetIndexName) {
         final CreateIndexRequest targetIndex = resizeRequest.getTargetIndexRequest();

@@ -129,12 +138,37 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeRequest, ResizeResponse> {
         targetIndexSettingsBuilder.remove(IndexMetadata.SETTING_HISTORY_UUID);
         final Settings targetIndexSettings = targetIndexSettingsBuilder.build();
         final int numShards;
+        ByteSizeValue maxSinglePrimarySize = resizeRequest.getMaxSinglePrimarySize();
         if (IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
+            if (resizeRequest.getResizeType() == ResizeType.SHRINK && maxSinglePrimarySize != null) {
+                throw new IllegalArgumentException("Cannot set both index.number_of_shards and max_single_primary_size" +
+                    " for the target index");
+            }
             numShards = IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings);
         } else {
             assert resizeRequest.getResizeType() != ResizeType.SPLIT : "split must specify the number of shards explicitly";
             if (resizeRequest.getResizeType() == ResizeType.SHRINK) {
-                numShards = 1;
+                if (maxSinglePrimarySize != null) {
+                    int sourceIndexShardsNum = sourceMetadata.getNumberOfShards();
+                    long sourceIndexStorageBytes = indexStoreStats.getSizeInBytes();
+                    long maxSinglePrimarySizeBytes = maxSinglePrimarySize.getBytes();
+                    long minShardsNum = sourceIndexStorageBytes / maxSinglePrimarySizeBytes;
+                    if (minShardsNum * maxSinglePrimarySizeBytes < sourceIndexStorageBytes) {
+                        minShardsNum = minShardsNum + 1;
+                    }
+                    if (minShardsNum > sourceIndexShardsNum) {
+                        logger.info("By setting max_single_primary_size to [{}], the target index [{}] will contain [{}] shards," +
+                            " which will be greater than [{}] shards in the source index [{}]," +
+                            " using [{}] for the shard count of the target index [{}]",
+                            maxSinglePrimarySize.toString(), targetIndexName, minShardsNum, sourceIndexShardsNum,
+                            sourceMetadata.getIndex().getName(), sourceIndexShardsNum, targetIndexName);
+                        numShards = sourceIndexShardsNum;
+                    } else {
+                        numShards = calTargetShardsNum(sourceIndexShardsNum, (int) minShardsNum);
+                    }
+                } else {
+                    numShards = 1;
+                }
             } else {
                 assert resizeRequest.getResizeType() == ResizeType.CLONE;
                 numShards = sourceMetadata.getNumberOfShards();

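One detail worth noting in the shard-count math above: the manual `+ 1` adjustment after the integer division is just ceiling division. A compact equivalent (sketch, not the committed code):

[source,java]
--------------------------------------------------
// ceil(sourceIndexStorageBytes / maxSinglePrimarySizeBytes) without floating point
long minShardsNum = (sourceIndexStorageBytes + maxSinglePrimarySizeBytes - 1) / maxSinglePrimarySizeBytes;
--------------------------------------------------
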
@@ -199,4 +233,29 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeRequest, ResizeResponse> {
             .resizeType(resizeRequest.getResizeType())
             .copySettings(resizeRequest.getCopySettings() == null ? false : resizeRequest.getCopySettings());
     }
+
+    // Returns the smallest factor of sourceIndexShardsNum that is greater than or equal to minShardsNum
+    protected static int calTargetShardsNum(final int sourceIndexShardsNum, final int minShardsNum) {
+        if (sourceIndexShardsNum <= 0 || minShardsNum <= 0) {
+            return 1;
+        }
+        if (sourceIndexShardsNum % minShardsNum == 0) {
+            return minShardsNum;
+        }
+        int num = (int) Math.floor(Math.sqrt(sourceIndexShardsNum));
+        if (minShardsNum >= num) {
+            for (int i = num; i >= 1; i--) {
+                if (sourceIndexShardsNum % i == 0 && minShardsNum <= sourceIndexShardsNum / i) {
+                    return sourceIndexShardsNum / i;
+                }
+            }
+        } else {
+            for (int i = 1; i < num; i++) {
+                if (sourceIndexShardsNum % i == 0 && minShardsNum <= i) {
+                    return i;
+                }
+            }
+        }
+        return sourceIndexShardsNum;
+    }
 }

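A few concrete evaluations make the rounding rule of `calTargetShardsNum` easier to see; the expectations below match the unit test added later in this diff (the method returns the smallest factor of the source shard count that is at least the requested minimum, falling back to the source count itself):

[source,java]
--------------------------------------------------
// factors of 10 are 1, 2, 5, 10
assert TransportResizeAction.calTargetShardsNum(10, 3) == 5;   // smallest factor >= 3
assert TransportResizeAction.calTargetShardsNum(10, 6) == 10;  // smallest factor >= 6
assert TransportResizeAction.calTargetShardsNum(60, 21) == 30; // factors of 60 that are >= 21: 30, 60
assert TransportResizeAction.calTargetShardsNum(59, 21) == 59; // 59 is prime, no smaller factor fits
assert TransportResizeAction.calTargetShardsNum(10, 11) == 10; // capped at the source shard count
--------------------------------------------------
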
@@ -39,7 +39,9 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.index.shard.DocsStats;
+import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.snapshots.EmptySnapshotsInfoService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.gateway.TestGatewayAllocator;

@@ -77,7 +79,8 @@ public class TransportResizeActionTests extends ESTestCase {
         assertTrue(
             expectThrows(IllegalStateException.class, () ->
                 TransportResizeAction.prepareCreateIndexRequest(new ResizeRequest("target", "source"), state,
-                    (i) -> new DocsStats(Integer.MAX_VALUE, between(1, 1000), between(1, 100)), "target")
+                    new StoreStats(between(1, 100), between(1, 100)), (i) -> new DocsStats(Integer.MAX_VALUE, between(1, 1000),
+                        between(1, 100)), "target")
             ).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards "));

@@ -88,6 +91,7 @@ public class TransportResizeActionTests extends ESTestCase {
         TransportResizeAction.prepareCreateIndexRequest(req,
             createClusterState("source", 8, 1,
                 Settings.builder().put("index.blocks.write", true).build()).metadata().index("source"),
+            new StoreStats(between(1, 100), between(1, 100)),
             (i) -> i == 2 || i == 3 ? new DocsStats(Integer.MAX_VALUE / 2, between(1, 1000), between(1, 10000)) : null
             , "target");
     }

@@ -101,6 +105,7 @@ public class TransportResizeActionTests extends ESTestCase {
             createClusterState("source", 8, 1,
                 Settings.builder().put("index.blocks.write", true).put("index.soft_deletes.enabled", true).build())
                 .metadata().index("source"),
+            new StoreStats(between(1, 100), between(1, 100)),
             (i) -> new DocsStats(between(10, 1000), between(1, 10), between(1, 10000)), "target");
         });
         assertThat(softDeletesError.getMessage(), equalTo("Can't disable [index.soft_deletes.enabled] setting on resize"));

|
@ -121,6 +126,7 @@ public class TransportResizeActionTests extends ESTestCase {
|
||||||
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
|
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
|
||||||
|
|
||||||
TransportResizeAction.prepareCreateIndexRequest(new ResizeRequest("target", "source"), clusterState.metadata().index("source"),
|
TransportResizeAction.prepareCreateIndexRequest(new ResizeRequest("target", "source"), clusterState.metadata().index("source"),
|
||||||
|
new StoreStats(between(1, 100), between(1, 100)),
|
||||||
(i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), "target");
|
(i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), "target");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -144,14 +150,16 @@ public class TransportResizeActionTests extends ESTestCase {
         resizeRequest.getTargetIndexRequest()
             .settings(Settings.builder().put("index.number_of_shards", 2).build());
         IndexMetadata indexMetadata = clusterState.metadata().index("source");
-        TransportResizeAction.prepareCreateIndexRequest(resizeRequest, indexMetadata, null, "target");
+        TransportResizeAction.prepareCreateIndexRequest(resizeRequest, indexMetadata, new StoreStats(between(1, 100), between(1, 100)),
+            null, "target");
 
         resizeRequest.getTargetIndexRequest()
             .settings(Settings.builder()
                 .put("index.number_of_routing_shards", randomIntBetween(2, 10))
                 .put("index.number_of_shards", 2)
                 .build());
-        TransportResizeAction.prepareCreateIndexRequest(resizeRequest, indexMetadata, null, "target");
+        TransportResizeAction.prepareCreateIndexRequest(resizeRequest, indexMetadata, new StoreStats(between(1, 100), between(1, 100)),
+            null, "target");
     }
 
     public void testPassNumRoutingShardsAndFail() {

@@ -174,7 +182,8 @@ public class TransportResizeActionTests extends ESTestCase {
         resizeRequest.setResizeType(ResizeType.SPLIT);
         resizeRequest.getTargetIndexRequest()
             .settings(Settings.builder().put("index.number_of_shards", numShards * 2).build());
-        TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState.metadata().index("source"), null, "target");
+        TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState.metadata().index("source"),
+            new StoreStats(between(1, 100), between(1, 100)), null, "target");
 
         resizeRequest.getTargetIndexRequest()
             .settings(Settings.builder()

@@ -182,7 +191,8 @@ public class TransportResizeActionTests extends ESTestCase {
                 .put("index.number_of_routing_shards", numShards * 2).build());
         ClusterState finalState = clusterState;
         IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
-            () -> TransportResizeAction.prepareCreateIndexRequest(resizeRequest, finalState.metadata().index("source"), null, "target"));
+            () -> TransportResizeAction.prepareCreateIndexRequest(resizeRequest, finalState.metadata().index("source"),
+                new StoreStats(between(1, 100), between(1, 100)), null, "target"));
         assertEquals("cannot provide index.number_of_routing_shards on resize", iae.getMessage());
     }

|
@ -210,7 +220,7 @@ public class TransportResizeActionTests extends ESTestCase {
|
||||||
final ActiveShardCount activeShardCount = randomBoolean() ? ActiveShardCount.ALL : ActiveShardCount.ONE;
|
final ActiveShardCount activeShardCount = randomBoolean() ? ActiveShardCount.ALL : ActiveShardCount.ONE;
|
||||||
target.setWaitForActiveShards(activeShardCount);
|
target.setWaitForActiveShards(activeShardCount);
|
||||||
CreateIndexClusterStateUpdateRequest request = TransportResizeAction.prepareCreateIndexRequest(
|
CreateIndexClusterStateUpdateRequest request = TransportResizeAction.prepareCreateIndexRequest(
|
||||||
target, clusterState.metadata().index(indexName), (i) -> stats, "target");
|
target, clusterState.metadata().index(indexName), new StoreStats(between(1, 100), between(1, 100)), (i) -> stats, "target");
|
||||||
assertNotNull(request.recoverFrom());
|
assertNotNull(request.recoverFrom());
|
||||||
assertEquals(indexName, request.recoverFrom().getName());
|
assertEquals(indexName, request.recoverFrom().getName());
|
||||||
assertEquals("1", request.settings().get("index.number_of_shards"));
|
assertEquals("1", request.settings().get("index.number_of_shards"));
|
||||||
|
@ -218,6 +228,86 @@ public class TransportResizeActionTests extends ESTestCase {
|
||||||
assertEquals(request.waitForActiveShards(), activeShardCount);
|
assertEquals(request.waitForActiveShards(), activeShardCount);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testCalculateTargetShardsNum() {
|
||||||
|
assertEquals(TransportResizeAction.calTargetShardsNum(0, 0), 1);
|
||||||
|
assertEquals(TransportResizeAction.calTargetShardsNum(10, 0), 1);
|
||||||
|
assertEquals(TransportResizeAction.calTargetShardsNum(10, 1), 1);
|
||||||
|
assertEquals(TransportResizeAction.calTargetShardsNum(10, 2), 2);
|
||||||
|
assertEquals(TransportResizeAction.calTargetShardsNum(10, 3), 5);
|
||||||
|
assertEquals(TransportResizeAction.calTargetShardsNum(10, 6), 10);
|
||||||
|
assertEquals(TransportResizeAction.calTargetShardsNum(10, 11), 10);
|
||||||
|
assertEquals(TransportResizeAction.calTargetShardsNum(59, 21), 59);
+        assertEquals(TransportResizeAction.calTargetShardsNum(60, 21), 30);
+        assertEquals(TransportResizeAction.calTargetShardsNum(60, 31), 60);
+    }
+
+    public void testShrinkWithMaxSinglePrimarySize() {
+        int sourceIndexShardsNum = randomIntBetween(2, 42);
+        IndexMetadata state = createClusterState("source", sourceIndexShardsNum, randomIntBetween(0, 10),
+            Settings.builder().put("index.blocks.write", true).build()).metadata().index("source");
+        ResizeRequest resizeRequest = new ResizeRequest("target", "source");
+        resizeRequest.setMaxSinglePrimarySize(new ByteSizeValue(10));
+        resizeRequest.getTargetIndexRequest()
+            .settings(Settings.builder().put("index.number_of_shards", 2).build());
+        assertTrue(
+            expectThrows(IllegalArgumentException.class, () ->
+                TransportResizeAction.prepareCreateIndexRequest(resizeRequest, state, new StoreStats(between(1, 100), between(1, 100)),
+                    (i) -> new DocsStats(Integer.MAX_VALUE, between(1, 1000), between(1, 100)), "target")
+            ).getMessage().startsWith("Cannot set both index.number_of_shards and max_single_primary_size for the target index"));
+
+        // create one that won't fail
+        ClusterState clusterState = ClusterState.builder(createClusterState("source", 10, 0,
+            Settings.builder()
+                .put("index.blocks.write", true)
+                .build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
+            .build();
+        AllocationService service = new AllocationService(new AllocationDeciders(
+            Collections.singleton(new MaxRetryAllocationDecider())),
+            new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE,
+            EmptySnapshotsInfoService.INSTANCE);
+
+        RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        // now we start the shard
+        routingTable = ESAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, "source").routingTable();
+        clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
+        int numSourceShards = clusterState.metadata().index("source").getNumberOfShards();
+        DocsStats stats = new DocsStats(between(0, (IndexWriter.MAX_DOCS) / numSourceShards), between(1, 1000), between(1, 10000));
+
+        // each shard's storage will not be greater than the `max_single_primary_size`
+        ResizeRequest target1 = new ResizeRequest("target", "source");
+        target1.setMaxSinglePrimarySize(new ByteSizeValue(2));
+        StoreStats storeStats = new StoreStats(10, between(1, 100));
+        final int targetIndexShardsNum1 = 5;
+        final ActiveShardCount activeShardCount1 = ActiveShardCount.from(targetIndexShardsNum1);
+        target1.setWaitForActiveShards(targetIndexShardsNum1);
+
+        CreateIndexClusterStateUpdateRequest request1 = TransportResizeAction.prepareCreateIndexRequest(
+            target1, clusterState.metadata().index("source"), storeStats, (i) -> stats, "target");
+        assertNotNull(request1.recoverFrom());
+        assertEquals("source", request1.recoverFrom().getName());
+        assertEquals(String.valueOf(targetIndexShardsNum1), request1.settings().get("index.number_of_shards"));
+        assertEquals("shrink_index", request1.cause());
+        assertEquals(request1.waitForActiveShards(), activeShardCount1);
+
+        // if `max_single_primary_size` is less than the single shard size of the source index,
+        // the shards number of the target index will be equal to the source index's shards number
+        ResizeRequest target2 = new ResizeRequest("target2", "source");
+        target2.setMaxSinglePrimarySize(new ByteSizeValue(1));
+        StoreStats storeStats2 = new StoreStats(100, between(1, 100));
+        final int targetIndexShardsNum2 = 10;
+        final ActiveShardCount activeShardCount2 = ActiveShardCount.from(targetIndexShardsNum2);
+        target2.setWaitForActiveShards(activeShardCount2);
+
+        CreateIndexClusterStateUpdateRequest request2 = TransportResizeAction.prepareCreateIndexRequest(
+            target2, clusterState.metadata().index("source"), storeStats2, (i) -> stats, "target");
+        assertNotNull(request2.recoverFrom());
+        assertEquals("source", request2.recoverFrom().getName());
+        assertEquals(String.valueOf(targetIndexShardsNum2), request2.settings().get("index.number_of_shards"));
+        assertEquals("shrink_index", request2.cause());
+        assertEquals(request2.waitForActiveShards(), activeShardCount2);
+    }
+
     private DiscoveryNode newNode(String nodeId) {
         return new DiscoveryNode(
             nodeId,
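The two `calTargetShardsNum` assertions above pin down the rounding rule: a shrink target's shard count must evenly divide the source's, so the computed minimum is rounded up to the next divisor of the source shard count (21 becomes 30, and 31 becomes 60, for a 60-shard source). A minimal sketch consistent with those assertions (an illustration of the expected behavior, not the method body from this commit):

    // Sketch only: round `minShards` up to the nearest divisor of
    // `sourceShards`, falling back to the source shard count itself.
    static int calTargetShardsNum(int sourceShards, int minShards) {
        for (int candidate = Math.max(1, minShards); candidate < sourceShards; candidate++) {
            if (sourceShards % candidate == 0) {
                return candidate; // e.g. (60, 21) -> 30
            }
        }
        return sourceShards;      // e.g. (60, 31) -> 60
    }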
@@ -7,15 +7,19 @@ package org.elasticsearch.xpack.core.ilm;
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.Version;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.metadata.IndexAbstraction;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.xpack.core.ilm.Step.StepKey;
@@ -34,40 +38,83 @@ public class ShrinkAction implements LifecycleAction {
     public static final String NAME = "shrink";
     public static final String SHRUNKEN_INDEX_PREFIX = "shrink-";
     public static final ParseField NUMBER_OF_SHARDS_FIELD = new ParseField("number_of_shards");
+    private static final ParseField MAX_SINGLE_PRIMARY_SIZE = new ParseField("max_single_primary_size");
     public static final String CONDITIONAL_SKIP_SHRINK_STEP = BranchingStep.NAME + "-check-prerequisites";
     public static final String CONDITIONAL_DATASTREAM_CHECK_KEY = BranchingStep.NAME + "-on-datastream-check";
 
     private static final ConstructingObjectParser<ShrinkAction, Void> PARSER =
-        new ConstructingObjectParser<>(NAME, a -> new ShrinkAction((Integer) a[0]));
+        new ConstructingObjectParser<>(NAME, a -> new ShrinkAction((Integer) a[0], (ByteSizeValue) a[1]));
 
     static {
-        PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_SHARDS_FIELD);
+        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUMBER_OF_SHARDS_FIELD);
+        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
+            (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SINGLE_PRIMARY_SIZE.getPreferredName()),
+            MAX_SINGLE_PRIMARY_SIZE, ObjectParser.ValueType.STRING);
     }
 
-    private int numberOfShards;
+    private Integer numberOfShards;
+    private ByteSizeValue maxSinglePrimarySize;
 
     public static ShrinkAction parse(XContentParser parser) throws IOException {
         return PARSER.parse(parser, null);
     }
 
-    public ShrinkAction(int numberOfShards) {
-        if (numberOfShards <= 0) {
-            throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0");
+    public ShrinkAction(@Nullable Integer numberOfShards, @Nullable ByteSizeValue maxSinglePrimarySize) {
+        if (numberOfShards != null && maxSinglePrimarySize != null) {
+            throw new IllegalArgumentException("Cannot set both [number_of_shards] and [max_single_primary_size]");
+        }
+        if (numberOfShards == null && maxSinglePrimarySize == null) {
+            throw new IllegalArgumentException("Either [number_of_shards] or [max_single_primary_size] must be set");
+        }
+        if (maxSinglePrimarySize != null) {
+            if (maxSinglePrimarySize.getBytes() <= 0) {
+                throw new IllegalArgumentException("[max_single_primary_size] must be greater than 0");
+            }
+            this.maxSinglePrimarySize = maxSinglePrimarySize;
+        } else {
+            if (numberOfShards <= 0) {
+                throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0");
+            }
+            this.numberOfShards = numberOfShards;
         }
-        this.numberOfShards = numberOfShards;
     }
 
     public ShrinkAction(StreamInput in) throws IOException {
-        this.numberOfShards = in.readVInt();
+        if (in.getVersion().onOrAfter(Version.V_8_0_0)) {
+            if (in.readBoolean()) {
+                this.numberOfShards = in.readVInt();
+                this.maxSinglePrimarySize = null;
+            } else {
+                this.numberOfShards = null;
+                this.maxSinglePrimarySize = new ByteSizeValue(in);
+            }
+        } else {
+            this.numberOfShards = in.readVInt();
+            this.maxSinglePrimarySize = null;
+        }
     }
 
-    int getNumberOfShards() {
+    Integer getNumberOfShards() {
         return numberOfShards;
     }
 
+    ByteSizeValue getMaxSinglePrimarySize() {
+        return maxSinglePrimarySize;
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(numberOfShards);
+        if (out.getVersion().onOrAfter(Version.V_8_0_0)) {
+            boolean hasNumberOfShards = numberOfShards != null;
+            out.writeBoolean(hasNumberOfShards);
+            if (hasNumberOfShards) {
+                out.writeVInt(numberOfShards);
+            } else {
+                maxSinglePrimarySize.writeTo(out);
+            }
+        } else {
+            out.writeVInt(numberOfShards);
+        }
     }
 
     @Override
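Taken together, the parser and constructor changes above make `number_of_shards` and `max_single_primary_size` optional but mutually exclusive, with exactly one required. Illustrative calls (not from the patch; assumes `ByteSizeUnit` is imported):

    new ShrinkAction(5, null);                                       // ok: shrink to a fixed shard count
    new ShrinkAction(null, new ByteSizeValue(50, ByteSizeUnit.GB));  // ok: size-driven shrink
    // new ShrinkAction(5, new ByteSizeValue(50, ByteSizeUnit.GB));  // throws "Cannot set both [number_of_shards] and [max_single_primary_size]"
    // new ShrinkAction(null, null);                                 // throws "Either [number_of_shards] or [max_single_primary_size] must be set"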
@@ -78,7 +125,12 @@ public class ShrinkAction implements LifecycleAction {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
-        builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards);
+        if (numberOfShards != null) {
+            builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards);
+        }
+        if (maxSinglePrimarySize != null) {
+            builder.field(MAX_SINGLE_PRIMARY_SIZE.getPreferredName(), maxSinglePrimarySize);
+        }
         builder.endObject();
         return builder;
     }
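With the now-conditional `toXContent`, only the configured option is rendered. Inside a phase definition a size-driven action would appear as, for example, `"shrink": {"max_single_primary_size": "50gb"}` (the size serializing as a human-readable string, matching the parser above, which reads it back via `ByteSizeValue.parseBytesSizeValue`), while a count-driven one still renders as `"shrink": {"number_of_shards": 1}`. The `50gb` value here is illustrative, not from the patch.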
@@ -110,7 +162,7 @@ public class ShrinkAction implements LifecycleAction {
         BranchingStep conditionalSkipShrinkStep = new BranchingStep(preShrinkBranchingKey, checkNotWriteIndex, nextStepKey,
             (index, clusterState) -> {
                 IndexMetadata indexMetadata = clusterState.getMetadata().index(index);
-                if (indexMetadata.getNumberOfShards() == numberOfShards) {
+                if (numberOfShards != null && indexMetadata.getNumberOfShards() == numberOfShards) {
                     return true;
                 }
                 if (indexMetadata.getSettings().get(LifecycleSettings.SNAPSHOT_INDEX_NAME) != null) {
@@ -127,7 +179,8 @@ public class ShrinkAction implements LifecycleAction {
         UpdateSettingsStep readOnlyStep = new UpdateSettingsStep(readOnlyKey, setSingleNodeKey, client, readOnlySettings);
         SetSingleNodeAllocateStep setSingleNodeStep = new SetSingleNodeAllocateStep(setSingleNodeKey, allocationRoutedKey, client);
         CheckShrinkReadyStep checkShrinkReadyStep = new CheckShrinkReadyStep(allocationRoutedKey, shrinkKey);
-        ShrinkStep shrink = new ShrinkStep(shrinkKey, enoughShardsKey, client, numberOfShards, SHRUNKEN_INDEX_PREFIX);
+        ShrinkStep shrink = new ShrinkStep(shrinkKey, enoughShardsKey, client, numberOfShards, maxSinglePrimarySize,
+            SHRUNKEN_INDEX_PREFIX);
         ShrunkShardsAllocatedStep allocated = new ShrunkShardsAllocatedStep(enoughShardsKey, copyMetadataKey, SHRUNKEN_INDEX_PREFIX);
         CopyExecutionStateStep copyMetadata = new CopyExecutionStateStep(copyMetadataKey, dataStreamCheckBranchingKey,
             SHRUNKEN_INDEX_PREFIX, ShrunkenIndexCheckStep.NAME);
@@ -157,12 +210,13 @@ public class ShrinkAction implements LifecycleAction {
         if (this == o) return true;
         if (o == null || getClass() != o.getClass()) return false;
         ShrinkAction that = (ShrinkAction) o;
-        return Objects.equals(numberOfShards, that.numberOfShards);
+        return Objects.equals(numberOfShards, that.numberOfShards) &&
+            Objects.equals(maxSinglePrimarySize, that.maxSinglePrimarySize);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(numberOfShards);
+        return Objects.hash(numberOfShards, maxSinglePrimarySize);
     }
 
     @Override
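A note on wire compatibility, as implied by the `StreamInput` constructor and `writeTo` above: streams between 8.0.0+ nodes carry a boolean discriminator before the payload, while older streams keep the bare vInt.

    // 8.0.0+ stream layout:
    //   writeBoolean(true)  + writeVInt(numberOfShards)         // count-driven
    //   writeBoolean(false) + maxSinglePrimarySize.writeTo(out) // size-driven
    // Pre-8.0.0 streams carry only the vInt, so only count-driven
    // actions can be exchanged with nodes before 8.0.0.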
@@ -12,6 +12,7 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateObserver;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 
 import java.util.Objects;
 
@@ -21,19 +22,26 @@ import java.util.Objects;
 public class ShrinkStep extends AsyncActionStep {
     public static final String NAME = "shrink";
 
-    private int numberOfShards;
+    private Integer numberOfShards;
+    private ByteSizeValue maxSinglePrimarySize;
     private String shrunkIndexPrefix;
 
-    public ShrinkStep(StepKey key, StepKey nextStepKey, Client client, int numberOfShards, String shrunkIndexPrefix) {
+    public ShrinkStep(StepKey key, StepKey nextStepKey, Client client, Integer numberOfShards,
+                      ByteSizeValue maxSinglePrimarySize, String shrunkIndexPrefix) {
         super(key, nextStepKey, client);
         this.numberOfShards = numberOfShards;
+        this.maxSinglePrimarySize = maxSinglePrimarySize;
         this.shrunkIndexPrefix = shrunkIndexPrefix;
     }
 
-    public int getNumberOfShards() {
+    public Integer getNumberOfShards() {
         return numberOfShards;
     }
 
+    public ByteSizeValue getMaxSinglePrimarySize() {
+        return maxSinglePrimarySize;
+    }
+
     String getShrunkIndexPrefix() {
         return shrunkIndexPrefix;
     }
@@ -48,17 +56,20 @@ public class ShrinkStep extends AsyncActionStep {
 
         String lifecycle = LifecycleSettings.LIFECYCLE_NAME_SETTING.get(indexMetadata.getSettings());
 
-        Settings relevantTargetSettings = Settings.builder()
-            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
-            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, indexMetadata.getNumberOfReplicas())
+        Settings.Builder builder = Settings.builder();
+        // need to remove the single shard, allocation so replicas can be allocated
+        builder.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, indexMetadata.getNumberOfReplicas())
             .put(LifecycleSettings.LIFECYCLE_NAME, lifecycle)
-            .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", (String) null) // need to remove the single shard
-            // allocation so replicas can be allocated
-            .build();
+            .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", (String) null);
+        if (numberOfShards != null) {
+            builder.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards);
+        }
+        Settings relevantTargetSettings = builder.build();
 
         String shrunkenIndexName = shrunkIndexPrefix + indexMetadata.getIndex().getName();
         ResizeRequest resizeRequest = new ResizeRequest(shrunkenIndexName, indexMetadata.getIndex().getName())
             .masterNodeTimeout(getMasterTimeout(currentState));
+        resizeRequest.setMaxSinglePrimarySize(maxSinglePrimarySize);
         resizeRequest.getTargetIndexRequest().settings(relevantTargetSettings);
 
         getClient().admin().indices().resizeIndex(resizeRequest, ActionListener.wrap(response -> {
@@ -72,7 +83,7 @@ public class ShrinkStep extends AsyncActionStep {
 
     @Override
     public int hashCode() {
-        return Objects.hash(super.hashCode(), numberOfShards, shrunkIndexPrefix);
+        return Objects.hash(super.hashCode(), numberOfShards, maxSinglePrimarySize, shrunkIndexPrefix);
     }
 
     @Override
@@ -86,6 +97,7 @@ public class ShrinkStep extends AsyncActionStep {
         ShrinkStep other = (ShrinkStep) obj;
         return super.equals(obj) &&
             Objects.equals(numberOfShards, other.numberOfShards) &&
+            Objects.equals(maxSinglePrimarySize, other.maxSinglePrimarySize) &&
            Objects.equals(shrunkIndexPrefix, other.shrunkIndexPrefix);
     }
 
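The net effect in `ShrinkStep.performAction` is that `index.number_of_shards` is set on the target only for a count-driven shrink; for a size-driven one the limit rides along on the `ResizeRequest`, and the target shard count is computed server-side from store statistics (see the `TransportResizeAction` test earlier in this diff). A condensed, illustrative view of the request the step now builds (a consolidation of the hunk above, not additional code):

    Settings.Builder builder = Settings.builder()
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, indexMetadata.getNumberOfReplicas())
        .put(LifecycleSettings.LIFECYCLE_NAME, lifecycle)
        .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", (String) null);
    if (numberOfShards != null) {
        builder.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards); // count-driven only
    }
    ResizeRequest resizeRequest = new ResizeRequest(shrunkenIndexName, indexMetadata.getIndex().getName());
    resizeRequest.setMaxSinglePrimarySize(maxSinglePrimarySize); // null for count-driven shrinks
    resizeRequest.getTargetIndexRequest().settings(builder.build());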
@@ -11,6 +11,7 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.common.io.stream.Writeable.Reader;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.xpack.core.ilm.Step.StepKey;
@@ -34,12 +35,20 @@ public class ShrinkActionTests extends AbstractActionTestCase<ShrinkAction> {
     }
 
     static ShrinkAction randomInstance() {
-        return new ShrinkAction(randomIntBetween(1, 100));
+        if (randomBoolean()) {
+            return new ShrinkAction(randomIntBetween(1, 100), null);
+        } else {
+            return new ShrinkAction(null, new ByteSizeValue(randomIntBetween(1, 100)));
+        }
     }
 
     @Override
     protected ShrinkAction mutateInstance(ShrinkAction action) {
-        return new ShrinkAction(action.getNumberOfShards() + randomIntBetween(1, 2));
+        if (action.getNumberOfShards() != null) {
+            return new ShrinkAction(action.getNumberOfShards() + randomIntBetween(1, 2), null);
+        } else {
+            return new ShrinkAction(null, new ByteSizeValue(action.getMaxSinglePrimarySize().getBytes() + 1));
+        }
     }
 
     @Override
@@ -48,14 +57,24 @@ public class ShrinkActionTests extends AbstractActionTestCase<ShrinkAction> {
     }
 
     public void testNonPositiveShardNumber() {
-        Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0)));
+        Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0), null));
         assertThat(e.getMessage(), equalTo("[number_of_shards] must be greater than 0"));
     }
 
+    public void testMaxSinglePrimarySize() {
+        ByteSizeValue maxSinglePrimarySize1 = new ByteSizeValue(10);
+        Exception e1 = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(1, 100), maxSinglePrimarySize1));
+        assertThat(e1.getMessage(), equalTo("Cannot set both [number_of_shards] and [max_single_primary_size]"));
+
+        ByteSizeValue maxSinglePrimarySize2 = new ByteSizeValue(0);
+        Exception e2 = expectThrows(Exception.class, () -> new ShrinkAction(null, maxSinglePrimarySize2));
+        assertThat(e2.getMessage(), equalTo("[max_single_primary_size] must be greater than 0"));
+    }
+
     public void testPerformActionWithSkip() {
         String lifecycleName = randomAlphaOfLengthBetween(4, 10);
         int numberOfShards = randomIntBetween(1, 10);
-        ShrinkAction action = new ShrinkAction(numberOfShards);
+        ShrinkAction action = new ShrinkAction(numberOfShards, null);
         String phase = randomAlphaOfLengthBetween(1, 10);
         StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
             randomAlphaOfLengthBetween(1, 10));
@@ -90,7 +109,7 @@ public class ShrinkActionTests extends AbstractActionTestCase<ShrinkAction> {
         int divisor = randomFrom(2, 3, 6);
         int expectedFinalShards = numShards / divisor;
         String lifecycleName = randomAlphaOfLengthBetween(4, 10);
-        ShrinkAction action = new ShrinkAction(expectedFinalShards);
+        ShrinkAction action = new ShrinkAction(expectedFinalShards, null);
         String phase = randomAlphaOfLengthBetween(1, 10);
         StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
             randomAlphaOfLengthBetween(1, 10));
@@ -14,6 +14,7 @@ import org.elasticsearch.action.admin.indices.shrink.ResizeResponse;
 import org.elasticsearch.cluster.metadata.AliasMetadata;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.xpack.core.ilm.AsyncActionStep.Listener;
 import org.elasticsearch.xpack.core.ilm.Step.StepKey;
 import org.mockito.Mockito;
@@ -30,16 +31,23 @@ public class ShrinkStepTests extends AbstractStepTestCase<ShrinkStep> {
     public ShrinkStep createRandomInstance() {
         StepKey stepKey = randomStepKey();
         StepKey nextStepKey = randomStepKey();
-        int numberOfShards = randomIntBetween(1, 20);
+        Integer numberOfShards = null;
+        ByteSizeValue maxSinglePrimarySize = null;
+        if (randomBoolean()) {
+            numberOfShards = randomIntBetween(1, 20);
+        } else {
+            maxSinglePrimarySize = new ByteSizeValue(between(1, 100));
+        }
         String shrunkIndexPrefix = randomAlphaOfLength(10);
-        return new ShrinkStep(stepKey, nextStepKey, client, numberOfShards, shrunkIndexPrefix);
+        return new ShrinkStep(stepKey, nextStepKey, client, numberOfShards, maxSinglePrimarySize, shrunkIndexPrefix);
     }
 
     @Override
     public ShrinkStep mutateInstance(ShrinkStep instance) {
         StepKey key = instance.getKey();
         StepKey nextKey = instance.getNextStepKey();
-        int numberOfShards = instance.getNumberOfShards();
+        Integer numberOfShards = instance.getNumberOfShards();
+        ByteSizeValue maxSinglePrimarySize = instance.getMaxSinglePrimarySize();
         String shrunkIndexPrefix = instance.getShrunkIndexPrefix();
 
         switch (between(0, 3)) {
@@ -50,7 +58,12 @@ public class ShrinkStepTests extends AbstractStepTestCase<ShrinkStep> {
             nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
             break;
         case 2:
-            numberOfShards = numberOfShards + 1;
+            if (numberOfShards != null) {
+                numberOfShards = numberOfShards + 1;
+            }
+            if (maxSinglePrimarySize != null) {
+                maxSinglePrimarySize = new ByteSizeValue(maxSinglePrimarySize.getBytes() + 1);
+            }
             break;
         case 3:
             shrunkIndexPrefix += randomAlphaOfLength(5);
@@ -59,13 +72,13 @@ public class ShrinkStepTests extends AbstractStepTestCase<ShrinkStep> {
             throw new AssertionError("Illegal randomisation branch");
         }
 
-        return new ShrinkStep(key, nextKey, instance.getClient(), numberOfShards, shrunkIndexPrefix);
+        return new ShrinkStep(key, nextKey, instance.getClient(), numberOfShards, maxSinglePrimarySize, shrunkIndexPrefix);
     }
 
     @Override
     public ShrinkStep copyInstance(ShrinkStep instance) {
         return new ShrinkStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getNumberOfShards(),
-            instance.getShrunkIndexPrefix());
+            instance.getMaxSinglePrimarySize(), instance.getShrunkIndexPrefix());
     }
 
     public void testPerformAction() throws Exception {
@@ -91,14 +104,20 @@ public class ShrinkStepTests extends AbstractStepTestCase<ShrinkStep> {
             ActionListener<ResizeResponse> listener = (ActionListener<ResizeResponse>) invocation.getArguments()[1];
             assertThat(request.getSourceIndex(), equalTo(sourceIndexMetadata.getIndex().getName()));
             assertThat(request.getTargetIndexRequest().aliases(), equalTo(Collections.emptySet()));
-            assertThat(request.getTargetIndexRequest().settings(), equalTo(Settings.builder()
-                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, step.getNumberOfShards())
-                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, sourceIndexMetadata.getNumberOfReplicas())
+            Settings.Builder builder = Settings.builder();
+            builder.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, sourceIndexMetadata.getNumberOfReplicas())
                 .put(LifecycleSettings.LIFECYCLE_NAME, lifecycleName)
-                .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", (String) null)
-                .build()));
-            assertThat(request.getTargetIndexRequest().settings()
+                .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getKey() + "_id", (String) null);
+            if (step.getNumberOfShards() != null) {
+                builder.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, step.getNumberOfShards());
+            }
+            assertThat(request.getTargetIndexRequest().settings(), equalTo(builder.build()));
+            if (step.getNumberOfShards() != null) {
+                assertThat(request.getTargetIndexRequest().settings()
                    .getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, -1), equalTo(step.getNumberOfShards()));
+            }
+            request.setMaxSinglePrimarySize(step.getMaxSinglePrimarySize());
             listener.onResponse(new ResizeResponse(true, true, sourceIndexMetadata.getIndex().getName()));
             return null;
         }).when(indicesClient).resizeIndex(Mockito.any(), Mockito.any());
@@ -52,7 +52,7 @@ public class TimeseriesLifecycleTypeTests extends ESTestCase {
     private static final WaitForSnapshotAction TEST_WAIT_FOR_SNAPSHOT_ACTION = new WaitForSnapshotAction("policy");
     private static final ForceMergeAction TEST_FORCE_MERGE_ACTION = new ForceMergeAction(1, null);
     private static final RolloverAction TEST_ROLLOVER_ACTION = new RolloverAction(new ByteSizeValue(1), null, null);
-    private static final ShrinkAction TEST_SHRINK_ACTION = new ShrinkAction(1);
+    private static final ShrinkAction TEST_SHRINK_ACTION = new ShrinkAction(1, null);
     private static final ReadOnlyAction TEST_READ_ONLY_ACTION = new ReadOnlyAction();
     private static final FreezeAction TEST_FREEZE_ACTION = new FreezeAction();
     private static final SetPriorityAction TEST_PRIORITY_ACTION = new SetPriorityAction(0);
@@ -209,7 +209,7 @@ public class TimeseriesLifecycleTypeTests extends ESTestCase {
 
     public void testValidateActionsFollowingSearchableSnapshot() {
         Phase hotPhase = new Phase("hot", TimeValue.ZERO, Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction("repo")));
-        Phase warmPhase = new Phase("warm", TimeValue.ZERO, Map.of(ShrinkAction.NAME, new ShrinkAction(1)));
+        Phase warmPhase = new Phase("warm", TimeValue.ZERO, Map.of(ShrinkAction.NAME, new ShrinkAction(1, null)));
         Phase coldPhase = new Phase("cold", TimeValue.ZERO, Map.of(FreezeAction.NAME, new FreezeAction()));
 
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
@@ -621,7 +621,7 @@ public class TimeseriesLifecycleTypeTests extends ESTestCase {
             case RolloverAction.NAME:
                 return new RolloverAction(ByteSizeValue.parseBytesSizeValue("0b", "test"), TimeValue.ZERO, 1L);
             case ShrinkAction.NAME:
-                return new ShrinkAction(1);
+                return new ShrinkAction(1, null);
             case FreezeAction.NAME:
                 return new FreezeAction();
             case SetPriorityAction.NAME:
@@ -173,7 +173,7 @@ public final class TimeSeriesRestDriver {
         warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1, null));
         warmActions.put(AllocateAction.NAME, new AllocateAction(1, singletonMap("_name", "javaRestTest-0,javaRestTest-1,javaRestTest-2," +
             "javaRestTest-3"), null, null));
-        warmActions.put(ShrinkAction.NAME, new ShrinkAction(1));
+        warmActions.put(ShrinkAction.NAME, new ShrinkAction(1, null));
         Map<String, LifecycleAction> coldActions = new HashMap<>();
         coldActions.put(SetPriorityAction.NAME, new SetPriorityAction(0));
         coldActions.put(AllocateAction.NAME, new AllocateAction(0, singletonMap("_name", "javaRestTest-3"), null, null));
@@ -71,7 +71,7 @@ public class ExplainLifecycleIT extends ESRestTestCase {
         {
             // Create a "shrink-only-policy"
             Map<String, LifecycleAction> warmActions = new HashMap<>();
-            warmActions.put(ShrinkAction.NAME, new ShrinkAction(17));
+            warmActions.put(ShrinkAction.NAME, new ShrinkAction(17, null));
             Map<String, Phase> phases = new HashMap<>();
             phases.put("warm", new Phase("warm", TimeValue.ZERO, warmActions));
             LifecyclePolicy lifecyclePolicy = new LifecyclePolicy("shrink-only-policy", phases);
@@ -88,7 +88,7 @@ public class TimeSeriesDataStreamsIT extends ESRestTestCase {
 
     public void testShrinkActionInPolicyWithoutHotPhase() throws Exception {
         String policyName = "logs-policy";
-        createNewSingletonPolicy(client(), policyName, "warm", new ShrinkAction(1));
+        createNewSingletonPolicy(client(), policyName, "warm", new ShrinkAction(1, null));
 
         createComposableTemplate(client(), "logs-template", "logs-foo*", getTemplate(policyName));
 
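The integration-test call sites in the files around here all keep shard-count-driven behavior by passing null for the new argument. A hypothetical counterpart (not part of this patch) that opts a warm phase into size-driven shrinking instead would look like:

    createNewSingletonPolicy(client(), policyName, "warm",
        new ShrinkAction(null, ByteSizeValue.parseBytesSizeValue("50gb", "max_single_primary_size")));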
@@ -190,7 +190,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
         String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
         createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
             .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
-        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(numShards + randomIntBetween(1, numShards)));
+        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(numShards + randomIntBetween(1, numShards), null));
         updatePolicy(index, policy);
         assertBusy(() -> {
             String failedStep = getFailedStepForIndex(index);
@@ -198,7 +198,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
         }, 30, TimeUnit.SECONDS);
 
         // update policy to be correct
-        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards));
+        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards, null));
         updatePolicy(index, policy);
 
         // retry step
@@ -523,7 +523,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
         String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
         createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
             .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
-        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards));
+        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards, null));
         updatePolicy(index, policy);
         assertBusy(() -> assertTrue(indexExists(shrunkenIndex)), 30, TimeUnit.SECONDS);
         assertBusy(() -> assertTrue(aliasExists(shrunkenIndex, index)));
@@ -542,7 +542,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
         String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
         createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
             .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
-        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(numberOfShards));
+        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(numberOfShards, null));
         updatePolicy(index, policy);
         assertBusy(() -> {
             assertTrue(indexExists(index));
@@ -572,7 +572,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
             .endObject()));
         assertOK(client().performRequest(request));
         // create delete policy
-        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(1), TimeValue.timeValueMillis(0));
+        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(1, null), TimeValue.timeValueMillis(0));
         // create index without policy
         createIndexWithSettings(client(), index, alias, Settings.builder()
             .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
@@ -613,7 +613,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
         // add a policy
         Map<String, LifecycleAction> hotActions = Map.of(
             RolloverAction.NAME, new RolloverAction(null, null, 1L),
-            ShrinkAction.NAME, new ShrinkAction(expectedFinalShards));
+            ShrinkAction.NAME, new ShrinkAction(expectedFinalShards, null));
         Map<String, Phase> phases = Map.of(
             "hot", new Phase("hot", TimeValue.ZERO, hotActions));
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases);
@@ -675,7 +675,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
         // assign the policy that'll attempt to shrink the index (disabling the migrate action as it'll otherwise wait for
         // all shards to be active and we want that to happen as part of the shrink action)
         MigrateAction migrateAction = new MigrateAction(false);
-        ShrinkAction shrinkAction = new ShrinkAction(expectedFinalShards);
+        ShrinkAction shrinkAction = new ShrinkAction(expectedFinalShards, null);
         Phase phase = new Phase("warm", TimeValue.ZERO, Map.of(migrateAction.getWriteableName(), migrateAction,
             shrinkAction.getWriteableName(), shrinkAction));
         LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, singletonMap(phase.getName(), phase));
@@ -125,7 +125,7 @@ public class TimeseriesMoveToStepIT extends ESRestTestCase {
 
     public void testMoveToInjectedStep() throws Exception {
         String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
-        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(1), TimeValue.timeValueHours(12));
+        createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(1, null), TimeValue.timeValueHours(12));
 
         createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3)
             .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
@@ -254,7 +254,7 @@ public class SearchableSnapshotActionIT extends ESRestTestCase {
         createPolicy(client(), policy,
             new Phase("hot", TimeValue.ZERO, Map.of(SetPriorityAction.NAME, new SetPriorityAction(10))),
             new Phase("warm", TimeValue.ZERO,
-                Map.of(ShrinkAction.NAME, new ShrinkAction(1), ForceMergeAction.NAME, new ForceMergeAction(1, null))
+                Map.of(ShrinkAction.NAME, new ShrinkAction(1, null), ForceMergeAction.NAME, new ForceMergeAction(1, null))
             ),
             new Phase("cold", TimeValue.ZERO, Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo))),
             null
@@ -322,7 +322,7 @@ public class SearchableSnapshotActionIT extends ESRestTestCase {
         createPolicy(client(), policy,
             new Phase("hot", TimeValue.ZERO, Map.of()),
             new Phase("warm", TimeValue.ZERO,
-                Map.of(ShrinkAction.NAME, new ShrinkAction(1), ForceMergeAction.NAME, new ForceMergeAction(1, null))
+                Map.of(ShrinkAction.NAME, new ShrinkAction(1, null), ForceMergeAction.NAME, new ForceMergeAction(1, null))
             ),
             new Phase("cold", TimeValue.ZERO, Map.of(FreezeAction.NAME, new FreezeAction())),
             null
@@ -75,7 +75,7 @@ public class ILMMultiNodeIT extends ESIntegTestCase {
 
         RolloverAction rolloverAction = new RolloverAction(null, null, 1L);
         Phase hotPhase = new Phase("hot", TimeValue.ZERO, Collections.singletonMap(rolloverAction.getWriteableName(), rolloverAction));
-        ShrinkAction shrinkAction = new ShrinkAction(1);
+        ShrinkAction shrinkAction = new ShrinkAction(1, null);
         Phase warmPhase = new Phase("warm", TimeValue.ZERO, Collections.singletonMap(shrinkAction.getWriteableName(), shrinkAction));
         Map<String, Phase> phases = new HashMap<>();
         phases.put(hotPhase.getName(), hotPhase);
@@ -206,7 +206,7 @@ public class IndexLifecycleServiceTests extends ESTestCase {
 
     public void testRequestedStopInShrinkActionButNotShrinkStep() {
         // test all the shrink action steps that ILM can be stopped during (basically all of them minus the actual shrink)
-        ShrinkAction action = new ShrinkAction(1);
+        ShrinkAction action = new ShrinkAction(1, null);
         action.toSteps(mock(Client.class), "warm", randomStepKey()).stream()
             .map(sk -> sk.getKey().getName())
             .filter(name -> name.equals(ShrinkStep.NAME) == false)
@@ -323,7 +323,7 @@ public class PolicyStepsRegistryTests extends ESTestCase {
         Mockito.when(client.settings()).thenReturn(Settings.EMPTY);
         String policyName = randomAlphaOfLength(5);
         Map<String, LifecycleAction> actions = new HashMap<>();
-        actions.put("shrink", new ShrinkAction(1));
+        actions.put("shrink", new ShrinkAction(1, null));
         Map<String, Phase> phases = new HashMap<>();
         Phase warmPhase = new Phase("warm", TimeValue.ZERO, actions);
         PhaseExecutionInfo pei = new PhaseExecutionInfo(policyName, warmPhase, 1, randomNonNegativeLong());
@@ -332,7 +332,7 @@ public class PolicyStepsRegistryTests extends ESTestCase {
         LifecyclePolicy newPolicy = new LifecyclePolicy(policyName, phases);
         // Modify the policy
         actions = new HashMap<>();
-        actions.put("shrink", new ShrinkAction(2));
+        actions.put("shrink", new ShrinkAction(2, null));
         phases = new HashMap<>();
         phases.put("warm", new Phase("warm", TimeValue.ZERO, actions));
         LifecyclePolicy updatedPolicy = new LifecyclePolicy(policyName, phases);