Merge remote-tracking branch 'es/master' into enrich

commit 85ad27e1e7
Author: Martijn van Groningen
Date:   2019-10-15 07:19:25 +02:00
168 changed files with 2695 additions and 594 deletions

View file

@@ -21,7 +21,7 @@ slf4j = 1.6.2
# when updating the JNA version, also update the version in buildSrc/build.gradle
jna = 4.5.1
netty = 4.1.38.Final
netty = 4.1.42.Final
joda = 2.10.3
# when updating this version, you need to ensure compatibility with:

View file

@@ -30,10 +30,12 @@ import org.elasticsearch.client.ccr.FollowStatsResponse;
import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.PutFollowResponse;
import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.client.core.AcknowledgedResponse;
@@ -410,6 +412,92 @@ public final class CcrClient {
);
}
/**
* Pauses an auto follow pattern.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html">
* the docs</a> for more.
*
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse pauseAutoFollowPattern(PauseAutoFollowPatternRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(
request,
CcrRequestConverters::pauseAutoFollowPattern,
options,
AcknowledgedResponse::fromXContent,
Collections.emptySet()
);
}
/**
* Asynchronously pauses an auto follow pattern.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-pause-auto-follow-pattern.html">
* the docs</a> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable pauseAutoFollowPatternAsync(PauseAutoFollowPatternRequest request,
RequestOptions options,
ActionListener<AcknowledgedResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(
request,
CcrRequestConverters::pauseAutoFollowPattern,
options,
AcknowledgedResponse::fromXContent,
listener,
Collections.emptySet());
}
/**
* Resumes an auto follow pattern.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html">
* the docs</a> for more.
*
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse resumeAutoFollowPattern(ResumeAutoFollowPatternRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(
request,
CcrRequestConverters::resumeAutoFollowPattern,
options,
AcknowledgedResponse::fromXContent,
Collections.emptySet()
);
}
/**
* Asynchronously resumes an auto follow pattern.
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/ccr-resume-auto-follow-pattern.html">
* the docs</a> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable resumeAutoFollowPatternAsync(ResumeAutoFollowPatternRequest request,
RequestOptions options,
ActionListener<AcknowledgedResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(
request,
CcrRequestConverters::resumeAutoFollowPattern,
options,
AcknowledgedResponse::fromXContent,
listener,
Collections.emptySet());
}
/**
* Gets all CCR stats.
*

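For illustration, a minimal sketch of calling the new pause/resume methods added above. The client, the listener, and the pattern name "my_pattern" are assumptions, not part of this commit; the CCRDocumentationIT changes further down carry the full doc-test versions.

    // Synchronous pause and resume of an auto follow pattern (sketch).
    AcknowledgedResponse paused = client.ccr()
        .pauseAutoFollowPattern(new PauseAutoFollowPatternRequest("my_pattern"), RequestOptions.DEFAULT);
    AcknowledgedResponse resumed = client.ccr()
        .resumeAutoFollowPattern(new ResumeAutoFollowPatternRequest("my_pattern"), RequestOptions.DEFAULT);

    // The async variants return a Cancellable that can abort the in-flight HTTP request;
    // listener is an assumed ActionListener<AcknowledgedResponse>.
    Cancellable cancellable = client.ccr()
        .pauseAutoFollowPatternAsync(new PauseAutoFollowPatternRequest("my_pattern"),
            RequestOptions.DEFAULT, listener);
    cancellable.cancel();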
View file

@@ -29,9 +29,11 @@ import org.elasticsearch.client.ccr.FollowInfoRequest;
import org.elasticsearch.client.ccr.FollowStatsRequest;
import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
@@ -118,6 +120,24 @@ final class CcrRequestConverters {
return new Request(HttpGet.METHOD_NAME, endpoint);
}
static Request pauseAutoFollowPattern(PauseAutoFollowPatternRequest pauseAutoFollowPatternRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_ccr", "auto_follow")
.addPathPart(pauseAutoFollowPatternRequest.getName())
.addPathPartAsIs("pause")
.build();
return new Request(HttpPost.METHOD_NAME, endpoint);
}
static Request resumeAutoFollowPattern(ResumeAutoFollowPatternRequest resumeAutoFollowPatternRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_ccr", "auto_follow")
.addPathPart(resumeAutoFollowPatternRequest.getName())
.addPathPartAsIs("resume")
.build();
return new Request(HttpPost.METHOD_NAME, endpoint);
}
static Request getCcrStats(CcrStatsRequest ccrStatsRequest) {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_ccr", "stats")

View file

@@ -43,6 +43,9 @@ import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyResponse;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsResponse;
import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest;
import org.elasticsearch.client.slm.StartSLMRequest;
import org.elasticsearch.client.slm.StopSLMRequest;
import java.io.IOException;
@@ -541,4 +544,102 @@ public class IndexLifecycleClient {
return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getSnapshotLifecycleStats,
options, GetSnapshotLifecycleStatsResponse::fromXContent, listener, emptySet());
}
/**
* Start the Snapshot Lifecycle Management feature.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-start-slm.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse startSLM(StartSLMRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::startSLM, options,
AcknowledgedResponse::fromXContent, emptySet());
}
/**
* Asynchronously start the Snapshot Lifecycle Management feature.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-start-slm.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable startSLMAsync(StartSLMRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::startSLM, options,
AcknowledgedResponse::fromXContent, listener, emptySet());
}
/**
* Stop the Snapshot Lifecycle Management feature.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-stop-slm.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public AcknowledgedResponse stopSLM(StopSLMRequest request, RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::stopSLM, options,
AcknowledgedResponse::fromXContent, emptySet());
}
/**
* Asynchronously stop the Snapshot Lifecycle Management feature.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-stop-slm.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable stopSLMAsync(StopSLMRequest request, RequestOptions options, ActionListener<AcknowledgedResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::stopSLM, options,
AcknowledgedResponse::fromXContent, listener, emptySet());
}
/**
* Get the status of Snapshot Lifecycle Management.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-status.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @return the response
* @throws IOException in case there is a problem sending the request or parsing back the response
*/
public LifecycleManagementStatusResponse getSLMStatus(SnapshotLifecycleManagementStatusRequest request,
RequestOptions options) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::snapshotLifecycleManagementStatus,
options, LifecycleManagementStatusResponse::fromXContent, emptySet());
}
/**
* Asynchronously get the status of Snapshot Lifecycle Management.
* See <pre>
* https://www.elastic.co/guide/en/elasticsearch/client/java-rest/current/
* java-rest-high-ilm-slm-status.html
* </pre> for more.
* @param request the request
* @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
* @param listener the listener to be notified upon request completion
* @return cancellable that may be used to cancel the request
*/
public Cancellable getSLMStatusAsync(SnapshotLifecycleManagementStatusRequest request, RequestOptions options,
ActionListener<LifecycleManagementStatusResponse> listener) {
return restHighLevelClient.performRequestAsyncAndParseEntity(request,
IndexLifecycleRequestConverters::snapshotLifecycleManagementStatus, options, LifecycleManagementStatusResponse::fromXContent,
listener, emptySet());
}
}

View file

@@ -38,6 +38,9 @@ import org.elasticsearch.client.slm.ExecuteSnapshotLifecycleRetentionRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest;
import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest;
import org.elasticsearch.client.slm.StartSLMRequest;
import org.elasticsearch.client.slm.StopSLMRequest;
import org.elasticsearch.common.Strings;
import java.io.IOException;
@@ -239,4 +242,43 @@ final class IndexLifecycleRequestConverters {
request.addParameters(params.asMap());
return request;
}
static Request snapshotLifecycleManagementStatus(SnapshotLifecycleManagementStatusRequest snapshotLifecycleManagementStatusRequest) {
Request request = new Request(HttpGet.METHOD_NAME,
new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_slm")
.addPathPartAsIs("status")
.build());
RequestConverters.Params params = new RequestConverters.Params();
params.withMasterTimeout(snapshotLifecycleManagementStatusRequest.masterNodeTimeout());
params.withTimeout(snapshotLifecycleManagementStatusRequest.timeout());
request.addParameters(params.asMap());
return request;
}
static Request startSLM(StartSLMRequest startSLMRequest) {
Request request = new Request(HttpPost.METHOD_NAME,
new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_slm")
.addPathPartAsIs("start")
.build());
RequestConverters.Params params = new RequestConverters.Params();
params.withMasterTimeout(startSLMRequest.masterNodeTimeout());
params.withTimeout(startSLMRequest.timeout());
request.addParameters(params.asMap());
return request;
}
static Request stopSLM(StopSLMRequest stopSLMRequest) {
Request request = new Request(HttpPost.METHOD_NAME,
new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_slm")
.addPathPartAsIs("stop")
.build());
RequestConverters.Params params = new RequestConverters.Params();
params.withMasterTimeout(stopSLMRequest.masterNodeTimeout());
params.withTimeout(stopSLMRequest.timeout());
request.addParameters(params.asMap());
return request;
}
}

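For illustration, a sketch of what these converters produce. The timeout setters are assumed to come from the existing TimedRequest base class, which the new SLM request classes below extend; none of this sketch is part of the diff itself.

    // Timeouts set on the request surface as query parameters on the low-level request (sketch).
    StopSLMRequest stopRequest = new StopSLMRequest();
    stopRequest.setTimeout(TimeValue.timeValueSeconds(20));       // -> ?timeout=20s
    stopRequest.setMasterTimeout(TimeValue.timeValueSeconds(30)); // -> ?master_timeout=30s
    Request lowLevelRequest = IndexLifecycleRequestConverters.stopSLM(stopRequest);
    // lowLevelRequest is now roughly: POST /_slm/stop?timeout=20s&master_timeout=30s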
View file

@@ -0,0 +1,45 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ccr;
import org.elasticsearch.client.Validatable;
import java.util.Objects;
/**
* Request class for the pause auto follow pattern API.
*/
public final class PauseAutoFollowPatternRequest implements Validatable {
private final String name;
/**
* Pause auto follow pattern with the specified name
*
* @param name The name of the auto follow pattern to pause
*/
public PauseAutoFollowPatternRequest(String name) {
this.name = Objects.requireNonNull(name);
}
public String getName() {
return name;
}
}

View file

@@ -0,0 +1,45 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.ccr;
import org.elasticsearch.client.Validatable;
import java.util.Objects;
/**
* Request class for the resume auto follow pattern API.
*/
public final class ResumeAutoFollowPatternRequest implements Validatable {
private final String name;
/**
* Resume auto follow pattern with the specified name
*
* @param name The name of the auto follow pattern to resume
*/
public ResumeAutoFollowPatternRequest(String name) {
this.name = Objects.requireNonNull(name);
}
public String getName() {
return name;
}
}

View file

@@ -62,6 +62,7 @@ public class DatafeedConfig implements ToXContentObject {
public static final ParseField SCRIPT_FIELDS = new ParseField("script_fields");
public static final ParseField CHUNKING_CONFIG = new ParseField("chunking_config");
public static final ParseField DELAYED_DATA_CHECK_CONFIG = new ParseField("delayed_data_check_config");
public static final ParseField MAX_EMPTY_SEARCHES = new ParseField("max_empty_searches");
public static final ConstructingObjectParser<Builder, Void> PARSER = new ConstructingObjectParser<>(
"datafeed_config", true, a -> new Builder((String)a[0], (String)a[1]));
@@ -88,6 +89,7 @@ public class DatafeedConfig implements ToXContentObject {
PARSER.declareInt(Builder::setScrollSize, SCROLL_SIZE);
PARSER.declareObject(Builder::setChunkingConfig, ChunkingConfig.PARSER, CHUNKING_CONFIG);
PARSER.declareObject(Builder::setDelayedDataCheckConfig, DelayedDataCheckConfig.PARSER, DELAYED_DATA_CHECK_CONFIG);
PARSER.declareInt(Builder::setMaxEmptySearches, MAX_EMPTY_SEARCHES);
}
private static BytesReference parseBytes(XContentParser parser) throws IOException {
@@ -107,11 +109,12 @@ public class DatafeedConfig implements ToXContentObject {
private final Integer scrollSize;
private final ChunkingConfig chunkingConfig;
private final DelayedDataCheckConfig delayedDataCheckConfig;
private final Integer maxEmptySearches;
private DatafeedConfig(String id, String jobId, TimeValue queryDelay, TimeValue frequency, List<String> indices, BytesReference query,
BytesReference aggregations, List<SearchSourceBuilder.ScriptField> scriptFields, Integer scrollSize,
ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) {
ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig,
Integer maxEmptySearches) {
this.id = id;
this.jobId = jobId;
this.queryDelay = queryDelay;
@@ -123,6 +126,7 @@ public class DatafeedConfig implements ToXContentObject {
this.scrollSize = scrollSize;
this.chunkingConfig = chunkingConfig;
this.delayedDataCheckConfig = delayedDataCheckConfig;
this.maxEmptySearches = maxEmptySearches;
}
public String getId() {
@@ -169,6 +173,10 @@ public class DatafeedConfig implements ToXContentObject {
return delayedDataCheckConfig;
}
public Integer getMaxEmptySearches() {
return maxEmptySearches;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -205,6 +213,9 @@ public class DatafeedConfig implements ToXContentObject {
if (delayedDataCheckConfig != null) {
builder.field(DELAYED_DATA_CHECK_CONFIG.getPreferredName(), delayedDataCheckConfig);
}
if (maxEmptySearches != null) {
builder.field(MAX_EMPTY_SEARCHES.getPreferredName(), maxEmptySearches);
}
builder.endObject();
return builder;
@@ -245,7 +256,8 @@ public class DatafeedConfig implements ToXContentObject {
&& Objects.equals(asMap(this.aggregations), asMap(that.aggregations))
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig)
&& Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig);
&& Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig)
&& Objects.equals(this.maxEmptySearches, that.maxEmptySearches);
}
/**
@@ -256,7 +268,7 @@ public class DatafeedConfig implements ToXContentObject {
@Override
public int hashCode() {
return Objects.hash(id, jobId, frequency, queryDelay, indices, asMap(query), scrollSize, asMap(aggregations), scriptFields,
chunkingConfig, delayedDataCheckConfig);
chunkingConfig, delayedDataCheckConfig, maxEmptySearches);
}
public static Builder builder(String id, String jobId) {
@@ -276,6 +288,7 @@ public class DatafeedConfig implements ToXContentObject {
private Integer scrollSize;
private ChunkingConfig chunkingConfig;
private DelayedDataCheckConfig delayedDataCheckConfig;
private Integer maxEmptySearches;
public Builder(String id, String jobId) {
this.id = Objects.requireNonNull(id, ID.getPreferredName());
@@ -294,6 +307,7 @@ public class DatafeedConfig implements ToXContentObject {
this.scrollSize = config.scrollSize;
this.chunkingConfig = config.chunkingConfig;
this.delayedDataCheckConfig = config.getDelayedDataCheckConfig();
this.maxEmptySearches = config.getMaxEmptySearches();
}
public Builder setIndices(List<String> indices) {
@@ -376,9 +390,14 @@ public class DatafeedConfig implements ToXContentObject {
return this;
}
public Builder setMaxEmptySearches(int maxEmptySearches) {
this.maxEmptySearches = maxEmptySearches;
return this;
}
public DatafeedConfig build() {
return new DatafeedConfig(id, jobId, queryDelay, frequency, indices, query, aggregations, scriptFields, scrollSize,
chunkingConfig, delayedDataCheckConfig);
chunkingConfig, delayedDataCheckConfig, maxEmptySearches);
}
private static BytesReference xContentToBytes(ToXContentObject object) throws IOException {

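A short sketch of the new max_empty_searches option in use, built entirely from the builder methods shown above; the datafeed/job IDs and index pattern are hypothetical:

    // Roughly: the datafeed stops itself after this many consecutive
    // searches that return no data (sketch).
    DatafeedConfig datafeed = DatafeedConfig.builder("my-datafeed", "my-job")
        .setIndices(Collections.singletonList("metrics-*"))
        .setMaxEmptySearches(10)
        .build();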
View file

@@ -77,6 +77,7 @@ public class DatafeedUpdate implements ToXContentObject {
PARSER.declareObject(Builder::setDelayedDataCheckConfig,
DelayedDataCheckConfig.PARSER,
DatafeedConfig.DELAYED_DATA_CHECK_CONFIG);
PARSER.declareInt(Builder::setMaxEmptySearches, DatafeedConfig.MAX_EMPTY_SEARCHES);
}
private static BytesReference parseBytes(XContentParser parser) throws IOException {
@@ -95,10 +96,12 @@ public class DatafeedUpdate implements ToXContentObject {
private final Integer scrollSize;
private final ChunkingConfig chunkingConfig;
private final DelayedDataCheckConfig delayedDataCheckConfig;
private final Integer maxEmptySearches;
private DatafeedUpdate(String id, TimeValue queryDelay, TimeValue frequency, List<String> indices, BytesReference query,
BytesReference aggregations, List<SearchSourceBuilder.ScriptField> scriptFields, Integer scrollSize,
ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig) {
ChunkingConfig chunkingConfig, DelayedDataCheckConfig delayedDataCheckConfig,
Integer maxEmptySearches) {
this.id = id;
this.queryDelay = queryDelay;
this.frequency = frequency;
@@ -109,6 +112,7 @@ public class DatafeedUpdate implements ToXContentObject {
this.scrollSize = scrollSize;
this.chunkingConfig = chunkingConfig;
this.delayedDataCheckConfig = delayedDataCheckConfig;
this.maxEmptySearches = maxEmptySearches;
}
/**
@@ -147,6 +151,7 @@ public class DatafeedUpdate implements ToXContentObject {
}
addOptionalField(builder, DatafeedConfig.SCROLL_SIZE, scrollSize);
addOptionalField(builder, DatafeedConfig.CHUNKING_CONFIG, chunkingConfig);
addOptionalField(builder, DatafeedConfig.MAX_EMPTY_SEARCHES, maxEmptySearches);
builder.endObject();
return builder;
}
@@ -193,6 +198,10 @@ public class DatafeedUpdate implements ToXContentObject {
return delayedDataCheckConfig;
}
public Integer getMaxEmptySearches() {
return maxEmptySearches;
}
private static Map<String, Object> asMap(BytesReference bytesReference) {
return bytesReference == null ? null : XContentHelper.convertToMap(bytesReference, true, XContentType.JSON).v2();
}
@@ -227,7 +236,8 @@ public class DatafeedUpdate implements ToXContentObject {
&& Objects.equals(asMap(this.aggregations), asMap(that.aggregations))
&& Objects.equals(this.delayedDataCheckConfig, that.delayedDataCheckConfig)
&& Objects.equals(this.scriptFields, that.scriptFields)
&& Objects.equals(this.chunkingConfig, that.chunkingConfig);
&& Objects.equals(this.chunkingConfig, that.chunkingConfig)
&& Objects.equals(this.maxEmptySearches, that.maxEmptySearches);
}
/**
@@ -238,7 +248,7 @@ public class DatafeedUpdate implements ToXContentObject {
@Override
public int hashCode() {
return Objects.hash(id, frequency, queryDelay, indices, asMap(query), scrollSize, asMap(aggregations), scriptFields,
chunkingConfig, delayedDataCheckConfig);
chunkingConfig, delayedDataCheckConfig, maxEmptySearches);
}
public static Builder builder(String id) {
@@ -257,6 +267,7 @@ public class DatafeedUpdate implements ToXContentObject {
private Integer scrollSize;
private ChunkingConfig chunkingConfig;
private DelayedDataCheckConfig delayedDataCheckConfig;
private Integer maxEmptySearches;
public Builder(String id) {
this.id = Objects.requireNonNull(id, DatafeedConfig.ID.getPreferredName());
@@ -273,6 +284,7 @@ public class DatafeedUpdate implements ToXContentObject {
this.scrollSize = config.scrollSize;
this.chunkingConfig = config.chunkingConfig;
this.delayedDataCheckConfig = config.delayedDataCheckConfig;
this.maxEmptySearches = config.maxEmptySearches;
}
public Builder setIndices(List<String> indices) {
@@ -346,9 +358,14 @@ public class DatafeedUpdate implements ToXContentObject {
return this;
}
public Builder setMaxEmptySearches(int maxEmptySearches) {
this.maxEmptySearches = maxEmptySearches;
return this;
}
public DatafeedUpdate build() {
return new DatafeedUpdate(id, queryDelay, frequency, indices, query, aggregations, scriptFields, scrollSize,
chunkingConfig, delayedDataCheckConfig);
chunkingConfig, delayedDataCheckConfig, maxEmptySearches);
}
private static BytesReference xContentToBytes(ToXContentObject object) throws IOException {

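The same option can also be applied to an existing datafeed through the update object, again using only the builder methods shown above (hypothetical ID):

    // Updating an existing datafeed to add the new setting (sketch).
    DatafeedUpdate update = DatafeedUpdate.builder("my-datafeed")
        .setMaxEmptySearches(20)
        .build();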
View file

@@ -56,6 +56,7 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
private static final ParseField MODEL_MEMORY_LIMIT = new ParseField("model_memory_limit");
private static final ParseField CREATE_TIME = new ParseField("create_time");
private static final ParseField VERSION = new ParseField("version");
private static final ParseField ALLOW_LAZY_START = new ParseField("allow_lazy_start");
private static ObjectParser<Builder, Void> PARSER = new ObjectParser<>("data_frame_analytics_config", true, Builder::new);
@@ -86,6 +87,7 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
},
VERSION,
ValueType.STRING);
PARSER.declareBoolean(Builder::setAllowLazyStart, ALLOW_LAZY_START);
}
private static DataFrameAnalysis parseAnalysis(XContentParser parser) throws IOException {
@@ -105,11 +107,12 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
private final ByteSizeValue modelMemoryLimit;
private final Instant createTime;
private final Version version;
private final Boolean allowLazyStart;
private DataFrameAnalyticsConfig(@Nullable String id, @Nullable String description, @Nullable DataFrameAnalyticsSource source,
@Nullable DataFrameAnalyticsDest dest, @Nullable DataFrameAnalysis analysis,
@Nullable FetchSourceContext analyzedFields, @Nullable ByteSizeValue modelMemoryLimit,
@Nullable Instant createTime, @Nullable Version version) {
@Nullable Instant createTime, @Nullable Version version, @Nullable Boolean allowLazyStart) {
this.id = id;
this.description = description;
this.source = source;
@@ -119,6 +122,7 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
this.modelMemoryLimit = modelMemoryLimit;
this.createTime = createTime == null ? null : Instant.ofEpochMilli(createTime.toEpochMilli());
this.version = version;
this.allowLazyStart = allowLazyStart;
}
public String getId() {
@@ -157,6 +161,10 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
return version;
}
public Boolean getAllowLazyStart() {
return allowLazyStart;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -190,6 +198,9 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
if (version != null) {
builder.field(VERSION.getPreferredName(), version);
}
if (allowLazyStart != null) {
builder.field(ALLOW_LAZY_START.getPreferredName(), allowLazyStart);
}
builder.endObject();
return builder;
}
@@ -208,12 +219,13 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
&& Objects.equals(analyzedFields, other.analyzedFields)
&& Objects.equals(modelMemoryLimit, other.modelMemoryLimit)
&& Objects.equals(createTime, other.createTime)
&& Objects.equals(version, other.version);
&& Objects.equals(version, other.version)
&& Objects.equals(allowLazyStart, other.allowLazyStart);
}
@Override
public int hashCode() {
return Objects.hash(id, description, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version);
return Objects.hash(id, description, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime, version, allowLazyStart);
}
@Override
@@ -232,6 +244,7 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
private ByteSizeValue modelMemoryLimit;
private Instant createTime;
private Version version;
private Boolean allowLazyStart;
private Builder() {}
@@ -280,9 +293,14 @@ public class DataFrameAnalyticsConfig implements ToXContentObject {
return this;
}
public Builder setAllowLazyStart(Boolean allowLazyStart) {
this.allowLazyStart = allowLazyStart;
return this;
}
public DataFrameAnalyticsConfig build() {
return new DataFrameAnalyticsConfig(id, description, source, dest, analysis, analyzedFields, modelMemoryLimit, createTime,
version);
version, allowLazyStart);
}
}
}

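A sketch of the new allow_lazy_start flag, assuming the class's usual static builder() factory and setId setter (neither is shown in this hunk); source, dest and analysis are required for a real config but omitted here:

    // allow_lazy_start lets the job wait for ML node capacity instead of
    // failing to start when none is available (sketch).
    DataFrameAnalyticsConfig config = DataFrameAnalyticsConfig.builder()
        .setId("my-analytics")
        .setAllowLazyStart(true)
        .build();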
View file

@@ -22,7 +22,7 @@ package org.elasticsearch.client.ml.dataframe;
import java.util.Locale;
public enum DataFrameAnalyticsState {
STARTED, REINDEXING, ANALYZING, STOPPING, STOPPED;
STARTED, REINDEXING, ANALYZING, STOPPING, STOPPED, STARTING;
public static DataFrameAnalyticsState fromString(String name) {
return valueOf(name.trim().toUpperCase(Locale.ROOT));

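The new constant round-trips through fromString, which matters because lazily started jobs report this state:

    DataFrameAnalyticsState state = DataFrameAnalyticsState.fromString("starting");
    assert state == DataFrameAnalyticsState.STARTING;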
View file

@@ -67,6 +67,7 @@ public class Job implements ToXContentObject {
public static final ParseField MODEL_SNAPSHOT_ID = new ParseField("model_snapshot_id");
public static final ParseField RESULTS_INDEX_NAME = new ParseField("results_index_name");
public static final ParseField DELETING = new ParseField("deleting");
public static final ParseField ALLOW_LAZY_OPEN = new ParseField("allow_lazy_open");
public static final ObjectParser<Builder, Void> PARSER = new ObjectParser<>("job_details", true, Builder::new);
@@ -96,6 +97,7 @@ public class Job implements ToXContentObject {
PARSER.declareStringOrNull(Builder::setModelSnapshotId, MODEL_SNAPSHOT_ID);
PARSER.declareString(Builder::setResultsIndexName, RESULTS_INDEX_NAME);
PARSER.declareBoolean(Builder::setDeleting, DELETING);
PARSER.declareBoolean(Builder::setAllowLazyOpen, ALLOW_LAZY_OPEN);
}
private final String jobId;
@@ -117,13 +119,14 @@ public class Job implements ToXContentObject {
private final String modelSnapshotId;
private final String resultsIndexName;
private final Boolean deleting;
private final Boolean allowLazyOpen;
private Job(String jobId, String jobType, List<String> groups, String description,
Date createTime, Date finishedTime,
AnalysisConfig analysisConfig, AnalysisLimits analysisLimits, DataDescription dataDescription,
ModelPlotConfig modelPlotConfig, Long renormalizationWindowDays, TimeValue backgroundPersistInterval,
Long modelSnapshotRetentionDays, Long resultsRetentionDays, Map<String, Object> customSettings,
String modelSnapshotId, String resultsIndexName, Boolean deleting) {
String modelSnapshotId, String resultsIndexName, Boolean deleting, Boolean allowLazyOpen) {
this.jobId = jobId;
this.jobType = jobType;
@@ -143,6 +146,7 @@ public class Job implements ToXContentObject {
this.modelSnapshotId = modelSnapshotId;
this.resultsIndexName = resultsIndexName;
this.deleting = deleting;
this.allowLazyOpen = allowLazyOpen;
}
/**
@@ -271,6 +275,10 @@ public class Job implements ToXContentObject {
return deleting;
}
public Boolean getAllowLazyOpen() {
return allowLazyOpen;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -326,6 +334,9 @@ public class Job implements ToXContentObject {
if (deleting != null) {
builder.field(DELETING.getPreferredName(), deleting);
}
if (allowLazyOpen != null) {
builder.field(ALLOW_LAZY_OPEN.getPreferredName(), allowLazyOpen);
}
builder.endObject();
return builder;
}
@@ -358,7 +369,8 @@ public class Job implements ToXContentObject {
&& Objects.equals(this.customSettings, that.customSettings)
&& Objects.equals(this.modelSnapshotId, that.modelSnapshotId)
&& Objects.equals(this.resultsIndexName, that.resultsIndexName)
&& Objects.equals(this.deleting, that.deleting);
&& Objects.equals(this.deleting, that.deleting)
&& Objects.equals(this.allowLazyOpen, that.allowLazyOpen);
}
@Override
@@ -366,7 +378,7 @@ public class Job implements ToXContentObject {
return Objects.hash(jobId, jobType, groups, description, createTime, finishedTime,
analysisConfig, analysisLimits, dataDescription, modelPlotConfig, renormalizationWindowDays,
backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, customSettings,
modelSnapshotId, resultsIndexName, deleting);
modelSnapshotId, resultsIndexName, deleting, allowLazyOpen);
}
@Override
@@ -398,6 +410,7 @@ public class Job implements ToXContentObject {
private String modelSnapshotId;
private String resultsIndexName;
private Boolean deleting;
private Boolean allowLazyOpen;
private Builder() {
}
@@ -425,6 +438,7 @@ public class Job implements ToXContentObject {
this.modelSnapshotId = job.getModelSnapshotId();
this.resultsIndexName = job.getResultsIndexNameNoPrefix();
this.deleting = job.getDeleting();
this.allowLazyOpen = job.getAllowLazyOpen();
}
public Builder setId(String id) {
@@ -521,6 +535,11 @@ public class Job implements ToXContentObject {
return this;
}
Builder setAllowLazyOpen(Boolean allowLazyOpen) {
this.allowLazyOpen = allowLazyOpen;
return this;
}
/**
* Builds a job.
*
@@ -533,7 +552,7 @@ public class Job implements ToXContentObject {
id, jobType, groups, description, createTime, finishedTime,
analysisConfig, analysisLimits, dataDescription, modelPlotConfig, renormalizationWindowDays,
backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, customSettings,
modelSnapshotId, resultsIndexName, deleting);
modelSnapshotId, resultsIndexName, deleting, allowLazyOpen);
}
}
}

View file

@@ -54,6 +54,7 @@ public class JobUpdate implements ToXContentObject {
PARSER.declareLong(Builder::setModelSnapshotRetentionDays, Job.MODEL_SNAPSHOT_RETENTION_DAYS);
PARSER.declareStringArray(Builder::setCategorizationFilters, AnalysisConfig.CATEGORIZATION_FILTERS);
PARSER.declareField(Builder::setCustomSettings, (p, c) -> p.map(), Job.CUSTOM_SETTINGS, ObjectParser.ValueType.OBJECT);
PARSER.declareBoolean(Builder::setAllowLazyOpen, Job.ALLOW_LAZY_OPEN);
}
private final String jobId;
@@ -68,13 +69,14 @@ public class JobUpdate implements ToXContentObject {
private final Long resultsRetentionDays;
private final List<String> categorizationFilters;
private final Map<String, Object> customSettings;
private final Boolean allowLazyOpen;
private JobUpdate(String jobId, @Nullable List<String> groups, @Nullable String description,
@Nullable List<DetectorUpdate> detectorUpdates, @Nullable ModelPlotConfig modelPlotConfig,
@Nullable AnalysisLimits analysisLimits, @Nullable TimeValue backgroundPersistInterval,
@Nullable Long renormalizationWindowDays, @Nullable Long resultsRetentionDays,
@Nullable Long modelSnapshotRetentionDays, @Nullable List<String> categorisationFilters,
@Nullable Map<String, Object> customSettings) {
@Nullable Map<String, Object> customSettings, @Nullable Boolean allowLazyOpen) {
this.jobId = jobId;
this.groups = groups;
this.description = description;
@@ -87,6 +89,7 @@ public class JobUpdate implements ToXContentObject {
this.resultsRetentionDays = resultsRetentionDays;
this.categorizationFilters = categorisationFilters;
this.customSettings = customSettings;
this.allowLazyOpen = allowLazyOpen;
}
public String getJobId() {
@@ -137,6 +140,10 @@ public class JobUpdate implements ToXContentObject {
return customSettings;
}
public Boolean getAllowLazyOpen() {
return allowLazyOpen;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
@@ -174,6 +181,9 @@ public class JobUpdate implements ToXContentObject {
if (customSettings != null) {
builder.field(Job.CUSTOM_SETTINGS.getPreferredName(), customSettings);
}
if (allowLazyOpen != null) {
builder.field(Job.ALLOW_LAZY_OPEN.getPreferredName(), allowLazyOpen);
}
builder.endObject();
return builder;
}
@@ -201,13 +211,15 @@ public class JobUpdate implements ToXContentObject {
&& Objects.equals(this.modelSnapshotRetentionDays, that.modelSnapshotRetentionDays)
&& Objects.equals(this.resultsRetentionDays, that.resultsRetentionDays)
&& Objects.equals(this.categorizationFilters, that.categorizationFilters)
&& Objects.equals(this.customSettings, that.customSettings);
&& Objects.equals(this.customSettings, that.customSettings)
&& Objects.equals(this.allowLazyOpen, that.allowLazyOpen);
}
@Override
public int hashCode() {
return Objects.hash(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, renormalizationWindowDays,
backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, categorizationFilters, customSettings);
backgroundPersistInterval, modelSnapshotRetentionDays, resultsRetentionDays, categorizationFilters, customSettings,
allowLazyOpen);
}
public static class DetectorUpdate implements ToXContentObject {
@@ -303,6 +315,7 @@ public class JobUpdate implements ToXContentObject {
private Long resultsRetentionDays;
private List<String> categorizationFilters;
private Map<String, Object> customSettings;
private Boolean allowLazyOpen;
/**
* New {@link JobUpdate.Builder} object for the existing job
@@ -446,9 +459,15 @@ public class JobUpdate implements ToXContentObject {
return this;
}
public Builder setAllowLazyOpen(boolean allowLazyOpen) {
this.allowLazyOpen = allowLazyOpen;
return this;
}
public JobUpdate build() {
return new JobUpdate(jobId, groups, description, detectorUpdates, modelPlotConfig, analysisLimits, backgroundPersistInterval,
renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, categorizationFilters, customSettings);
renormalizationWindowDays, resultsRetentionDays, modelSnapshotRetentionDays, categorizationFilters, customSettings,
allowLazyOpen);
}
}
}

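Note that Job.Builder#setAllowLazyOpen in the Job diff above is package-private, so from user code the flag is toggled through the public JobUpdate builder instead; a sketch with a hypothetical job ID:

    // allow_lazy_open lets an open request queue the job when no ML node
    // currently has capacity, instead of failing (sketch).
    JobUpdate update = new JobUpdate.Builder("my-job")
        .setAllowLazyOpen(true)
        .build();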
View file

@@ -0,0 +1,25 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.slm;
import org.elasticsearch.client.TimedRequest;
public class SnapshotLifecycleManagementStatusRequest extends TimedRequest {
}

View file

@@ -0,0 +1,25 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.slm;
import org.elasticsearch.client.TimedRequest;
public class StartSLMRequest extends TimedRequest {
}

View file

@@ -0,0 +1,25 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.slm;
import org.elasticsearch.client.TimedRequest;
public class StopSLMRequest extends TimedRequest {
}

View file

@@ -31,9 +31,11 @@ import org.elasticsearch.client.ccr.FollowInfoRequest;
import org.elasticsearch.client.ccr.FollowStatsRequest;
import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -143,6 +145,26 @@ public class CcrRequestConvertersTests extends ESTestCase {
assertThat(result.getEntity(), nullValue());
}
public void testPauseAutofollowPattern() throws Exception {
PauseAutoFollowPatternRequest pauseAutoFollowPatternRequest = new PauseAutoFollowPatternRequest(randomAlphaOfLength(4));
Request result = CcrRequestConverters.pauseAutoFollowPattern(pauseAutoFollowPatternRequest);
assertThat(result.getMethod(), equalTo(HttpPost.METHOD_NAME));
assertThat(result.getEndpoint(), equalTo("/_ccr/auto_follow/" + pauseAutoFollowPatternRequest.getName() + "/pause"));
assertThat(result.getParameters().size(), equalTo(0));
assertThat(result.getEntity(), nullValue());
}
public void testResumeAutofollowPattern() throws Exception {
ResumeAutoFollowPatternRequest resumeAutoFollowPatternRequest = new ResumeAutoFollowPatternRequest(randomAlphaOfLength(4));
Request result = CcrRequestConverters.resumeAutoFollowPattern(resumeAutoFollowPatternRequest);
assertThat(result.getMethod(), equalTo(HttpPost.METHOD_NAME));
assertThat(result.getEndpoint(), equalTo("/_ccr/auto_follow/" + resumeAutoFollowPatternRequest.getName() + "/resume"));
assertThat(result.getParameters().size(), equalTo(0));
assertThat(result.getEntity(), nullValue());
}
public void testGetCcrStats() throws Exception {
CcrStatsRequest ccrStatsRequest = new CcrStatsRequest();
Request result = CcrRequestConverters.getCcrStats(ccrStatsRequest);

View file

@@ -44,10 +44,12 @@ import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse.Pattern;
import org.elasticsearch.client.ccr.IndicesFollowStats;
import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.PutFollowResponse;
import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.client.core.AcknowledgedResponse;
@@ -681,6 +683,122 @@ public class CCRDocumentationIT extends ESRestHighLevelClientTestCase {
}
}
public void testPauseAutoFollowPattern() throws Exception {
final RestHighLevelClient client = highLevelClient();
{
final PutAutoFollowPatternRequest putRequest = new PutAutoFollowPatternRequest("my_pattern", "local", List.of("logs-*"));
AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
assertThat(putResponse.isAcknowledged(), is(true));
}
// tag::ccr-pause-auto-follow-pattern-request
PauseAutoFollowPatternRequest request =
new PauseAutoFollowPatternRequest("my_pattern"); // <1>
// end::ccr-pause-auto-follow-pattern-request
// tag::ccr-pause-auto-follow-pattern-execute
AcknowledgedResponse response = client.ccr()
.pauseAutoFollowPattern(request, RequestOptions.DEFAULT);
// end::ccr-pause-auto-follow-pattern-execute
// tag::ccr-pause-auto-follow-pattern-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::ccr-pause-auto-follow-pattern-response
// tag::ccr-pause-auto-follow-pattern-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse response) { // <1>
boolean paused = response.isAcknowledged();
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::ccr-pause-auto-follow-pattern-execute-listener
// Replace the empty listener with a blocking listener for the test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::ccr-pause-auto-follow-pattern-execute-async
client.ccr().pauseAutoFollowPatternAsync(request,
RequestOptions.DEFAULT, listener); // <1>
// end::ccr-pause-auto-follow-pattern-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
// Cleanup:
{
DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
assertThat(deleteResponse.isAcknowledged(), is(true));
}
}
public void testResumeAutoFollowPattern() throws Exception {
final RestHighLevelClient client = highLevelClient();
{
final PutAutoFollowPatternRequest putRequest = new PutAutoFollowPatternRequest("my_pattern", "local", List.of("logs-*"));
AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
assertThat(putResponse.isAcknowledged(), is(true));
final PauseAutoFollowPatternRequest pauseRequest = new PauseAutoFollowPatternRequest("my_pattern");
AcknowledgedResponse pauseResponse = client.ccr().pauseAutoFollowPattern(pauseRequest, RequestOptions.DEFAULT);
assertThat(pauseResponse.isAcknowledged(), is(true));
}
// tag::ccr-resume-auto-follow-pattern-request
ResumeAutoFollowPatternRequest request =
new ResumeAutoFollowPatternRequest("my_pattern"); // <1>
// end::ccr-resume-auto-follow-pattern-request
// tag::ccr-resume-auto-follow-pattern-execute
AcknowledgedResponse response = client.ccr()
.resumeAutoFollowPattern(request, RequestOptions.DEFAULT);
// end::ccr-resume-auto-follow-pattern-execute
// tag::ccr-resume-auto-follow-pattern-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::ccr-resume-auto-follow-pattern-response
// tag::ccr-resume-auto-follow-pattern-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse response) { // <1>
boolean resumed = response.isAcknowledged();
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::ccr-resume-auto-follow-pattern-execute-listener
// Replace the empty listener with a blocking listener for the test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::ccr-resume-auto-follow-pattern-execute-async
client.ccr().resumeAutoFollowPatternAsync(request,
RequestOptions.DEFAULT, listener); // <1>
// end::ccr-resume-auto-follow-pattern-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
// Cleanup:
{
DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
assertThat(deleteResponse.isAcknowledged(), is(true));
}
}
public void testGetCCRStats() throws Exception {
RestHighLevelClient client = highLevelClient();

View file

@@ -64,10 +64,13 @@ import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsRequest;
import org.elasticsearch.client.slm.GetSnapshotLifecycleStatsResponse;
import org.elasticsearch.client.slm.PutSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.SnapshotInvocationRecord;
import org.elasticsearch.client.slm.SnapshotLifecycleManagementStatusRequest;
import org.elasticsearch.client.slm.SnapshotLifecyclePolicy;
import org.elasticsearch.client.slm.SnapshotLifecyclePolicyMetadata;
import org.elasticsearch.client.slm.SnapshotLifecycleStats;
import org.elasticsearch.client.slm.SnapshotRetentionConfiguration;
import org.elasticsearch.client.slm.StartSLMRequest;
import org.elasticsearch.client.slm.StopSLMRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -460,7 +463,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
public void testStartStopStatus() throws Exception {
public void testILMStartStopStatus() throws Exception {
RestHighLevelClient client = highLevelClient();
stopILM(client);
@@ -776,7 +779,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
assertTrue(response.isAcknowledged());
//////// PUT
// tag::slm-put-snapshot-lifecycle-policy
// tag::slm-put-snapshot-lifecycle-policy-request
Map<String, Object> config = new HashMap<>();
config.put("indices", Collections.singletonList("idx"));
SnapshotRetentionConfiguration retention =
@@ -786,7 +789,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
"my_repository", config, retention);
PutSnapshotLifecyclePolicyRequest request =
new PutSnapshotLifecyclePolicyRequest(policy);
// end::slm-put-snapshot-lifecycle-policy
// end::slm-put-snapshot-lifecycle-policy-request
// tag::slm-put-snapshot-lifecycle-policy-execute
AcknowledgedResponse resp = client.indexLifecycle()
@@ -815,16 +818,16 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::slm-put-snapshot-lifecycle-policy-execute-async
client.indexLifecycle().putSnapshotLifecyclePolicyAsync(request,
RequestOptions.DEFAULT, putListener);
RequestOptions.DEFAULT, putListener); // <1>
// end::slm-put-snapshot-lifecycle-policy-execute-async
//////// GET
// tag::slm-get-snapshot-lifecycle-policy
// tag::slm-get-snapshot-lifecycle-policy-request
GetSnapshotLifecyclePolicyRequest getAllRequest =
new GetSnapshotLifecyclePolicyRequest(); // <1>
GetSnapshotLifecyclePolicyRequest getRequest =
new GetSnapshotLifecyclePolicyRequest("policy_id"); // <2>
// end::slm-get-snapshot-lifecycle-policy
// end::slm-get-snapshot-lifecycle-policy-request
// tag::slm-get-snapshot-lifecycle-policy-execute
GetSnapshotLifecyclePolicyResponse getResponse =
@@ -851,7 +854,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::slm-get-snapshot-lifecycle-policy-execute-async
client.indexLifecycle().getSnapshotLifecyclePolicyAsync(getRequest,
RequestOptions.DEFAULT, getListener);
RequestOptions.DEFAULT, getListener); // <1>
// end::slm-get-snapshot-lifecycle-policy-execute-async
assertThat(getResponse.getPolicies().size(), equalTo(1));
@@ -879,10 +882,10 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build());
//////// EXECUTE
// tag::slm-execute-snapshot-lifecycle-policy
// tag::slm-execute-snapshot-lifecycle-policy-request
ExecuteSnapshotLifecyclePolicyRequest executeRequest =
new ExecuteSnapshotLifecyclePolicyRequest("policy_id"); // <1>
// end::slm-execute-snapshot-lifecycle-policy
// end::slm-execute-snapshot-lifecycle-policy-request
// tag::slm-execute-snapshot-lifecycle-policy-execute
ExecuteSnapshotLifecyclePolicyResponse executeResponse =
@@ -937,7 +940,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::slm-execute-snapshot-lifecycle-policy-execute-async
client.indexLifecycle()
.executeSnapshotLifecyclePolicyAsync(executeRequest,
RequestOptions.DEFAULT, executeListener);
RequestOptions.DEFAULT, executeListener); // <1>
// end::slm-execute-snapshot-lifecycle-policy-execute-async
latch.await(5, TimeUnit.SECONDS);
@@ -958,42 +961,50 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
greaterThanOrEqualTo(1L));
//////// DELETE
// tag::slm-delete-snapshot-lifecycle-policy
// tag::slm-delete-snapshot-lifecycle-policy-request
DeleteSnapshotLifecyclePolicyRequest deleteRequest =
new DeleteSnapshotLifecyclePolicyRequest("policy_id"); // <1>
// end::slm-delete-snapshot-lifecycle-policy
// end::slm-delete-snapshot-lifecycle-policy-request
// tag::slm-delete-snapshot-lifecycle-policy-execute
AcknowledgedResponse deleteResp = client.indexLifecycle()
.deleteSnapshotLifecyclePolicy(deleteRequest, RequestOptions.DEFAULT);
// end::slm-delete-snapshot-lifecycle-policy-execute
// tag::slm-delete-snapshot-lifecycle-policy-response
boolean deleteAcknowledged = deleteResp.isAcknowledged(); // <1>
// end::slm-delete-snapshot-lifecycle-policy-response
assertTrue(deleteResp.isAcknowledged());
ActionListener<AcknowledgedResponse> deleteListener = new ActionListener<>() {
// tag::slm-delete-snapshot-lifecycle-policy-execute-listener
ActionListener<AcknowledgedResponse> deleteListener =
new ActionListener<>() {
@Override
public void onResponse(AcknowledgedResponse resp) {
// no-op
boolean deleteAcknowledged = resp.isAcknowledged(); // <1>
}
@Override
public void onFailure(Exception e) {
// no-op
// <2>
}
};
// end::slm-delete-snapshot-lifecycle-policy-execute-listener
// tag::slm-delete-snapshot-lifecycle-policy-execute-async
client.indexLifecycle()
.deleteSnapshotLifecyclePolicyAsync(deleteRequest,
RequestOptions.DEFAULT, deleteListener);
RequestOptions.DEFAULT, deleteListener); // <1>
// end::slm-delete-snapshot-lifecycle-policy-execute-async
assertTrue(deleteResp.isAcknowledged());
//////// EXECUTE RETENTION
// tag::slm-execute-snapshot-lifecycle-retention
// tag::slm-execute-snapshot-lifecycle-retention-request
ExecuteSnapshotLifecycleRetentionRequest req =
new ExecuteSnapshotLifecycleRetentionRequest();
// end::slm-execute-snapshot-lifecycle-retention
// end::slm-execute-snapshot-lifecycle-retention-request
// tag::slm-execute-snapshot-lifecycle-retention-execute
AcknowledgedResponse retentionResp =
@@ -1006,7 +1017,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
final boolean acked = retentionResp.isAcknowledged();
// end::slm-execute-snapshot-lifecycle-retention-response
// tag::slm-execute-snapshot-lifecycle-policy-execute-listener
// tag::slm-execute-snapshot-lifecycle-retention-execute-listener
ActionListener<AcknowledgedResponse> retentionListener =
new ActionListener<>() {
@Override
@@ -1024,7 +1035,7 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
// tag::slm-execute-snapshot-lifecycle-retention-execute-async
client.indexLifecycle()
.executeSnapshotLifecycleRetentionAsync(req,
RequestOptions.DEFAULT, retentionListener);
RequestOptions.DEFAULT, retentionListener); // <1>
// end::slm-execute-snapshot-lifecycle-retention-execute-async
}
@@ -1051,6 +1062,152 @@ public class ILMDocumentationIT extends ESRestHighLevelClientTestCase {
});
}
public void testSLMStartStopStatus() throws Exception {
RestHighLevelClient client = highLevelClient();
stopSLM(client);
// tag::slm-status-request
SnapshotLifecycleManagementStatusRequest request =
new SnapshotLifecycleManagementStatusRequest();
// end::slm-status-request
// Check that SLM has stopped
{
// tag::slm-status-execute
LifecycleManagementStatusResponse response =
client.indexLifecycle()
.getSLMStatus(request, RequestOptions.DEFAULT);
// end::slm-status-execute
// tag::slm-status-response
OperationMode operationMode = response.getOperationMode(); // <1>
// end::slm-status-response
assertThat(operationMode, Matchers.either(equalTo(OperationMode.STOPPING)).or(equalTo(OperationMode.STOPPED)));
}
startSLM(client);
// tag::slm-status-execute-listener
ActionListener<LifecycleManagementStatusResponse> listener =
new ActionListener<LifecycleManagementStatusResponse>() {
@Override
public void onResponse(
LifecycleManagementStatusResponse response) {
OperationMode operationMode = response
.getOperationMode(); // <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::slm-status-execute-listener
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::slm-status-execute-async
client.indexLifecycle().getSLMStatusAsync(request,
RequestOptions.DEFAULT, listener); // <1>
// end::slm-status-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
// Check that SLM is running again
LifecycleManagementStatusResponse response =
client.indexLifecycle()
.getSLMStatus(request, RequestOptions.DEFAULT);
OperationMode operationMode = response.getOperationMode();
assertEquals(OperationMode.RUNNING, operationMode);
}
private void stopSLM(RestHighLevelClient client) throws IOException, InterruptedException {
// tag::slm-stop-slm-request
StopSLMRequest request = new StopSLMRequest();
// end::slm-stop-slm-request
// tag::slm-stop-slm-execute
AcknowledgedResponse response = client.indexLifecycle()
.stopSLM(request, RequestOptions.DEFAULT);
// end::slm-stop-slm-execute
// tag::slm-stop-slm-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::slm-stop-slm-response
assertTrue(acknowledged);
// tag::slm-stop-slm-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse response) {
boolean acknowledged = response.isAcknowledged(); // <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::slm-stop-slm-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::slm-stop-slm-execute-async
client.indexLifecycle().stopSLMAsync(request,
RequestOptions.DEFAULT, listener); // <1>
// end::slm-stop-slm-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
private void startSLM(RestHighLevelClient client) throws IOException, InterruptedException {
// tag::slm-start-slm-request
StartSLMRequest request1 = new StartSLMRequest();
// end::slm-start-slm-request
// tag::slm-start-slm-execute
AcknowledgedResponse response = client.indexLifecycle()
.startSLM(request1, RequestOptions.DEFAULT);
// end::slm-start-slm-execute
// tag::slm-start-slm-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::slm-start-slm-response
assertTrue(acknowledged);
// tag::slm-start-slm-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse response) {
boolean acknowledged = response.isAcknowledged(); // <1>
}
@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::slm-start-slm-execute-listener
// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);
// tag::slm-start-slm-execute-async
client.indexLifecycle().startSLMAsync(request1,
RequestOptions.DEFAULT, listener); // <1>
// end::slm-start-slm-execute-async
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
static Map<String, Object> toMap(Response response) throws IOException {
return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false);
}

View file

@ -106,6 +106,9 @@ public class DatafeedConfigTests extends AbstractXContentTestCase<DatafeedConfig
if (randomBoolean()) {
builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig());
}
if (randomBoolean()) {
builder.setMaxEmptySearches(randomIntBetween(10, 100));
}
return builder;
}

View file

@ -80,6 +80,9 @@ public class DatafeedUpdateTests extends AbstractXContentTestCase<DatafeedUpdate
if (randomBoolean()) {
builder.setDelayedDataCheckConfig(DelayedDataCheckConfigTests.createRandomizedConfig());
}
if (randomBoolean()) {
builder.setMaxEmptySearches(randomIntBetween(10, 100));
}
return builder.build();
}

View file

@ -66,6 +66,9 @@ public class DataFrameAnalyticsConfigTests extends AbstractXContentTestCase<Data
if (randomBoolean()) {
builder.setVersion(Version.CURRENT);
}
if (randomBoolean()) {
builder.setAllowLazyStart(randomBoolean());
}
return builder.build();
}

View file

@ -159,6 +159,9 @@ public class JobTests extends AbstractXContentTestCase<Job> {
if (randomBoolean()) {
builder.setDeleting(randomBoolean());
}
if (randomBoolean()) {
builder.setAllowLazyOpen(randomBoolean());
}
return builder;
}

View file

@ -79,6 +79,9 @@ public class JobUpdateTests extends AbstractXContentTestCase<JobUpdate> {
if (randomBoolean()) {
update.setCustomSettings(Collections.singletonMap(randomAlphaOfLength(10), randomAlphaOfLength(10)));
}
if (randomBoolean()) {
update.setAllowLazyOpen(randomBoolean());
}
return update.build();
}

View file

@ -56,6 +56,7 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@ -73,6 +74,7 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Collections.singletonList;
/**
@ -119,6 +121,34 @@ public class RestClient implements Closeable {
setNodes(nodes);
}
/**
* Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
* Creates a new builder instance and sets the nodes that the client will send requests to.
*
* @param cloudId a valid elastic cloud cloudId that will route to a cluster. The cloudId is located in
* the user console https://cloud.elastic.co and will resemble a string like the following
* optionalHumanReadableName:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRlbGFzdGljc2VhcmNoJGtpYmFuYQ==
*/
public static RestClientBuilder builder(String cloudId) {
// there is an optional first portion of the cloudId that is a human readable string, but it is not used.
if (cloudId.contains(":")) {
if (cloudId.indexOf(":") == cloudId.length() - 1) {
throw new IllegalStateException("cloudId " + cloudId + " must begin with a human readable identifier followed by a colon");
}
cloudId = cloudId.substring(cloudId.indexOf(":") + 1);
}
String decoded = new String(Base64.getDecoder().decode(cloudId), UTF_8);
// once decoded the parts are separated by a $ character
String[] decodedParts = decoded.split("\\$");
if (decodedParts.length != 3) {
throw new IllegalStateException("cloudId " + cloudId + " did not decode to a cluster identifier correctly");
}
String url = decodedParts[1] + "." + decodedParts[0];
return builder(new HttpHost(url, 443, "https"));
}
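// Worked example (illustrative only, using the sample cloudId from the javadoc above):
// "optionalHumanReadableName:dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRlbGFzdGljc2VhcmNoJGtpYmFuYQ=="
// drops the optional human readable prefix, and the remaining base64 decodes to
// "us-east-1.aws.found.io$elasticsearch$kibana"; the parts are <host>$<elasticsearch-id>$<kibana-id>,
// so the resulting builder targets https://elasticsearch.us-east-1.aws.found.io:443.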
/**
* Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
* Creates a new builder instance and sets the hosts that the client will send requests to.

View file

@ -26,8 +26,10 @@ import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.message.BasicHeader;
import java.io.IOException;
import java.util.Base64;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
@ -159,6 +161,38 @@ public class RestClientBuilderTests extends RestClientTestCase {
}
}
public void testBuildCloudId() throws IOException {
String host = "us-east-1.aws.found.io";
String esId = "elasticsearch";
String kibanaId = "kibana";
String toEncode = host + "$" + esId + "$" + kibanaId;
String encodedId = Base64.getEncoder().encodeToString(toEncode.getBytes(UTF8));
assertNotNull(RestClient.builder(encodedId));
assertNotNull(RestClient.builder("humanReadable:" + encodedId));
String badId = Base64.getEncoder().encodeToString("foo$bar".getBytes(UTF8));
try {
RestClient.builder(badId);
fail("should have failed");
} catch (IllegalStateException e) {
assertEquals("cloudId " + badId + " did not decode to a cluster identifier correctly", e.getMessage());
}
try {
RestClient.builder(badId + ":");
fail("should have failed");
} catch (IllegalStateException e) {
assertEquals("cloudId " + badId + ":" + " must begin with a human readable identifier followed by a colon", e.getMessage());
}
RestClient client = RestClient.builder(encodedId).build();
assertThat(client.getNodes().size(), equalTo(1));
assertThat(client.getNodes().get(0).getHost().getHostName(), equalTo(esId + "." + host));
assertThat(client.getNodes().get(0).getHost().getPort(), equalTo(443));
assertThat(client.getNodes().get(0).getHost().getSchemeName(), equalTo("https"));
client.close();
}
public void testSetPathPrefixNull() {
try {
RestClient.builder(new HttpHost("localhost", 9200)).setPathPrefix(null);

View file

@ -0,0 +1,32 @@
--
:api: ccr-pause-auto-follow-pattern
:request: PauseAutoFollowPatternRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Pause Auto Follow Pattern API
[id="{upid}-{api}-request"]
==== Request
The Pause Auto Follow Pattern API allows you to pause an existing auto follow pattern.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the auto follow pattern.
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates if the pause auto follow pattern request was received.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the pause auto follow pattern request was acknowledged.
include::../execution.asciidoc[]
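For reference, a minimal synchronous sketch is shown below. It assumes a
configured `RestHighLevelClient` named `client` and an existing auto follow
pattern named `my_pattern` (both hypothetical), and that the request is
constructed with the pattern name:
["source","java"]
--------------------------------------------------
PauseAutoFollowPatternRequest request =
    new PauseAutoFollowPatternRequest("my_pattern"); // hypothetical pattern name
AcknowledgedResponse response =
    client.ccr().pauseAutoFollowPattern(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged(); // true if the request was acknowledged
--------------------------------------------------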

View file

@ -0,0 +1,33 @@
--
:api: ccr-resume-auto-follow-pattern
:request: ResumeAutoFollowPatternRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Resume Auto Follow Pattern API
[id="{upid}-{api}-request"]
==== Request
The Resume Auto Follow Pattern API allows you to resume the activity
of a paused auto follow pattern.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the auto follow pattern.
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates if the resume auto follow pattern request was received.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the resume auto follow pattern request was acknowledged.
include::../execution.asciidoc[]
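For reference, a minimal synchronous sketch, again assuming a configured
`RestHighLevelClient` named `client` and a previously paused pattern named
`my_pattern` (both hypothetical):
["source","java"]
--------------------------------------------------
ResumeAutoFollowPatternRequest request =
    new ResumeAutoFollowPatternRequest("my_pattern"); // hypothetical pattern name
AcknowledgedResponse response =
    client.ccr().resumeAutoFollowPattern(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------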

View file

@ -1,6 +1,6 @@
[role="xpack"]
[[java-rest-high-x-pack-graph-explore]]
=== X-Pack Graph explore API
=== Graph explore API
[[java-rest-high-x-pack-graph-explore-execution]]
==== Initial request

View file

@ -0,0 +1,36 @@
--
:api: slm-status
:request: SnapshotLifecycleManagementStatusRequest
:response: LifecycleManagementStatusResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Snapshot Lifecycle Management Status API
[id="{upid}-{api}-request"]
==== Request
The Snapshot Lifecycle Management Status API allows you to retrieve the status
of Snapshot Lifecycle Management.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates the status of Snapshot Lifecycle Management.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The returned status can be `RUNNING`, `STOPPING`, or `STOPPED`.
include::../execution.asciidoc[]
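For reference, a minimal synchronous sketch, assuming a configured
`RestHighLevelClient` named `client` (hypothetical):
["source","java"]
--------------------------------------------------
SnapshotLifecycleManagementStatusRequest request =
    new SnapshotLifecycleManagementStatusRequest();
LifecycleManagementStatusResponse response =
    client.indexLifecycle().getSLMStatus(request, RequestOptions.DEFAULT);
OperationMode mode = response.getOperationMode(); // RUNNING, STOPPING, or STOPPED
--------------------------------------------------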

View file

@ -0,0 +1,36 @@
--
:api: slm-start-slm
:request: StartSLMRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Start Snapshot Lifecycle Management API
[id="{upid}-{api}-request"]
==== Request
The Start Snapshot Lifecycle Management API allows you to start Snapshot
Lifecycle Management if it has previously been stopped.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates if the request to start Snapshot Lifecycle
Management was received.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request to start Snapshot Lifecycle Management was
acknowledged.
include::../execution.asciidoc[]
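For reference, a minimal synchronous sketch, assuming a configured
`RestHighLevelClient` named `client` (hypothetical):
["source","java"]
--------------------------------------------------
StartSLMRequest request = new StartSLMRequest();
AcknowledgedResponse response =
    client.indexLifecycle().startSLM(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------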

View file

@ -0,0 +1,38 @@
--
:api: slm-stop-slm
:request: StopSLMRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Stop Snapshot Lifecycle Management API
[id="{upid}-{api}-request"]
==== Request
The Stop Snapshot Lifecycle Management API allows you to stop Snapshot
Lifecycle Management temporarily.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
[id="{upid}-{api}-response"]
==== Response
The returned +{response}+ indicates if the request to stop Snapshot
Lifecycle Management was received.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request to stop Snapshot Lifecycle Management was
acknowledged.
include::../execution.asciidoc[]
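For reference, a minimal synchronous sketch, assuming a configured
`RestHighLevelClient` named `client` (hypothetical):
["source","java"]
--------------------------------------------------
StopSLMRequest request = new StopSLMRequest();
AcknowledgedResponse response =
    client.indexLifecycle().stopSLM(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------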

View file

@ -529,6 +529,8 @@ The Java High Level REST Client supports the following CCR APIs:
* <<{upid}-ccr-put-auto-follow-pattern>>
* <<{upid}-ccr-delete-auto-follow-pattern>>
* <<{upid}-ccr-get-auto-follow-pattern>>
* <<{upid}-ccr-pause-auto-follow-pattern>>
* <<{upid}-ccr-resume-auto-follow-pattern>>
* <<{upid}-ccr-get-stats>>
* <<{upid}-ccr-get-follow-stats>>
* <<{upid}-ccr-get-follow-info>>
@ -541,6 +543,8 @@ include::ccr/forget_follower.asciidoc[]
include::ccr/put_auto_follow_pattern.asciidoc[]
include::ccr/delete_auto_follow_pattern.asciidoc[]
include::ccr/get_auto_follow_pattern.asciidoc[]
include::ccr/pause_auto_follow_pattern.asciidoc[]
include::ccr/resume_auto_follow_pattern.asciidoc[]
include::ccr/get_stats.asciidoc[]
include::ccr/get_follow_stats.asciidoc[]
include::ccr/get_follow_info.asciidoc[]
@ -575,6 +579,35 @@ include::ilm/lifecycle_management_status.asciidoc[]
include::ilm/retry_lifecycle_policy.asciidoc[]
include::ilm/remove_lifecycle_policy_from_index.asciidoc[]
[role="xpack"]
== Snapshot Lifecycle Management APIs
:upid: {mainid}-ilm
:doc-tests-file: {doc-tests}/ILMDocumentationIT.java
The Java High Level REST Client supports the following Snapshot Lifecycle
Management APIs:
* <<{upid}-slm-put-snapshot-lifecycle-policy>>
* <<{upid}-slm-delete-snapshot-lifecycle-policy>>
* <<{upid}-slm-get-snapshot-lifecycle-policy>>
* <<{upid}-slm-start-slm>>
* <<{upid}-slm-stop-slm>>
* <<{upid}-slm-status>>
* <<{upid}-slm-execute-snapshot-lifecycle-policy>>
* <<{upid}-slm-execute-snapshot-lifecycle-retention>>
include::ilm/put_snapshot_lifecycle_policy.asciidoc[]
include::ilm/delete_snapshot_lifecycle_policy.asciidoc[]
include::ilm/get_snapshot_lifecycle_policy.asciidoc[]
include::ilm/start_snapshot_lifecycle_management.asciidoc[]
include::ilm/stop_snapshot_lifecycle_management.asciidoc[]
include::ilm/snapshot_lifecycle_management_status.asciidoc[]
include::ilm/execute_snapshot_lifecycle_policy.asciidoc[]
include::ilm/execute_snapshot_lifecycle_retention.asciidoc[]
[role="xpack"]
[[transform_apis]]
== {transform-cap} APIs

View file

@ -34,8 +34,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]

View file

@ -34,10 +34,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-v]

View file

@ -31,10 +31,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=bytes]
include::{docdir}/rest-api/common-parms.asciidoc[tag=http-format]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]

View file

@ -45,10 +45,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-h]
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=time]

View file

@ -50,10 +50,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=index-query-parm]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]
include::{docdir}/rest-api/common-parms.asciidoc[tag=time]

View file

@ -95,8 +95,6 @@ Reason for any snapshot failures.
include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=local]
`ignore_unavailable`::
(Optional, boolean) If `true`, the response does not include information from
unavailable snapshots. Defaults to `false`.

View file

@ -51,8 +51,6 @@ include::{docdir}/rest-api/common-parms.asciidoc[tag=help]
include::{docdir}/rest-api/common-parms.asciidoc[tag=node-id-query-parm]
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
include::{docdir}/rest-api/common-parms.asciidoc[tag=parent-task-id]
include::{docdir}/rest-api/common-parms.asciidoc[tag=cat-s]

View file

@ -15,10 +15,9 @@ SLM policy management is split into three different CRUD APIs, a way to put or u
policies, a way to retrieve policies, and a way to delete unwanted policies, as
well as a separate API for immediately invoking a snapshot based on a policy.
Since SLM falls under the same category as ILM, it is stopped and started by
using the <<start-stop-ilm,start and stop>> ILM APIs. It is, however, managed
by a different enable setting. To disable SLM's functionality, set the cluster
setting `xpack.slm.enabled` to `false` in elasticsearch.yml.
SLM can be stopped temporarily and restarted using the <<slm-stop,Stop SLM>> and
<<slm-start,Start SLM>> APIs. To disable SLM's functionality entirely, set the
cluster setting `xpack.slm.enabled` to `false` in elasticsearch.yml.
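For example, as a minimal `elasticsearch.yml` entry (shown in isolation):
[source,yaml]
--------------------------------------------------
xpack.slm.enabled: false
--------------------------------------------------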
[[slm-api-put]]
=== Put snapshot lifecycle policy API
@ -317,21 +316,42 @@ GET /_slm/policy
[[slm-api-execute]]
=== Execute Snapshot Lifecycle Policy API
=== Execute snapshot lifecycle policy API
++++
<titleabbrev>Execute snapshot lifecycle policy</titleabbrev>
++++
Executes a snapshot lifecycle policy, immediately creating a snapshot
without waiting for the scheduled creation time.
[[slm-api-execute-request]]
==== {api-request-title}
`PUT /_slm/policy/<snapshot-lifecycle-policy-id>/_execute`
[[slm-api-execute-desc]]
==== {api-description-title}
Sometimes it can be useful to immediately execute a snapshot based on a policy,
perhaps before an upgrade or before performing other maintenance on indices. The
execute snapshot policy API allows you to take a snapshot immediately without
waiting for a policy's scheduled invocation.
==== Path Parameters
`policy_id` (required)::
(string) Id of the policy to execute
[[slm-api-execute-path-params]]
==== {api-path-parms-title}
==== Example
`<snapshot-lifecycle-policy-id>`::
(Required, string)
ID of the snapshot lifecycle policy to execute.
To take an immediate snapshot using a policy, use the following
[[slm-api-execute-example]]
==== {api-examples-title}
To take an immediate snapshot using a policy, use the following request:
[source,console]
--------------------------------------------------
@ -339,7 +359,7 @@ POST /_slm/policy/daily-snapshots/_execute
--------------------------------------------------
// TEST[skip:we can't easily handle snapshots from docs tests]
This API will immediately return with the generated snapshot name
This API returns the following response with the generated snapshot name:
[source,console-result]
--------------------------------------------------
@ -450,8 +470,7 @@ POST /_slm/policy/daily-snapshots/_execute
--------------------------------------------------
// TESTRESPONSE[skip:we can't handle snapshots in docs tests]
Now retriving the policy shows that the policy has successfully been executed:
Now retrieving the policy shows that the policy has successfully been executed:
[source,console]
--------------------------------------------------
@ -514,12 +533,22 @@ Which now includes the successful snapshot information:
It is a good idea to test policies using the execute API to ensure they work.
[[slm-get-stats]]
=== Get Snapshot Lifecycle Stats API
=== Get snapshot lifecycle stats API
++++
<titleabbrev>Get snapshot lifecycle stats</titleabbrev>
++++
SLM stores statistics on a global and per-policy level about actions taken. These stats can be
retrieved by using the following API:
Returns global and policy-level statistics about actions taken by {slm}.
==== Example
[[slm-api-stats-request]]
==== {api-request-title}
`GET /_slm/stats`
[[slm-api-stats-example]]
==== {api-examples-title}
[source,console]
--------------------------------------------------
@ -527,7 +556,7 @@ GET /_slm/stats
--------------------------------------------------
// TEST[continued]
Which returns a response similar to:
The API returns the following response:
[source,js]
--------------------------------------------------
@ -546,19 +575,40 @@ Which returns a response similar to:
--------------------------------------------------
// TESTRESPONSE[s/runs": 13/runs": $body.retention_runs/ s/_failed": 0/_failed": $body.retention_failed/ s/_timed_out": 0/_timed_out": $body.retention_timed_out/ s/"1.4s"/$body.retention_deletion_time/ s/1404/$body.retention_deletion_time_millis/ s/total_snapshots_taken": 1/total_snapshots_taken": $body.total_snapshots_taken/ s/total_snapshots_failed": 1/total_snapshots_failed": $body.total_snapshots_failed/ s/"policy_stats": [.*]/"policy_stats": $body.policy_stats/]
[[slm-api-delete]]
=== Delete Snapshot Lifecycle Policy API
=== Delete snapshot lifecycle policy API
++++
<titleabbrev>Delete snapshot lifecycle policy</titleabbrev>
++++
Deletes an existing snapshot lifecycle policy.
[[slm-api-delete-request]]
==== {api-request-title}
`DELETE /_slm/policy/<snapshot-lifecycle-policy-id>`
[[slm-api-delete-desc]]
==== {api-description-title}
A policy can be deleted by issuing a delete request with the policy id. Note
that this prevents any future snapshots from being taken, but does not cancel
any currently ongoing snapshots or remove any previously taken snapshots.
==== Path Parameters
`policy_id` (optional)::
(string) Id of the policy to remove.
[[slm-api-delete-path-params]]
==== {api-path-parms-title}
==== Example
`<snapshot-lifecycle-policy-id>`::
(Required, string)
ID of the snapshot lifecycle policy to delete.
[[slm-api-delete-example]]
==== {api-examples-title}
[source,console]
--------------------------------------------------
@ -566,23 +616,42 @@ DELETE /_slm/policy/daily-snapshots
--------------------------------------------------
// TEST[continued]
[[slm-api-execute-retention]]
=== Execute Snapshot Lifecycle Retention API
=== Execute snapshot lifecycle retention API
++++
<titleabbrev>Execute snapshot lifecycle retention</titleabbrev>
++++
Deletes any expired snapshots based on lifecycle policy retention rules.
[[slm-api-execute-retention-request]]
==== {api-request-title}
`POST /_slm/_execute_retention`
[[slm-api-execute-retention-desc]]
==== {api-description-title}
While Snapshot Lifecycle Management retention is usually invoked through the global cluster settings
for its schedule, it can sometimes be useful to invoke a retention run to expunge expired snapshots
immediately. This API allows you to trigger a one-off retention run.
==== Example
To immediately start snapshot retention, use the following
[[slm-api-execute-retention-example]]
==== {api-examples-title}
To immediately start snapshot retention, use the following request:
[source,console]
--------------------------------------------------
POST /_slm/_execute_retention
--------------------------------------------------
This API will immediately return, as retention will be run asynchronously in the background:
This API returns the following response as retention runs asynchronously in the
background:
[source,console-result]
--------------------------------------------------
@ -591,3 +660,163 @@ This API will immediately return, as retention will be run asynchronously in the
}
--------------------------------------------------
[[slm-stop]]
=== Stop Snapshot Lifecycle Management API
[subs="attributes"]
++++
<titleabbrev>Stop Snapshot Lifecycle Management</titleabbrev>
++++
Stop the Snapshot Lifecycle Management (SLM) plugin.
[[slm-stop-request]]
==== {api-request-title}
`POST /_slm/stop`
[[slm-stop-desc]]
==== {api-description-title}
Halts all snapshot lifecycle management operations and stops the SLM plugin.
This is useful when you are performing maintenance on the cluster and need to
prevent SLM from performing any actions on your indices. Note that this API does
not stop any snapshots that are currently in progress, and that snapshots can
still be taken manually via the <<slm-api-execute,Execute Policy API>> even
when SLM is stopped.
The API returns as soon as the stop request has been acknowledged, but the
plugin might continue to run until in-progress operations complete and the plugin
can be safely stopped. Use the <<slm-get-status, Get SLM Status>> API to see
if SLM is running.
==== Request Parameters
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_slm` cluster privilege to use this API.
For more information, see <<security-privileges>>.
[[slm-stop-example]]
==== {api-examples-title}
Stops the SLM plugin.
[source,console]
--------------------------------------------------
POST _slm/stop
--------------------------------------------------
// TEST[continued]
If the request does not encounter errors, you receive the following result:
[source,console-result]
--------------------------------------------------
{
"acknowledged": true
}
--------------------------------------------------
[[slm-start]]
=== Start Snapshot Lifecycle Management API
[subs="attributes"]
++++
<titleabbrev>Start Snapshot Lifecycle Management</titleabbrev>
++++
Start the Snapshot Lifecycle Management (SLM) plugin.
[[slm-start-request]]
==== {api-request-title}
`POST /_slm/start`
[[slm-start-desc]]
==== {api-description-title}
Starts the SLM plugin if it is currently stopped. SLM is started
automatically when the cluster is formed. Restarting SLM is only
necessary if it has been stopped using the <<slm-stop, Stop SLM API>>.
==== Request Parameters
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_slm` cluster privilege to use this API.
For more information, see <<security-privileges>>.
[[slm-start-example]]
==== {api-examples-title}
Starts the SLM plugin.
[source,console]
--------------------------------------------------
POST _slm/start
--------------------------------------------------
// TEST[continued]
If the request succeeds, you receive the following result:
[source,console-result]
--------------------------------------------------
{
"acknowledged": true
}
--------------------------------------------------
[[slm-get-status]]
=== Get Snapshot Lifecycle Management status API
[subs="attributes"]
++++
<titleabbrev>Get Snapshot Lifecycle Management status</titleabbrev>
++++
Retrieves the current Snapshot Lifecycle Management (SLM) status.
[[slm-get-status-request]]
==== {api-request-title}
`GET /_slm/status`
[[slm-get-status-desc]]
==== {api-description-title}
Returns the status of the SLM plugin. The `operation_mode` field in the
response shows one of three states: `RUNNING`, `STOPPING`,
or `STOPPED`. You can change the status of the SLM plugin with the
<<slm-start, Start SLM>> and <<slm-stop, Stop SLM>> APIs.
==== Request Parameters
include::{docdir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
==== Authorization
You must have the `manage_slm` or `read_slm` cluster privilege to use this API.
For more information, see <<security-privileges>>.
[[slm-get-status-example]]
==== {api-examples-title}
Gets the SLM plugin status.
[source,console]
--------------------------------------------------
GET _slm/status
--------------------------------------------------
If the request succeeds, the body of the response shows the operation mode:
[source,console-result]
--------------------------------------------------
{
"operation_mode": "RUNNING"
}
--------------------------------------------------

View file

@ -65,6 +65,15 @@ A {dfeed} resource has the following properties:
`{"enabled": true, "check_window": "1h"}` See
<<ml-datafeed-delayed-data-check-config>>.
`max_empty_searches`::
(integer) If a real-time {dfeed} has never seen any data (including during
any initial training period), then it will automatically stop itself and
close its associated job after this many real-time searches that return no
documents. In other words, it will stop after `frequency` times
`max_empty_searches` of real-time operation. If this setting is not set,
then a {dfeed} with no end time that sees no data will remain started until
it is explicitly stopped. By default, this setting is not set.
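For illustration, a hypothetical real-time {dfeed} (the datafeed, job, and
index names are placeholders) that stops itself and closes its job after ten
empty searches:
[source,console]
--------------------------------------------------
PUT _ml/datafeeds/datafeed-example
{
  "job_id": "example-job",
  "indices": ["example-data"],
  "max_empty_searches": 10
}
--------------------------------------------------
// TEST[skip:hypothetical example]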
[[ml-datafeed-chunking-config]]
==== Chunking configuration objects

View file

@ -122,7 +122,8 @@ The API returns the following results:
"time_format": "epoch_ms"
},
"model_snapshot_retention_days": 1,
"results_index_name": "shared"
"results_index_name": "shared",
"allow_lazy_open": false
}
]
}

View file

@ -95,6 +95,19 @@ so do not set the `background_persist_interval` value too low.
deleted from Elasticsearch. The default value is null, which means results
are retained.
`allow_lazy_open`::
(boolean) Advanced configuration option.
Whether this job should be allowed to open when there is insufficient
{ml} node capacity for it to be immediately assigned to a node.
The default is `false`, which means that the <<ml-open-job>>
will return an error if a {ml} node with capacity to run the
job cannot immediately be found. (However, this is also subject to
the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting - see
<<advanced-ml-settings>>.) If this option is set to `true` then
the <<ml-open-job>> will not return an error, and the job will
wait in the `opening` state until sufficient {ml} node capacity
is available.
[[ml-analysisconfig]]
==== Analysis Configuration Objects

View file

@ -149,7 +149,8 @@ When the job is created, you receive the following results:
"time_format" : "epoch_ms"
},
"model_snapshot_retention_days" : 1,
"results_index_name" : "shared"
"results_index_name" : "shared",
"allow_lazy_open" : false
}
----
// TESTRESPONSE[s/"job_version" : "8.0.0"/"job_version" : $body.job_version/]

View file

@ -101,6 +101,15 @@ parallel and close one when you are satisfied with the results of the other job.
(Optional, unsigned integer) The `size` parameter that is used in {es}
searches. The default value is `1000`.
`max_empty_searches`::
(Optional, integer) If a real-time {dfeed} has never seen any data (including
during any initial training period), then it will automatically stop itself
and close its associated job after this many real-time searches that return
no documents. In other words, it will stop after `frequency` times
`max_empty_searches` of real-time operation. If this setting is not set,
then a {dfeed} with no end time that sees no data will remain started until
it is explicitly stopped. The special value `-1` unsets this setting.
For more information about these properties, see <<ml-datafeed-resource>>.

View file

@ -74,11 +74,14 @@ See <<ml-job-resource>>. | Yes
|`results_retention_days` |Advanced configuration option. The number of days
for which job results are retained. See <<ml-job-resource>>. | Yes
|`allow_lazy_open` |Advanced configuration option. Whether to allow the job to be
opened when no {ml} node has sufficient capacity. See <<ml-job-resource>>. | Yes
|=======================================================================
For those properties that have `Requires Restart` set to `Yes` in this table,
if the job is open when you make the update, you must stop the data feed, close
the job, then restart the data feed and open the job for the changes to take
the job, then reopen the job and restart the data feed for the changes to take
effect.
[NOTE]
@ -170,7 +173,8 @@ configuration information, including the updated property values. For example:
}
]
},
"results_index_name": "shared"
"results_index_name": "shared",
"allow_lazy_open": false
}
----
// TESTRESPONSE[s/"job_version": "7.0.0-alpha1"/"job_version": $body.job_version/]

View file

@ -138,6 +138,18 @@ that don't contain a results field are not included in the {reganalysis}.
as this object is passed verbatim to {es}. By default, this property has
the following value: `{"match_all": {}}`.
`allow_lazy_start`::
(Optional, boolean) Whether this job should be allowed to start when there
is insufficient {ml} node capacity for it to be immediately assigned to a node.
The default is `false`, which means that the <<start-dfanalytics>>
will return an error if a {ml} node with capacity to run the
job cannot immediately be found. (However, this is also subject to
the cluster-wide `xpack.ml.max_lazy_ml_nodes` setting - see
<<advanced-ml-settings>>.) If this option is set to `true` then
the <<start-dfanalytics>> will not return an error, and the job will
wait in the `starting` state until sufficient {ml} node capacity
is available.
[[ml-put-dfanalytics-example]]
==== {api-examples-title}
@ -197,7 +209,8 @@ The API returns the following result:
},
"model_memory_limit": "1gb",
"create_time" : 1562265491319,
"version" : "8.0.0"
"version" : "8.0.0",
"allow_lazy_start" : false
}
----
// TESTRESPONSE[s/1562265491319/$body.$_path/]
@ -257,7 +270,8 @@ The API returns the following result:
},
"model_memory_limit" : "1gb",
"create_time" : 1567168659127,
"version" : "8.0.0"
"version" : "8.0.0",
"allow_lazy_start" : false
}
----
// TESTRESPONSE[s/1567168659127/$body.$_path/]

View file

@ -54,9 +54,7 @@ public final class Grok {
"(?::(?<subname>[[:alnum:]@\\[\\]_:.-]+))?" +
")" +
"(?:=(?<definition>" +
"(?:" +
"(?:[^{}]+|\\.+)+" +
")+" +
")" +
")?" + "\\}";
private static final Regex GROK_PATTERN_REGEX = new Regex(GROK_PATTERN.getBytes(StandardCharsets.UTF_8), 0,

View file

@ -293,7 +293,7 @@ public class ReindexDocumentationIT extends ESIntegTestCase {
assertThat(ALLOWED_OPERATIONS.drainPermits(), equalTo(0));
ReindexRequestBuilder builder = new ReindexRequestBuilder(client, ReindexAction.INSTANCE).source(INDEX_NAME)
.destination("target_index", "_doc");
.destination("target_index");
// Scroll by 1 so that cancellation is easier to control
builder.source().setSize(1);

View file

@ -43,23 +43,23 @@ public class ReindexBasicTests extends ReindexTestCase {
assertHitCount(client().prepareSearch("source").setSize(0).get(), 4);
// Copy all the docs
ReindexRequestBuilder copy = reindex().source("source").destination("dest", "type").refresh(true);
ReindexRequestBuilder copy = reindex().source("source").destination("dest").refresh(true);
assertThat(copy.get(), matcher().created(4));
assertHitCount(client().prepareSearch("dest").setSize(0).get(), 4);
// Now none of them
createIndex("none");
copy = reindex().source("source").destination("none", "type").filter(termQuery("foo", "no_match")).refresh(true);
copy = reindex().source("source").destination("none").filter(termQuery("foo", "no_match")).refresh(true);
assertThat(copy.get(), matcher().created(0));
assertHitCount(client().prepareSearch("none").setSize(0).get(), 0);
// Now half of them
copy = reindex().source("source").destination("dest_half", "type").filter(termQuery("foo", "a")).refresh(true);
copy = reindex().source("source").destination("dest_half").filter(termQuery("foo", "a")).refresh(true);
assertThat(copy.get(), matcher().created(2));
assertHitCount(client().prepareSearch("dest_half").setSize(0).get(), 2);
// Limit with maxDocs
copy = reindex().source("source").destination("dest_size_one", "type").maxDocs(1).refresh(true);
copy = reindex().source("source").destination("dest_size_one").maxDocs(1).refresh(true);
assertThat(copy.get(), matcher().created(1));
assertHitCount(client().prepareSearch("dest_size_one").setSize(0).get(), 1);
}
@ -75,7 +75,7 @@ public class ReindexBasicTests extends ReindexTestCase {
assertHitCount(client().prepareSearch("source").setSize(0).get(), max);
// Copy all the docs
ReindexRequestBuilder copy = reindex().source("source").destination("dest", "type").refresh(true);
ReindexRequestBuilder copy = reindex().source("source").destination("dest").refresh(true);
// Use a small batch size so we have to use more than one batch
copy.source().setSize(5);
assertThat(copy.get(), matcher().created(max).batches(max, 5));
@ -83,7 +83,7 @@ public class ReindexBasicTests extends ReindexTestCase {
// Copy some of the docs
int half = max / 2;
copy = reindex().source("source").destination("dest_half", "type").refresh(true);
copy = reindex().source("source").destination("dest_half").refresh(true);
// Use a small batch size so we have to use more than one batch
copy.source().setSize(5);
copy.maxDocs(half);
@ -105,7 +105,7 @@ public class ReindexBasicTests extends ReindexTestCase {
int expectedSlices = expectedSliceStatuses(slices, "source");
// Copy all the docs
ReindexRequestBuilder copy = reindex().source("source").destination("dest", "type").refresh(true).setSlices(slices);
ReindexRequestBuilder copy = reindex().source("source").destination("dest").refresh(true).setSlices(slices);
// Use a small batch size so we have to use more than one batch
copy.source().setSize(5);
assertThat(copy.get(), matcher().created(max).batches(greaterThanOrEqualTo(max / 5)).slices(hasSize(expectedSlices)));
@ -113,7 +113,7 @@ public class ReindexBasicTests extends ReindexTestCase {
// Copy some of the docs
int half = max / 2;
copy = reindex().source("source").destination("dest_half", "type").refresh(true).setSlices(slices);
copy = reindex().source("source").destination("dest_half").refresh(true).setSlices(slices);
// Use a small batch size so we have to use more than one batch
copy.source().setSize(5);
copy.maxDocs(half);
@ -148,7 +148,7 @@ public class ReindexBasicTests extends ReindexTestCase {
String[] sourceIndexNames = docs.keySet().toArray(new String[docs.size()]);
ReindexRequestBuilder request = reindex()
.source(sourceIndexNames)
.destination("dest", "type")
.destination("dest")
.refresh(true)
.setSlices(slices);

View file

@ -1 +0,0 @@
d16cf15d29c409987cecde77407fbb6f1e16d262

View file

@ -0,0 +1 @@
6e6fc9178d1f1401aa0d6b843341efb91720f2cd

View file

@ -1 +0,0 @@
ccfbdfc727cbf702350572a0b12fe92185ebf162

View file

@ -0,0 +1 @@
b1d5ed85a558fbbadc2783f869fbd0adcd32b07b

View file

@ -1 +0,0 @@
4d55b3cdb74cd140d262de96987ebd369125a64c

View file

@ -0,0 +1 @@
5f71267aa784d0e6c5ec09fb988339d244b205a0

View file

@ -1 +0,0 @@
6f8aae763f743d91fb1ba1e9011dae0ef4f6ff34

View file

@ -0,0 +1 @@
e02700b574d3a0e2100308f971f0753ac8700e7c

View file

@ -1 +0,0 @@
ebf1f2bd0dad5e16aa1fc48d32e5dbe507b38d53

View file

@ -0,0 +1 @@
fc6546be5df552d9729f008d8d41a6dee28127aa

View file

@ -1 +0,0 @@
b00be4aa309e9b56e498191aa8c73e4f393759ed

View file

@ -0,0 +1 @@
ccaacf418a9e486b65e82c47bed66439119c5fdb

View file

@ -1 +0,0 @@
cd8b612d5daa42d1be3bb3203e4857597d5db79b

View file

@ -0,0 +1 @@
857502e863c02c829fdafea61c3fda6bda01d0af

View file

@ -62,7 +62,7 @@ public class AzureBlobContainer extends AbstractBlobContainer {
logger.trace("blobExists({})", blobName);
try {
return blobStore.blobExists(buildKey(blobName));
} catch (URISyntaxException | StorageException e) {
} catch (URISyntaxException | StorageException | IOException e) {
logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore, e.getMessage());
}
return false;
@ -97,7 +97,6 @@ public class AzureBlobContainer extends AbstractBlobContainer {
@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
logger.trace("writeBlob({}, stream, {})", buildKey(blobName), blobSize);
try {
blobStore.writeBlob(buildKey(blobName), inputStream, blobSize, failIfAlreadyExists);
} catch (URISyntaxException|StorageException e) {

View file

@ -33,7 +33,6 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.io.InputStream;
import java.net.URISyntaxException;
import java.nio.file.FileAlreadyExistsException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.Executor;
@ -88,11 +87,11 @@ public class AzureBlobStore implements BlobStore {
public void close() {
}
public boolean blobExists(String blob) throws URISyntaxException, StorageException {
public boolean blobExists(String blob) throws URISyntaxException, StorageException, IOException {
return service.blobExists(clientName, container, blob);
}
public void deleteBlob(String blob) throws URISyntaxException, StorageException {
public void deleteBlob(String blob) throws URISyntaxException, StorageException, IOException {
service.deleteBlob(clientName, container, blob);
}
@ -106,17 +105,17 @@ public class AzureBlobStore implements BlobStore {
}
public Map<String, BlobMetaData> listBlobsByPrefix(String keyPath, String prefix)
throws URISyntaxException, StorageException {
throws URISyntaxException, StorageException, IOException {
return service.listBlobsByPrefix(clientName, container, keyPath, prefix);
}
public Map<String, BlobContainer> children(BlobPath path) throws URISyntaxException, StorageException {
public Map<String, BlobContainer> children(BlobPath path) throws URISyntaxException, StorageException, IOException {
return Collections.unmodifiableMap(service.children(clientName, container, path).stream().collect(
Collectors.toMap(Function.identity(), name -> new AzureBlobContainer(path.add(name), this, threadPool))));
}
public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists)
throws URISyntaxException, StorageException, FileAlreadyExistsException {
throws URISyntaxException, StorageException, IOException {
service.writeBlob(this.clientName, container, blobName, inputStream, blobSize, failIfAlreadyExists);
}
}

View file

@ -267,7 +267,7 @@ public class AzureStorageService {
}
public Map<String, BlobMetaData> listBlobsByPrefix(String account, String container, String keyPath, String prefix)
throws URISyntaxException, StorageException {
throws URISyntaxException, StorageException, IOException {
// NOTE: this should be here: if (prefix == null) prefix = "";
// however, this is really inefficient since deleteBlobsByPrefix enumerates everything and
// then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix!
@ -295,7 +295,7 @@ public class AzureStorageService {
return Map.copyOf(blobsBuilder);
}
public Set<String> children(String account, String container, BlobPath path) throws URISyntaxException, StorageException {
public Set<String> children(String account, String container, BlobPath path) throws URISyntaxException, StorageException, IOException {
final var blobsBuilder = new HashSet<String>();
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
@ -319,8 +319,9 @@ public class AzureStorageService {
}
public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize,
boolean failIfAlreadyExists)
throws URISyntaxException, StorageException, FileAlreadyExistsException {
boolean failIfAlreadyExists) throws URISyntaxException, StorageException, IOException {
assert inputStream.markSupported()
: "Should not be used with non-mark supporting streams as their retry handling in the SDK is broken";
logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize));
final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);

View file

@ -20,6 +20,7 @@
package org.elasticsearch.repositories.azure;
import com.microsoft.azure.storage.StorageException;
import org.apache.logging.log4j.core.util.Throwables;
import org.elasticsearch.SpecialPermission;
import java.io.IOException;
@ -44,7 +45,9 @@ public final class SocketAccess {
try {
return AccessController.doPrivileged(operation);
} catch (PrivilegedActionException e) {
throw (IOException) e.getCause();
Throwables.rethrow(e.getCause());
assert false : "always throws";
return null;
}
}
@ -53,7 +56,9 @@ public final class SocketAccess {
try {
return AccessController.doPrivileged(operation);
} catch (PrivilegedActionException e) {
throw (StorageException) e.getCause();
Throwables.rethrow(e.getCause());
assert false : "always throws";
return null;
}
}
@ -65,12 +70,7 @@ public final class SocketAccess {
return null;
});
} catch (PrivilegedActionException e) {
Throwable cause = e.getCause();
if (cause instanceof StorageException) {
throw (StorageException) cause;
} else {
throw (URISyntaxException) cause;
}
Throwables.rethrow(e.getCause());
}
}

View file

@ -49,6 +49,7 @@ import org.junit.After;
import org.junit.Before;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.InetAddress;
@ -63,6 +64,7 @@ import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@ -294,6 +296,44 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {
assertThat(blocks.isEmpty(), is(true));
}
public void testRetryUntilFail() throws IOException {
final AtomicBoolean requestReceived = new AtomicBoolean(false);
httpServer.createContext("/container/write_blob_max_retries", exchange -> {
try {
if (requestReceived.compareAndSet(false, true)) {
throw new AssertionError("Should not receive two requests");
} else {
exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1);
}
} finally {
exchange.close();
}
});
final BlobContainer blobContainer = createBlobContainer(randomIntBetween(2, 5));
try (InputStream stream = new InputStream() {
@Override
public int read() throws IOException {
throw new IOException("foo");
}
@Override
public boolean markSupported() {
return true;
}
@Override
public void reset() {
throw new AssertionError("should not be called");
}
}) {
final IOException ioe = expectThrows(IOException.class, () ->
blobContainer.writeBlob("write_blob_max_retries", stream, randomIntBetween(1, 128), randomBoolean()));
assertThat(ioe.getMessage(), is("foo"));
}
}
private static byte[] randomBlobContent() {
return randomByteArrayOfLength(randomIntBetween(1, frequently() ? 512 : 1 << 20)); // rarely up to 1mb
}

View file

@ -1 +0,0 @@
d16cf15d29c409987cecde77407fbb6f1e16d262

View file

@ -0,0 +1 @@
6e6fc9178d1f1401aa0d6b843341efb91720f2cd

View file

@ -1 +0,0 @@
ccfbdfc727cbf702350572a0b12fe92185ebf162

View file

@ -0,0 +1 @@
b1d5ed85a558fbbadc2783f869fbd0adcd32b07b

View file

@ -1 +0,0 @@
4d55b3cdb74cd140d262de96987ebd369125a64c

View file

@ -0,0 +1 @@
5f71267aa784d0e6c5ec09fb988339d244b205a0

View file

@ -1 +0,0 @@
6f8aae763f743d91fb1ba1e9011dae0ef4f6ff34

View file

@ -0,0 +1 @@
e02700b574d3a0e2100308f971f0753ac8700e7c

View file

@ -1 +0,0 @@
ebf1f2bd0dad5e16aa1fc48d32e5dbe507b38d53

View file

@ -0,0 +1 @@
fc6546be5df552d9729f008d8d41a6dee28127aa

View file

@ -1 +0,0 @@
b00be4aa309e9b56e498191aa8c73e4f393759ed

View file

@ -0,0 +1 @@
ccaacf418a9e486b65e82c47bed66439119c5fdb

View file

@ -1 +0,0 @@
cd8b612d5daa42d1be3bb3203e4857597d5db79b

View file

@ -0,0 +1 @@
857502e863c02c829fdafea61c3fda6bda01d0af

View file

@ -36,10 +36,6 @@
"type":"boolean",
"description":"Return local information, do not retrieve the state from master node (default: false)"
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View file

@ -32,14 +32,6 @@
"type":"string",
"description":"a short version of the Accept header, e.g. json, yaml"
},
"local":{
"type":"boolean",
"description":"Return local information, do not retrieve the state from master node (default: false)"
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View file

@ -49,14 +49,6 @@
"pb"
]
},
"local":{
"type":"boolean",
"description":"Return local information, do not retrieve the state from master node (default: false)"
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View file

@ -20,14 +20,6 @@
"type":"string",
"description":"a short version of the Accept header, e.g. json, yaml"
},
"local":{
"type":"boolean",
"description":"Return local information, do not retrieve the state from master node (default: false)"
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View file

@ -59,10 +59,6 @@
"description":"If `true`, the response includes detailed information about shard recoveries",
"default":false
},
"master_timeout":{
"type":"time",
"description":"Explicit operation timeout for connection to master node"
},
"h":{
"type":"list",
"description":"Comma-separated list of column names to display"

View file

@ -1,8 +1,8 @@
---
"Test Index and Search locale dependent mappings / dates":
- skip:
version: "all"
reason: "Awaits fix: https://github.com/elastic/elasticsearch/issues/39981(Previously: JDK9 only supports this with a special sysproperty added in 6.2.0.)"
version: " - 6.1.99"
reason: JDK9 only supports this with a special sysproperty added in 6.2.0
- do:
indices.create:
index: test_index

View file

@ -19,7 +19,6 @@
package org.elasticsearch.index.reindex;
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.index.IndexRequest;
@ -29,7 +28,6 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
@ -321,9 +319,6 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequ
static final ObjectParser<ReindexRequest, Void> PARSER = new ObjectParser<>("reindex");
static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in reindex requests is deprecated.";
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(ReindexRequest.class));
static {
ObjectParser.Parser<ReindexRequest, Void> sourceParser = (parser, request, context) -> {
// Funky hack to work around Search not having a proper ObjectParser and us wanting to extract query if using remote.

View file

@ -60,14 +60,6 @@ public class ReindexRequestBuilder extends
return this;
}
/**
* Set the destination index and type.
*/
public ReindexRequestBuilder destination(String index, String type) {
destination.setIndex(index).setType(type);
return this;
}
/**
* Setup reindexing from a remote cluster.
*/

View file

@ -1216,6 +1216,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
getEngine().failEngine(reason, e);
}
/**
* Acquire the searcher without applying the additional reader wrapper.
*/
public Engine.Searcher acquireSearcherNoWrap(String source) {
readAllowed();
markSearcherAccessed();
return getEngine().acquireSearcher(source, Engine.SearcherScope.EXTERNAL);
}
public Engine.Searcher acquireSearcher(String source) {
return acquireSearcher(source, Engine.SearcherScope.EXTERNAL);
}

View file

@ -1011,10 +1011,16 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
*/
public boolean canMatch(ShardSearchRequest request) throws IOException {
assert request.searchType() == SearchType.QUERY_THEN_FETCH : "unexpected search type: " + request.searchType();
try (DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, false, "can_match")) {
SearchSourceBuilder source = context.request().source();
if (canRewriteToMatchNone(source)) {
QueryBuilder queryBuilder = source.query();
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().getId());
// we don't want to use the reader wrapper since it could run costly operations
// and we can afford false positives.
try (Engine.Searcher searcher = indexShard.acquireSearcherNoWrap("can_match")) {
QueryShardContext context = indexService.newQueryShardContext(request.shardId().id(), searcher,
request::nowInMillis, request.getClusterAlias());
Rewriteable.rewrite(request.getRewriteable(), context, false);
if (canRewriteToMatchNone(request.source())) {
QueryBuilder queryBuilder = request.source().query();
return queryBuilder instanceof MatchNoneQueryBuilder == false;
}
return true; // null query means match_all

View file

@ -19,6 +19,9 @@
package org.elasticsearch.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FilterDirectoryReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.ElasticsearchException;
@ -76,6 +79,7 @@ import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Before;
import java.io.IOException;
import java.util.Collection;
@ -88,6 +92,7 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import static java.util.Collections.singletonList;
@ -111,7 +116,42 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return pluginList(FailOnRewriteQueryPlugin.class, CustomScriptPlugin.class, InternalOrPrivateSettingsPlugin.class);
return pluginList(FailOnRewriteQueryPlugin.class, CustomScriptPlugin.class,
ReaderWrapperCountPlugin.class, InternalOrPrivateSettingsPlugin.class);
}
public static class ReaderWrapperCountPlugin extends Plugin {
@Override
public void onIndexModule(IndexModule indexModule) {
indexModule.setReaderWrapper(service -> SearchServiceTests::apply);
}
}
@Before
private void resetCount() {
numWrapInvocations = new AtomicInteger(0);
}
private static AtomicInteger numWrapInvocations = new AtomicInteger(0);
private static DirectoryReader apply(DirectoryReader directoryReader) throws IOException {
numWrapInvocations.incrementAndGet();
return new FilterDirectoryReader(directoryReader,
new FilterDirectoryReader.SubReaderWrapper() {
@Override
public LeafReader wrap(LeafReader reader) {
return reader;
}
}) {
@Override
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
return in;
}
@Override
public CacheHelper getReaderCacheHelper() {
return directoryReader.getReaderCacheHelper();
}
};
}
public static class CustomScriptPlugin extends MockScriptPlugin {
@ -559,6 +599,7 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
final IndexShard indexShard = indexService.getShard(0);
SearchRequest searchRequest = new SearchRequest().allowPartialSearchResults(true);
int numWrapReader = numWrapInvocations.get();
assertTrue(service.canMatch(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1,
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, -1, null, null)));
@ -582,6 +623,13 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
searchRequest.source(new SearchSourceBuilder().query(new MatchNoneQueryBuilder()));
assertFalse(service.canMatch(new ShardSearchRequest(OriginalIndices.NONE, searchRequest, indexShard.shardId(), 1,
new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, -1, null, null)));
assertEquals(numWrapReader, numWrapInvocations.get());
// make sure that the wrapper is called when the context is actually created
service.createContext(new ShardSearchRequest(OriginalIndices.NONE, searchRequest,
indexShard.shardId(), 1, new AliasFilter(null, Strings.EMPTY_ARRAY),
1f, -1, null, null)).close();
assertEquals(numWrapReader+1, numWrapInvocations.get());
}
public void testCanRewriteToMatchNone() {

View file

@ -24,8 +24,12 @@ Creates and updates role mappings.
==== {api-description-title}
Role mappings define which roles are assigned to each user. Each mapping has
_rules_ that identify users and a list of _roles_ that are
granted to those users.
_rules_ that identify users and a list of _roles_ that are granted to those users.
The role mapping APIs are generally the preferred way to manage role mappings
rather than using {stack-ov}/mapping-roles.html#mapping-roles-file[role mapping files].
The create or update role mappings API cannot update role mappings that are defined
in role mapping files.
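For illustration, a hedged sketch of creating a mapping through the Java high-level
REST client (class and method names from the client's security API as we understand
it; the mapping name, group DN, and role are examples):

// Map members of an LDAP group to the superuser role.
RoleMapperExpression rules =
    FieldRoleMapperExpression.ofGroups("cn=admins,ou=groups,dc=example,dc=com");
PutRoleMappingRequest request = new PutRoleMappingRequest("administrators", true,
    Collections.singletonList("superuser"), Collections.emptyList(), rules,
    null, RefreshPolicy.NONE);
PutRoleMappingResponse response = client.security().putRoleMapping(request, RequestOptions.DEFAULT);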
NOTE: This API does not create roles. Rather, it maps users to existing roles.
Roles can be created by using <<security-api-roles, Role Management APIs>> or

View file

@ -23,6 +23,11 @@ Removes role mappings.
Role mappings define which roles are assigned to each user. For more information,
see <<mapping-roles>>.
The role mapping APIs are generally the preferred way to manage role mappings
rather than using <<mapping-roles-file,role mapping files>>.
The delete role mappings API cannot remove role mappings that are defined
in role mapping files.
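A matching hedged sketch for deletion via the high-level REST client (mapping name
illustrative):

// Deleting an API-defined mapping; file-defined mappings are not affected.
DeleteRoleMappingRequest request = new DeleteRoleMappingRequest("administrators", RefreshPolicy.NONE);
DeleteRoleMappingResponse response = client.security().deleteRoleMapping(request, RequestOptions.DEFAULT);
boolean found = response.isFound(); // false when no API-defined mapping matched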
[[security-api-delete-role-mapping-path-params]]
==== {api-path-parms-title}

View file

@ -25,6 +25,11 @@ Retrieves role mappings.
Role mappings define which roles are assigned to each user. For more information,
see <<mapping-roles>>.
The role mapping APIs are generally the preferred way to manage role mappings
rather than using <<mapping-roles-file,role mapping files>>.
The get role mappings API cannot retrieve role mappings that are defined
in role mapping files.
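And for retrieval, again a hedged high-level REST client sketch; only API-defined
mappings come back:

GetRoleMappingsRequest request = new GetRoleMappingsRequest("administrators");
GetRoleMappingsResponse response = client.security().getRoleMappings(request, RequestOptions.DEFAULT);
List<ExpressionRoleMapping> mappings = response.getMappings(); // empty if only files define it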
[[security-api-get-role-mapping-path-params]]
==== {api-path-parms-title}

View file

@ -66,6 +66,24 @@ You can change this default behavior by changing the
this is a common setting in Elasticsearch, changing its value might affect other
schedules in the system.
While the _role mapping APIs_ are the preferred way to manage role mappings, the
`role_mappings.yml` file remains useful in a couple of use cases:
. If you want to define fixed role mappings that no one (besides an administrator
with physical access to the {es} nodes) would be able to change.
. If cluster administration depends on users from external realms and these users
need to have their roles mapped to them even when the cluster is RED. For instance,
an administrator who authenticates via LDAP or PKI can be assigned an administrator
role so that they can perform corrective actions (see the sketch after this list).
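A minimal `role_mappings.yml` sketch for the second use case, assuming the file's
role-name-to-DN-list format (the DN and role are illustrative):

# Members of this LDAP group get superuser even when the cluster is RED
# and the role mapping APIs are unavailable.
superuser:
  - "cn=admins,ou=groups,dc=example,dc=com"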
Note, however, that the `role_mappings.yml` file is provided as a minimal
administrative function and is not intended to define roles for all use cases.
IMPORTANT: You cannot view, edit, or remove any role mappings that are defined in the
role mapping files by using the role mapping APIs.
==== Realm specific details
[float]
[[ldap-role-mapping]]

View file

@ -143,12 +143,14 @@ public final class MlTasks {
public static DataFrameAnalyticsState getDataFrameAnalyticsState(String analyticsId, @Nullable PersistentTasksCustomMetaData tasks) {
PersistentTasksCustomMetaData.PersistentTask<?> task = getDataFrameAnalyticsTask(analyticsId, tasks);
if (task != null && task.getState() != null) {
if (task != null) {
DataFrameAnalyticsTaskState taskState = (DataFrameAnalyticsTaskState) task.getState();
return taskState.getState();
} else {
return DataFrameAnalyticsState.STOPPED;
if (taskState == null) {
return DataFrameAnalyticsState.STARTING;
}
return taskState.getState();
}
return DataFrameAnalyticsState.STOPPED;
}
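Because the hunk interleaves removed and added lines, here is the new method body
reconstructed in one piece (hedged against the exact surrounding code):

if (task != null) {
    DataFrameAnalyticsTaskState taskState = (DataFrameAnalyticsTaskState) task.getState();
    if (taskState == null) {
        return DataFrameAnalyticsState.STARTING; // task exists but state not yet persisted
    }
    return taskState.getState();
}
return DataFrameAnalyticsState.STOPPED; // no persistent task at all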
/**
@ -178,32 +180,29 @@ public final class MlTasks {
* @param nodes The cluster nodes
* @return The job Ids of tasks that do not have an assignment.
*/
public static Set<String> unallocatedJobIds(@Nullable PersistentTasksCustomMetaData tasks,
public static Set<String> unassignedJobIds(@Nullable PersistentTasksCustomMetaData tasks,
DiscoveryNodes nodes) {
return unallocatedJobTasks(tasks, nodes).stream()
return unassignedJobTasks(tasks, nodes).stream()
.map(task -> task.getId().substring(JOB_TASK_ID_PREFIX.length()))
.collect(Collectors.toSet());
}
/**
* The job tasks that do not have an allocation as determined by
* The job tasks that do not have an assignment as determined by
* {@link PersistentTasksClusterService#needsReassignment(PersistentTasksCustomMetaData.Assignment, DiscoveryNodes)}
*
* @param tasks Persistent tasks. If null an empty set is returned.
* @param nodes The cluster nodes
* @return Unallocated job tasks
* @return Unassigned job tasks
*/
public static Collection<PersistentTasksCustomMetaData.PersistentTask<?>> unallocatedJobTasks(
public static Collection<PersistentTasksCustomMetaData.PersistentTask<?>> unassignedJobTasks(
@Nullable PersistentTasksCustomMetaData tasks,
DiscoveryNodes nodes) {
if (tasks == null) {
return Collections.emptyList();
}
return tasks.findTasks(JOB_TASK_NAME, task -> true)
.stream()
.filter(task -> PersistentTasksClusterService.needsReassignment(task.getAssignment(), nodes))
.collect(Collectors.toList());
return tasks.findTasks(JOB_TASK_NAME, task -> PersistentTasksClusterService.needsReassignment(task.getAssignment(), nodes));
}
/**
@ -231,32 +230,29 @@ public final class MlTasks {
* @param nodes The cluster nodes
* @return The datafeed Ids of tasks that do not have an assignment.
*/
public static Set<String> unallocatedDatafeedIds(@Nullable PersistentTasksCustomMetaData tasks,
public static Set<String> unassignedDatafeedIds(@Nullable PersistentTasksCustomMetaData tasks,
DiscoveryNodes nodes) {
return unallocatedDatafeedTasks(tasks, nodes).stream()
return unassignedDatafeedTasks(tasks, nodes).stream()
.map(task -> task.getId().substring(DATAFEED_TASK_ID_PREFIX.length()))
.collect(Collectors.toSet());
}
/**
* The datafeed tasks that do not have an allocation as determined by
* The datafeed tasks that do not have an assignment as determined by
* {@link PersistentTasksClusterService#needsReassignment(PersistentTasksCustomMetaData.Assignment, DiscoveryNodes)}
*
* @param tasks Persistent tasks. If null an empty set is returned.
* @param nodes The cluster nodes
* @return Unallocated datafeed tasks
* @return Unassigned datafeed tasks
*/
public static Collection<PersistentTasksCustomMetaData.PersistentTask<?>> unallocatedDatafeedTasks(
public static Collection<PersistentTasksCustomMetaData.PersistentTask<?>> unassignedDatafeedTasks(
@Nullable PersistentTasksCustomMetaData tasks,
DiscoveryNodes nodes) {
if (tasks == null) {
return Collections.emptyList();
}
return tasks.findTasks(DATAFEED_TASK_NAME, task -> true)
.stream()
.filter(task -> PersistentTasksClusterService.needsReassignment(task.getAssignment(), nodes))
.collect(Collectors.toList());
return tasks.findTasks(DATAFEED_TASK_NAME, task -> PersistentTasksClusterService.needsReassignment(task.getAssignment(), nodes));
}
}
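A hedged usage sketch of the renamed helpers (the caller-side names are illustrative):

// Collect ML job and datafeed tasks stranded without an assignment,
// e.g. after the nodes they were running on left the cluster.
Set<String> jobIds = MlTasks.unassignedJobIds(tasks, clusterState.nodes());
Set<String> datafeedIds = MlTasks.unassignedDatafeedIds(tasks, clusterState.nodes());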

Some files were not shown because too many files have changed in this diff.