Mirror of https://github.com/elastic/elasticsearch.git (synced 2025-06-28 09:28:55 -04:00)
Remove HLRC docs and related integration tests. (#81358)
The HLRC will no longer be published from 8.0.0 onwards, and the HLRC docs are no longer published either, so they can be removed now. The HLRC is still used in a number of Java REST tests, so for that reason it can't be removed completely. Closes #81299
This commit is contained in:
parent f03fa87c74
commit eec64f72bf

303 changed files with 6 additions and 41823 deletions
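Since the HLRC stops being published from 8.0.0, code that relied on the examples removed here typically migrates to the Java API client (elasticsearch-java). The following is not part of this commit; it is a minimal, hypothetical sketch of the replacement client setup plus a match-all search, assuming the elasticsearch-java 8.x artifact and a cluster reachable on localhost:9200.

import co.elastic.clients.elasticsearch.ElasticsearchClient;
import co.elastic.clients.elasticsearch.core.SearchResponse;
import co.elastic.clients.json.jackson.JacksonJsonpMapper;
import co.elastic.clients.transport.ElasticsearchTransport;
import co.elastic.clients.transport.rest_client.RestClientTransport;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;

import java.io.IOException;
import java.util.Map;

public class JavaApiClientSketch {
    public static void main(String[] args) throws IOException {
        // The low-level REST client is still published and backs the new client.
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build();
        ElasticsearchTransport transport = new RestClientTransport(restClient, new JacksonJsonpMapper());
        ElasticsearchClient client = new ElasticsearchClient(transport);

        // Match-all search against "my-index", the index name used in the removed tests.
        SearchResponse<Map> response = client.search(
            s -> s.index("my-index").query(q -> q.matchAll(m -> m)),
            Map.class
        );
        System.out.println("hits: " + response.hits().total().value());

        transport.close();
    }
}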
@@ -1,226 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.asyncsearch.AsyncSearchResponse;
import org.elasticsearch.client.asyncsearch.DeleteAsyncSearchRequest;
import org.elasticsearch.client.asyncsearch.GetAsyncSearchRequest;
import org.elasticsearch.client.asyncsearch.SubmitAsyncSearchRequest;
import org.elasticsearch.client.core.AcknowledgedResponse;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.junit.Before;

import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

/**
 * Documentation for Async Search APIs in the high level java client.
 * Code wrapped in {@code tag} and {@code end} tags is included in the docs.
 */
@SuppressWarnings("removal")
public class AsyncSearchDocumentationIT extends ESRestHighLevelClientTestCase {

    @Before
    void setUpIndex() throws IOException {
        CreateIndexResponse createIndexResponse = highLevelClient().indices()
            .create(new CreateIndexRequest("my-index"), RequestOptions.DEFAULT);
        assertTrue(createIndexResponse.isAcknowledged());
    }

    public void testSubmitAsyncSearch() throws Exception {
        RestHighLevelClient client = highLevelClient();

        // tag::asyncsearch-submit-request
        SearchSourceBuilder searchSource = new SearchSourceBuilder()
            .query(QueryBuilders.matchAllQuery()); // <1>
        String[] indices = new String[] { "my-index" }; // <2>
        SubmitAsyncSearchRequest request
            = new SubmitAsyncSearchRequest(searchSource, indices);
        // end::asyncsearch-submit-request

        // tag::asyncsearch-submit-request-arguments
        request.setWaitForCompletionTimeout(TimeValue.timeValueSeconds(30)); // <1>
        request.setKeepAlive(TimeValue.timeValueMinutes(15)); // <2>
        request.setKeepOnCompletion(false); // <3>
        // end::asyncsearch-submit-request-arguments

        // tag::asyncsearch-submit-execute
        AsyncSearchResponse response = client.asyncSearch()
            .submit(request, RequestOptions.DEFAULT); // <1>
        // end::asyncsearch-submit-execute

        assertNotNull(response);
        assertNull(response.getFailure());

        // tag::asyncsearch-submit-response
        response.getSearchResponse(); // <1>
        response.getId(); // <2>
        response.isPartial(); // <3>
        response.isRunning(); // <4>
        response.getStartTime(); // <5>
        response.getExpirationTime(); // <6>
        response.getFailure(); // <7>
        // end::asyncsearch-submit-response

        // tag::asyncsearch-submit-listener
        ActionListener<AsyncSearchResponse> listener =
            new ActionListener<AsyncSearchResponse>() {
                @Override
                public void onResponse(AsyncSearchResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::asyncsearch-submit-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::asyncsearch-submit-execute-async
        client.asyncSearch()
            .submitAsync(request, RequestOptions.DEFAULT, listener); // <1>
        // end::asyncsearch-submit-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    public void testGetAsyncSearch() throws Exception {
        RestHighLevelClient client = highLevelClient();
        SearchSourceBuilder searchSource = new SearchSourceBuilder().query(QueryBuilders.matchAllQuery());
        String[] indices = new String[] { "my-index" };
        SubmitAsyncSearchRequest submitRequest = new SubmitAsyncSearchRequest(searchSource, indices);
        submitRequest.setKeepOnCompletion(true);
        AsyncSearchResponse submitResponse = client.asyncSearch().submit(submitRequest, RequestOptions.DEFAULT);
        String id = submitResponse.getId();

        // tag::asyncsearch-get-request
        GetAsyncSearchRequest request = new GetAsyncSearchRequest(id);
        // end::asyncsearch-get-request

        // tag::asyncsearch-get-request-arguments
        request.setWaitForCompletion(TimeValue.timeValueSeconds(30)); // <1>
        request.setKeepAlive(TimeValue.timeValueMinutes(15)); // <2>
        // end::asyncsearch-get-request-arguments

        // tag::asyncsearch-get-execute
        AsyncSearchResponse response = client.asyncSearch()
            .get(request, RequestOptions.DEFAULT); // <1>
        // end::asyncsearch-get-execute

        assertNotNull(response);
        assertNull(response.getFailure());

        // tag::asyncsearch-get-response
        response.getSearchResponse(); // <1>
        response.getId(); // <2>
        response.isPartial(); // <3>
        response.isRunning(); // <4>
        response.getStartTime(); // <5>
        response.getExpirationTime(); // <6>
        response.getFailure(); // <7>
        // end::asyncsearch-get-response

        // tag::asyncsearch-get-listener
        ActionListener<AsyncSearchResponse> listener =
            new ActionListener<AsyncSearchResponse>() {
                @Override
                public void onResponse(AsyncSearchResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::asyncsearch-get-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::asyncsearch-get-execute-async
        client.asyncSearch()
            .getAsync(request, RequestOptions.DEFAULT, listener); // <1>
        // end::asyncsearch-get-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
        client.asyncSearch().delete(new DeleteAsyncSearchRequest(id), RequestOptions.DEFAULT);
    }

    @SuppressWarnings("unused")
    public void testDeleteAsyncSearch() throws Exception {
        RestHighLevelClient client = highLevelClient();
        SearchSourceBuilder searchSource = new SearchSourceBuilder().query(QueryBuilders.matchAllQuery());
        String[] indices = new String[] { "my-index" };
        SubmitAsyncSearchRequest submitRequest = new SubmitAsyncSearchRequest(searchSource, indices);
        submitRequest.setKeepOnCompletion(true);
        AsyncSearchResponse submitResponse = client.asyncSearch().submit(submitRequest, RequestOptions.DEFAULT);
        String id = submitResponse.getId();

        // tag::asyncsearch-delete-request
        DeleteAsyncSearchRequest request = new DeleteAsyncSearchRequest(id);
        // end::asyncsearch-delete-request

        // tag::asyncsearch-delete-execute
        AcknowledgedResponse response = client.asyncSearch() // <1>
            .delete(new DeleteAsyncSearchRequest(id),
                RequestOptions.DEFAULT);
        // end::asyncsearch-delete-execute

        assertNotNull(response);
        assertTrue(response.isAcknowledged());

        // tag::asyncsearch-delete-response
        response.isAcknowledged(); // <1>
        // end::asyncsearch-delete-response

        // tag::asyncsearch-delete-listener
        ActionListener<AcknowledgedResponse> listener =
            new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::asyncsearch-delete-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::asyncsearch-delete-execute-async
        client.asyncSearch()
            .deleteAsync(request, RequestOptions.DEFAULT, listener); // <1>
        // end::asyncsearch-delete-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }
}
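For reference, the async search calls exercised in the deleted test above map onto the _async_search REST endpoints, so the same flow can still be driven with the low-level RestClient that remains published. The lines below are an illustrative sketch, not part of this commit, and assume an already built org.elasticsearch.client.RestClient instance named restClient.

// Submit an async search for "my-index" and keep the result so it can be fetched by id (sketch).
Request submit = new Request("POST", "/my-index/_async_search");
submit.addParameter("wait_for_completion_timeout", "1s");
submit.addParameter("keep_on_completion", "true");
submit.setJsonEntity("{\"query\":{\"match_all\":{}}}");
Response submitResponse = restClient.performRequest(submit);
// The response body carries an "id" that can later be polled with
// GET /_async_search/<id> and cleaned up with DELETE /_async_search/<id>.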
@@ -1,970 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ccr.AutoFollowStats;
import org.elasticsearch.client.ccr.CcrStatsRequest;
import org.elasticsearch.client.ccr.CcrStatsResponse;
import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.FollowInfoRequest;
import org.elasticsearch.client.ccr.FollowInfoResponse;
import org.elasticsearch.client.ccr.FollowStatsRequest;
import org.elasticsearch.client.ccr.FollowStatsResponse;
import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse.Pattern;
import org.elasticsearch.client.ccr.IndicesFollowStats;
import org.elasticsearch.client.ccr.PauseAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.PutFollowResponse;
import org.elasticsearch.client.ccr.ResumeAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.client.core.AcknowledgedResponse;
import org.elasticsearch.client.core.BroadcastResponse;
import org.elasticsearch.client.indices.CloseIndexRequest;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.rest.yaml.ObjectPath;
import org.junit.Before;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.is;

@SuppressWarnings("removal")
public class CCRDocumentationIT extends ESRestHighLevelClientTestCase {

    @Before
    public void setupRemoteClusterConfig() throws Exception {
        setupRemoteClusterConfig("local");
    }

    public void testPutFollow() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            // Create leader index:
            CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader");
            CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
            assertThat(response.isAcknowledged(), is(true));
        }

        // tag::ccr-put-follow-request
        PutFollowRequest putFollowRequest = new PutFollowRequest(
            "local", // <1>
            "leader", // <2>
            "follower", // <3>
            ActiveShardCount.ONE // <4>
        );
        Settings settings =
            Settings.builder().put("index.number_of_replicas", 0L).build();
        putFollowRequest.setSettings(settings); // <5>
        // end::ccr-put-follow-request

        // tag::ccr-put-follow-execute
        PutFollowResponse putFollowResponse =
            client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT);
        // end::ccr-put-follow-execute

        // tag::ccr-put-follow-response
        boolean isFollowIndexCreated =
            putFollowResponse.isFollowIndexCreated(); // <1>
        boolean isFollowIndexShardsAcked =
            putFollowResponse.isFollowIndexShardsAcked(); // <2>
        boolean isIndexFollowingStarted =
            putFollowResponse.isIndexFollowingStarted(); // <3>
        // end::ccr-put-follow-response

        // Pause following and delete follower index, so that we can execute put follow api again:
        {
            PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower");
            AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
            assertThat(pauseFollowResponse.isAcknowledged(), is(true));

            DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest("follower");
            assertThat(client.indices().delete(deleteIndexRequest, RequestOptions.DEFAULT).isAcknowledged(), is(true));
        }

        // tag::ccr-put-follow-execute-listener
        ActionListener<PutFollowResponse> listener =
            new ActionListener<PutFollowResponse>() {
                @Override
                public void onResponse(PutFollowResponse response) { // <1>
                    boolean isFollowIndexCreated =
                        putFollowResponse.isFollowIndexCreated();
                    boolean isFollowIndexShardsAcked =
                        putFollowResponse.isFollowIndexShardsAcked();
                    boolean isIndexFollowingStarted =
                        putFollowResponse.isIndexFollowingStarted();
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-put-follow-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-put-follow-execute-async
        client.ccr().putFollowAsync(putFollowRequest,
            RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-put-follow-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));

        {
            PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower");
            AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
            assertThat(pauseFollowResponse.isAcknowledged(), is(true));
        }
    }

    public void testPauseFollow() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            // Create leader index:
            CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader");
            CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
            assertThat(response.isAcknowledged(), is(true));
        }
        String followIndex = "follower";
        // Follow index, so that it can be paused:
        {
            PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex, ActiveShardCount.ONE);
            PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT);
            assertThat(putFollowResponse.isFollowIndexCreated(), is(true));
            assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true));
            assertThat(putFollowResponse.isIndexFollowingStarted(), is(true));
        }

        // tag::ccr-pause-follow-request
        PauseFollowRequest request = new PauseFollowRequest(followIndex); // <1>
        // end::ccr-pause-follow-request

        // tag::ccr-pause-follow-execute
        AcknowledgedResponse response =
            client.ccr().pauseFollow(request, RequestOptions.DEFAULT);
        // end::ccr-pause-follow-execute

        // tag::ccr-pause-follow-response
        boolean acknowledged = response.isAcknowledged(); // <1>
        // end::ccr-pause-follow-response

        // tag::ccr-pause-follow-execute-listener
        ActionListener<AcknowledgedResponse> listener =
            new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) {
                    boolean acknowledged = response.isAcknowledged(); // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-pause-follow-execute-listener

        // Resume follow index, so that it can be paused again:
        {
            ResumeFollowRequest resumeFollowRequest = new ResumeFollowRequest(followIndex);
            AcknowledgedResponse resumeResponse = client.ccr().resumeFollow(resumeFollowRequest, RequestOptions.DEFAULT);
            assertThat(resumeResponse.isAcknowledged(), is(true));
        }

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-pause-follow-execute-async
        client.ccr()
            .pauseFollowAsync(request, RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-pause-follow-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    public void testResumeFollow() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            // Create leader index:
            CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader");
            CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
            assertThat(response.isAcknowledged(), is(true));
        }
        String followIndex = "follower";
        // Follow index, so that it can be paused:
        {
            PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex, ActiveShardCount.ONE);
            PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT);
            assertThat(putFollowResponse.isFollowIndexCreated(), is(true));
            assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true));
            assertThat(putFollowResponse.isIndexFollowingStarted(), is(true));
        }

        // Pause follow index, so that it can be resumed:
        {
            PauseFollowRequest pauseFollowRequest = new PauseFollowRequest(followIndex);
            AcknowledgedResponse pauseResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
            assertThat(pauseResponse.isAcknowledged(), is(true));
        }

        // tag::ccr-resume-follow-request
        ResumeFollowRequest request = new ResumeFollowRequest(followIndex); // <1>
        // end::ccr-resume-follow-request

        // tag::ccr-resume-follow-execute
        AcknowledgedResponse response =
            client.ccr().resumeFollow(request, RequestOptions.DEFAULT);
        // end::ccr-resume-follow-execute

        // tag::ccr-resume-follow-response
        boolean acknowledged = response.isAcknowledged(); // <1>
        // end::ccr-resume-follow-response

        // Pause follow index, so that it can be resumed again:
        {
            PauseFollowRequest pauseFollowRequest = new PauseFollowRequest(followIndex);
            AcknowledgedResponse pauseResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
            assertThat(pauseResponse.isAcknowledged(), is(true));
        }

        // tag::ccr-resume-follow-execute-listener
        ActionListener<AcknowledgedResponse> listener =
            new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) {
                    boolean acknowledged = response.isAcknowledged(); // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-resume-follow-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-resume-follow-execute-async
        client.ccr()
            .resumeFollowAsync(request, RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-resume-follow-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));

        // Cleanup:
        client.ccr().pauseFollow(new PauseFollowRequest(followIndex), RequestOptions.DEFAULT);
    }

    public void testUnfollow() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            // Create leader index:
            CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader");
            CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
            assertThat(response.isAcknowledged(), is(true));
        }
        String followIndex = "follower";
        // Follow index, pause and close, so that it can be unfollowed:
        {
            PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex, ActiveShardCount.ONE);
            PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT);
            assertThat(putFollowResponse.isFollowIndexCreated(), is(true));
            assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true));
            assertThat(putFollowResponse.isIndexFollowingStarted(), is(true));

            PauseFollowRequest pauseFollowRequest = new PauseFollowRequest(followIndex);
            AcknowledgedResponse unfollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
            assertThat(unfollowResponse.isAcknowledged(), is(true));

            CloseIndexRequest closeIndexRequest = new CloseIndexRequest(followIndex);
            assertThat(client.indices().close(closeIndexRequest, RequestOptions.DEFAULT).isAcknowledged(), is(true));
        }

        // tag::ccr-unfollow-request
        UnfollowRequest request = new UnfollowRequest(followIndex); // <1>
        // end::ccr-unfollow-request

        // tag::ccr-unfollow-execute
        AcknowledgedResponse response =
            client.ccr().unfollow(request, RequestOptions.DEFAULT);
        // end::ccr-unfollow-execute

        // tag::ccr-unfollow-response
        boolean acknowledged = response.isAcknowledged(); // <1>
        // end::ccr-unfollow-response

        // Delete, put follow index, pause and close, so that it can be unfollowed again:
        {
            DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(followIndex);
            assertThat(client.indices().delete(deleteIndexRequest, RequestOptions.DEFAULT).isAcknowledged(), is(true));

            PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex, ActiveShardCount.ONE);
            PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT);
            assertThat(putFollowResponse.isFollowIndexCreated(), is(true));
            assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true));
            assertThat(putFollowResponse.isIndexFollowingStarted(), is(true));

            PauseFollowRequest pauseFollowRequest = new PauseFollowRequest(followIndex);
            AcknowledgedResponse unfollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
            assertThat(unfollowResponse.isAcknowledged(), is(true));

            CloseIndexRequest closeIndexRequest = new CloseIndexRequest(followIndex);
            assertThat(client.indices().close(closeIndexRequest, RequestOptions.DEFAULT).isAcknowledged(), is(true));
        }

        // tag::ccr-unfollow-execute-listener
        ActionListener<AcknowledgedResponse> listener =
            new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) {
                    boolean acknowledged = response.isAcknowledged(); // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-unfollow-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-unfollow-execute-async
        client.ccr()
            .unfollowAsync(request, RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-unfollow-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    public void testForgetFollower() throws InterruptedException, IOException {
        final RestHighLevelClient client = highLevelClient();
        final String leaderIndex = "leader";
        {
            // create leader index
            final CreateIndexRequest createIndexRequest = new CreateIndexRequest(leaderIndex);
            final Map<String, String> settings = new HashMap<>(2);
            final int numberOfShards = randomIntBetween(1, 2);
            settings.put("index.number_of_shards", Integer.toString(numberOfShards));
            createIndexRequest.settings(settings);
            final CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
            assertThat(response.isAcknowledged(), is(true));
        }
        final String followerIndex = "follower";

        final PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followerIndex, ActiveShardCount.ONE);
        final PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT);
        assertTrue(putFollowResponse.isFollowIndexCreated());
        assertTrue((putFollowResponse.isFollowIndexShardsAcked()));
        assertTrue(putFollowResponse.isIndexFollowingStarted());

        final PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower");
        AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
        assertTrue(pauseFollowResponse.isAcknowledged());

        final String followerCluster = highLevelClient().info(RequestOptions.DEFAULT).getClusterName();
        final Request statsRequest = new Request("GET", "/follower/_stats");
        final Response statsResponse = client().performRequest(statsRequest);
        final ObjectPath statsObjectPath = ObjectPath.createFromResponse(statsResponse);
        final String followerIndexUUID = statsObjectPath.evaluate("indices.follower.uuid");

        final String leaderCluster = "local";

        // tag::ccr-forget-follower-request
        final ForgetFollowerRequest request = new ForgetFollowerRequest(
            followerCluster, // <1>
            followerIndex, // <2>
            followerIndexUUID, // <3>
            leaderCluster, // <4>
            leaderIndex); // <5>
        // end::ccr-forget-follower-request

        // tag::ccr-forget-follower-execute
        final BroadcastResponse response = client
            .ccr()
            .forgetFollower(request, RequestOptions.DEFAULT);
        // end::ccr-forget-follower-execute

        // tag::ccr-forget-follower-response
        final BroadcastResponse.Shards shards = response.shards(); // <1>
        final int total = shards.total(); // <2>
        final int successful = shards.successful(); // <3>
        final int skipped = shards.skipped(); // <4>
        final int failed = shards.failed(); // <5>
        shards.failures().forEach(failure -> {}); // <6>
        // end::ccr-forget-follower-response

        // tag::ccr-forget-follower-execute-listener
        ActionListener<BroadcastResponse> listener =
            new ActionListener<BroadcastResponse>() {

                @Override
                public void onResponse(final BroadcastResponse response) {
                    final BroadcastResponse.Shards shards = // <1>
                        response.shards();
                    final int total = shards.total();
                    final int successful = shards.successful();
                    final int skipped = shards.skipped();
                    final int failed = shards.failed();
                    shards.failures().forEach(failure -> {});
                }

                @Override
                public void onFailure(final Exception e) {
                    // <2>
                }

            };
        // end::ccr-forget-follower-execute-listener

        // replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-forget-follower-execute-async
        client.ccr().forgetFollowerAsync(
            request,
            RequestOptions.DEFAULT,
            listener); // <1>
        // end::ccr-forget-follower-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    public void testPutAutoFollowPattern() throws Exception {
        RestHighLevelClient client = highLevelClient();

        // tag::ccr-put-auto-follow-pattern-request
        PutAutoFollowPatternRequest request =
            new PutAutoFollowPatternRequest(
                "my_pattern", // <1>
                "local", // <2>
                Arrays.asList("logs-*", "metrics-*"), // <3>
                Arrays.asList("logs-excluded", "metrics-excluded") // <4>
            );
        request.setFollowIndexNamePattern("copy-{{leader_index}}"); // <5>
        Settings settings =
            Settings.builder().put("index.number_of_replicas", 0L).build();
        request.setSettings(settings); // <6>
        // end::ccr-put-auto-follow-pattern-request

        // tag::ccr-put-auto-follow-pattern-execute
        AcknowledgedResponse response = client.ccr()
            .putAutoFollowPattern(request, RequestOptions.DEFAULT);
        // end::ccr-put-auto-follow-pattern-execute

        // tag::ccr-put-auto-follow-pattern-response
        boolean acknowledged = response.isAcknowledged(); // <1>
        // end::ccr-put-auto-follow-pattern-response

        // Delete auto follow pattern, so that we can store it again:
        {
            final DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
            AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
            assertThat(deleteResponse.isAcknowledged(), is(true));
        }

        // tag::ccr-put-auto-follow-pattern-execute-listener
        ActionListener<AcknowledgedResponse> listener =
            new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) { // <1>
                    boolean acknowledged = response.isAcknowledged();
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-put-auto-follow-pattern-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-put-auto-follow-pattern-execute-async
        client.ccr().putAutoFollowPatternAsync(request,
            RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-put-auto-follow-pattern-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));

        // Cleanup:
        {
            final DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
            AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
            assertThat(deleteResponse.isAcknowledged(), is(true));
        }
    }

    public void testDeleteAutoFollowPattern() throws Exception {
        RestHighLevelClient client = highLevelClient();

        // Put auto follow pattern, so that we can delete it:
        {
            final PutAutoFollowPatternRequest putRequest = new PutAutoFollowPatternRequest(
                "my_pattern",
                "local",
                Collections.singletonList("logs-*")
            );
            AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
            assertThat(putResponse.isAcknowledged(), is(true));
        }

        // tag::ccr-delete-auto-follow-pattern-request
        DeleteAutoFollowPatternRequest request =
            new DeleteAutoFollowPatternRequest("my_pattern"); // <1>
        // end::ccr-delete-auto-follow-pattern-request

        // tag::ccr-delete-auto-follow-pattern-execute
        AcknowledgedResponse response = client.ccr()
            .deleteAutoFollowPattern(request, RequestOptions.DEFAULT);
        // end::ccr-delete-auto-follow-pattern-execute

        // tag::ccr-delete-auto-follow-pattern-response
        boolean acknowledged = response.isAcknowledged(); // <1>
        // end::ccr-delete-auto-follow-pattern-response

        // Put auto follow pattern, so that we can delete it again:
        {
            final PutAutoFollowPatternRequest putRequest = new PutAutoFollowPatternRequest(
                "my_pattern",
                "local",
                Collections.singletonList("logs-*")
            );
            AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
            assertThat(putResponse.isAcknowledged(), is(true));
        }

        // tag::ccr-delete-auto-follow-pattern-execute-listener
        ActionListener<AcknowledgedResponse> listener =
            new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) { // <1>
                    boolean acknowledged = response.isAcknowledged();
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-delete-auto-follow-pattern-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-delete-auto-follow-pattern-execute-async
        client.ccr().deleteAutoFollowPatternAsync(request,
            RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-delete-auto-follow-pattern-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    public void testGetAutoFollowPattern() throws Exception {
        RestHighLevelClient client = highLevelClient();

        // Put auto follow pattern, so that we can get it:
        {
            final PutAutoFollowPatternRequest putRequest = new PutAutoFollowPatternRequest(
                "my_pattern",
                "local",
                Collections.singletonList("logs-*")
            );
            AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
            assertThat(putResponse.isAcknowledged(), is(true));
        }

        // tag::ccr-get-auto-follow-pattern-request
        GetAutoFollowPatternRequest request =
            new GetAutoFollowPatternRequest("my_pattern"); // <1>
        // end::ccr-get-auto-follow-pattern-request

        // tag::ccr-get-auto-follow-pattern-execute
        GetAutoFollowPatternResponse response = client.ccr()
            .getAutoFollowPattern(request, RequestOptions.DEFAULT);
        // end::ccr-get-auto-follow-pattern-execute

        // tag::ccr-get-auto-follow-pattern-response
        Map<String, Pattern> patterns = response.getPatterns();
        Pattern pattern = patterns.get("my_pattern"); // <1>
        pattern.getLeaderIndexPatterns();
        // end::ccr-get-auto-follow-pattern-response

        // tag::ccr-get-auto-follow-pattern-execute-listener
        ActionListener<GetAutoFollowPatternResponse> listener =
            new ActionListener<GetAutoFollowPatternResponse>() {
                @Override
                public void onResponse(GetAutoFollowPatternResponse
                        response) { // <1>
                    Map<String, Pattern> patterns = response.getPatterns();
                    Pattern pattern = patterns.get("my_pattern");
                    pattern.getLeaderIndexPatterns();
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-get-auto-follow-pattern-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-get-auto-follow-pattern-execute-async
        client.ccr().getAutoFollowPatternAsync(request,
            RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-get-auto-follow-pattern-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));

        // Cleanup:
        {
            DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
            AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
            assertThat(deleteResponse.isAcknowledged(), is(true));
        }
    }

    public void testPauseAutoFollowPattern() throws Exception {
        final RestHighLevelClient client = highLevelClient();
        {
            final PutAutoFollowPatternRequest putRequest = new PutAutoFollowPatternRequest("my_pattern", "local", List.of("logs-*"));
            AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
            assertThat(putResponse.isAcknowledged(), is(true));
        }

        // tag::ccr-pause-auto-follow-pattern-request
        PauseAutoFollowPatternRequest request =
            new PauseAutoFollowPatternRequest("my_pattern"); // <1>
        // end::ccr-pause-auto-follow-pattern-request

        // tag::ccr-pause-auto-follow-pattern-execute
        AcknowledgedResponse response = client.ccr()
            .pauseAutoFollowPattern(request, RequestOptions.DEFAULT);
        // end::ccr-pause-auto-follow-pattern-execute

        // tag::ccr-pause-auto-follow-pattern-response
        boolean acknowledged = response.isAcknowledged(); // <1>
        // end::ccr-pause-auto-follow-pattern-response

        // tag::ccr-pause-auto-follow-pattern-execute-listener
        ActionListener<AcknowledgedResponse> listener =
            new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) { // <1>
                    boolean paused = response.isAcknowledged();
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-pause-auto-follow-pattern-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-pause-auto-follow-pattern-execute-async
        client.ccr().pauseAutoFollowPatternAsync(request,
            RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-pause-auto-follow-pattern-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));

        // Cleanup:
        {
            DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
            AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
            assertThat(deleteResponse.isAcknowledged(), is(true));
        }
    }

    public void testResumeAutoFollowPattern() throws Exception {
        final RestHighLevelClient client = highLevelClient();
        {
            final PutAutoFollowPatternRequest putRequest = new PutAutoFollowPatternRequest("my_pattern", "local", List.of("logs-*"));
            AcknowledgedResponse putResponse = client.ccr().putAutoFollowPattern(putRequest, RequestOptions.DEFAULT);
            assertThat(putResponse.isAcknowledged(), is(true));

            final PauseAutoFollowPatternRequest pauseRequest = new PauseAutoFollowPatternRequest("my_pattern");
            AcknowledgedResponse pauseResponse = client.ccr().pauseAutoFollowPattern(pauseRequest, RequestOptions.DEFAULT);
            assertThat(pauseResponse.isAcknowledged(), is(true));
        }

        // tag::ccr-resume-auto-follow-pattern-request
        ResumeAutoFollowPatternRequest request =
            new ResumeAutoFollowPatternRequest("my_pattern"); // <1>
        // end::ccr-resume-auto-follow-pattern-request

        // tag::ccr-resume-auto-follow-pattern-execute
        AcknowledgedResponse response = client.ccr()
            .resumeAutoFollowPattern(request, RequestOptions.DEFAULT);
        // end::ccr-resume-auto-follow-pattern-execute

        // tag::ccr-resume-auto-follow-pattern-response
        boolean acknowledged = response.isAcknowledged(); // <1>
        // end::ccr-resume-auto-follow-pattern-response

        // tag::ccr-resume-auto-follow-pattern-execute-listener
        ActionListener<AcknowledgedResponse> listener =
            new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) { // <1>
                    boolean resumed = response.isAcknowledged();
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-resume-auto-follow-pattern-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-resume-auto-follow-pattern-execute-async
        client.ccr().resumeAutoFollowPatternAsync(request,
            RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-resume-auto-follow-pattern-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));

        // Cleanup:
        {
            DeleteAutoFollowPatternRequest deleteRequest = new DeleteAutoFollowPatternRequest("my_pattern");
            AcknowledgedResponse deleteResponse = client.ccr().deleteAutoFollowPattern(deleteRequest, RequestOptions.DEFAULT);
            assertThat(deleteResponse.isAcknowledged(), is(true));
        }
    }

    public void testGetCCRStats() throws Exception {
        RestHighLevelClient client = highLevelClient();

        // tag::ccr-get-stats-request
        CcrStatsRequest request =
            new CcrStatsRequest(); // <1>
        // end::ccr-get-stats-request

        // tag::ccr-get-stats-execute
        CcrStatsResponse response = client.ccr()
            .getCcrStats(request, RequestOptions.DEFAULT);
        // end::ccr-get-stats-execute

        // tag::ccr-get-stats-response
        IndicesFollowStats indicesFollowStats =
            response.getIndicesFollowStats(); // <1>
        AutoFollowStats autoFollowStats =
            response.getAutoFollowStats(); // <2>
        // end::ccr-get-stats-response

        // tag::ccr-get-stats-execute-listener
        ActionListener<CcrStatsResponse> listener =
            new ActionListener<CcrStatsResponse>() {
                @Override
                public void onResponse(CcrStatsResponse response) { // <1>
                    IndicesFollowStats indicesFollowStats =
                        response.getIndicesFollowStats();
                    AutoFollowStats autoFollowStats =
                        response.getAutoFollowStats();
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-get-stats-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-get-stats-execute-async
        client.ccr().getCcrStatsAsync(request,
            RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-get-stats-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    public void testGetFollowStats() throws Exception {
        RestHighLevelClient client = highLevelClient();

        {
            // Create leader index:
            CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader");
            CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
            assertThat(response.isAcknowledged(), is(true));
        }
        {
            // Follow index, so that we can query for follow stats:
            PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", "follower", ActiveShardCount.ONE);
            PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT);
            assertThat(putFollowResponse.isFollowIndexCreated(), is(true));
            assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true));
            assertThat(putFollowResponse.isIndexFollowingStarted(), is(true));
        }

        // tag::ccr-get-follow-stats-request
        FollowStatsRequest request =
            new FollowStatsRequest("follower"); // <1>
        // end::ccr-get-follow-stats-request

        // tag::ccr-get-follow-stats-execute
        FollowStatsResponse response = client.ccr()
            .getFollowStats(request, RequestOptions.DEFAULT);
        // end::ccr-get-follow-stats-execute

        // tag::ccr-get-follow-stats-response
        IndicesFollowStats indicesFollowStats =
            response.getIndicesFollowStats(); // <1>
        // end::ccr-get-follow-stats-response

        // tag::ccr-get-follow-stats-execute-listener
        ActionListener<FollowStatsResponse> listener =
            new ActionListener<FollowStatsResponse>() {
                @Override
                public void onResponse(FollowStatsResponse response) { // <1>
                    IndicesFollowStats indicesFollowStats =
                        response.getIndicesFollowStats();
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-get-follow-stats-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-get-follow-stats-execute-async
        client.ccr().getFollowStatsAsync(request,
            RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-get-follow-stats-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));

        {
            PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower");
            AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
            assertThat(pauseFollowResponse.isAcknowledged(), is(true));
        }
    }

    public void testGetFollowInfos() throws Exception {
        RestHighLevelClient client = highLevelClient();

        {
            // Create leader index:
            CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader");
            CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
            assertThat(response.isAcknowledged(), is(true));
        }
        {
            // Follow index, so that we can query for follow stats:
            PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", "follower", ActiveShardCount.ONE);
            PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT);
            assertThat(putFollowResponse.isFollowIndexCreated(), is(true));
            assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true));
            assertThat(putFollowResponse.isIndexFollowingStarted(), is(true));
        }

        // tag::ccr-get-follow-info-request
        FollowInfoRequest request =
            new FollowInfoRequest("follower"); // <1>
        // end::ccr-get-follow-info-request

        // tag::ccr-get-follow-info-execute
        FollowInfoResponse response = client.ccr()
            .getFollowInfo(request, RequestOptions.DEFAULT);
        // end::ccr-get-follow-info-execute

        // tag::ccr-get-follow-info-response
        List<FollowInfoResponse.FollowerInfo> infos =
            response.getInfos(); // <1>
        // end::ccr-get-follow-info-response

        // tag::ccr-get-follow-info-execute-listener
        ActionListener<FollowInfoResponse> listener =
            new ActionListener<FollowInfoResponse>() {
                @Override
                public void onResponse(FollowInfoResponse response) { // <1>
                    List<FollowInfoResponse.FollowerInfo> infos =
                        response.getInfos();
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::ccr-get-follow-info-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::ccr-get-follow-info-execute-async
        client.ccr().getFollowInfoAsync(request,
            RequestOptions.DEFAULT, listener); // <1>
        // end::ccr-get-follow-info-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));

        {
            PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower");
            AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
            assertThat(pauseFollowResponse.isAcknowledged(), is(true));
        }
    }

}
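The CCR tests above likewise wrap plain REST endpoints. As a hedged illustration (not part of this commit), creating and then pausing a follower index with the low-level RestClient could look like the lines below, assuming a configured remote cluster named "local" and a restClient instance.

// Start following the "leader" index from remote cluster "local" (sketch).
Request follow = new Request("PUT", "/follower/_ccr/follow");
follow.addParameter("wait_for_active_shards", "1");
follow.setJsonEntity("{\"remote_cluster\":\"local\",\"leader_index\":\"leader\"}");
restClient.performRequest(follow);

// Pause following again so the cluster is left in a clean state.
restClient.performRequest(new Request("POST", "/follower/_ccr/pause_follow"));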
(File diff suppressed because it is too large.)
@ -1,696 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterGetSettingsResponse;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.WarningsHandler;
|
||||
import org.elasticsearch.client.cluster.RemoteConnectionInfo;
|
||||
import org.elasticsearch.client.cluster.RemoteInfoRequest;
|
||||
import org.elasticsearch.client.cluster.RemoteInfoResponse;
|
||||
import org.elasticsearch.client.indices.CreateIndexRequest;
|
||||
import org.elasticsearch.client.indices.DeleteComponentTemplateRequest;
|
||||
import org.elasticsearch.client.indices.GetComponentTemplatesRequest;
|
||||
import org.elasticsearch.client.indices.GetComponentTemplatesResponse;
|
||||
import org.elasticsearch.client.indices.PutComponentTemplateRequest;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.cluster.health.ClusterIndexHealth;
|
||||
import org.elasticsearch.cluster.health.ClusterShardHealth;
|
||||
import org.elasticsearch.cluster.metadata.AliasMetadata;
|
||||
import org.elasticsearch.cluster.metadata.ComponentTemplate;
|
||||
import org.elasticsearch.cluster.metadata.Template;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.core.TimeValue;
|
||||
import org.elasticsearch.indices.recovery.RecoverySettings;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.xcontent.XContentType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
|
||||
/**
|
||||
* Documentation for Cluster APIs in the high level java client.
|
||||
* Code wrapped in {@code tag} and {@code end} tags is included in the docs.
|
||||
*/
|
||||
@SuppressWarnings("removal")
|
||||
public class ClusterClientDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
    public void testClusterPutSettings() throws IOException {
        RestHighLevelClient client = highLevelClient();

        // tag::put-settings-request
        ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
        // end::put-settings-request

        // tag::put-settings-create-settings
        String transientSettingKey =
                RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey();
        int transientSettingValue = 10;
        Settings transientSettings =
                Settings.builder()
                .put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES)
                .build(); // <1>

        String persistentSettingKey =
                EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey();
        String persistentSettingValue =
                EnableAllocationDecider.Allocation.NONE.name();
        Settings persistentSettings =
                Settings.builder()
                .put(persistentSettingKey, persistentSettingValue)
                .build(); // <2>
        // end::put-settings-create-settings

        // tag::put-settings-request-cluster-settings
        request.transientSettings(transientSettings); // <1>
        request.persistentSettings(persistentSettings); // <2>
        // end::put-settings-request-cluster-settings

        {
            // tag::put-settings-settings-builder
            Settings.Builder transientSettingsBuilder =
                    Settings.builder()
                    .put(transientSettingKey, transientSettingValue, ByteSizeUnit.BYTES);
            request.transientSettings(transientSettingsBuilder); // <1>
            // end::put-settings-settings-builder
        }
        {
            // tag::put-settings-settings-map
            Map<String, Object> map = new HashMap<>();
            map.put(transientSettingKey
                    , transientSettingValue + ByteSizeUnit.BYTES.getSuffix());
            request.transientSettings(map); // <1>
            // end::put-settings-settings-map
        }
        {
            // tag::put-settings-settings-source
            request.transientSettings(
                    "{\"indices.recovery.max_bytes_per_sec\": \"10b\"}"
                    , XContentType.JSON); // <1>
            // end::put-settings-settings-source
        }

        // tag::put-settings-request-timeout
        request.timeout(TimeValue.timeValueMinutes(2)); // <1>
        request.timeout("2m"); // <2>
        // end::put-settings-request-timeout
        // tag::put-settings-request-masterTimeout
        request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
        request.masterNodeTimeout("1m"); // <2>
        // end::put-settings-request-masterTimeout

        RequestOptions options = RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE).build();
        // tag::put-settings-execute
        ClusterUpdateSettingsResponse response = client.cluster().putSettings(request, options);
        // end::put-settings-execute

        // tag::put-settings-response
        boolean acknowledged = response.isAcknowledged(); // <1>
        Settings transientSettingsResponse = response.getTransientSettings(); // <2>
        Settings persistentSettingsResponse = response.getPersistentSettings(); // <3>
        // end::put-settings-response
        assertTrue(acknowledged);
        assertThat(transientSettingsResponse.get(transientSettingKey), equalTo(transientSettingValue + ByteSizeUnit.BYTES.getSuffix()));
        assertThat(persistentSettingsResponse.get(persistentSettingKey), equalTo(persistentSettingValue));

        // tag::put-settings-request-reset-transient
        request.transientSettings(Settings.builder().putNull(transientSettingKey).build()); // <1>
        // end::put-settings-request-reset-transient
        request.persistentSettings(Settings.builder().putNull(persistentSettingKey));
        ClusterUpdateSettingsResponse resetResponse = client.cluster().putSettings(request, options);

        assertTrue(resetResponse.isAcknowledged());
    }

    public void testClusterUpdateSettingsAsync() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();

            // tag::put-settings-execute-listener
            ActionListener<ClusterUpdateSettingsResponse> listener =
                new ActionListener<ClusterUpdateSettingsResponse>() {
                    @Override
                    public void onResponse(ClusterUpdateSettingsResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::put-settings-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::put-settings-execute-async
            client.cluster().putSettingsAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::put-settings-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    @SuppressWarnings("unused")
    public void testClusterGetSettings() throws IOException {
        RestHighLevelClient client = highLevelClient();

        // tag::get-settings-request
        ClusterGetSettingsRequest request = new ClusterGetSettingsRequest();
        // end::get-settings-request

        // tag::get-settings-request-includeDefaults
        request.includeDefaults(true); // <1>
        // end::get-settings-request-includeDefaults

        // tag::get-settings-request-local
        request.local(true); // <1>
        // end::get-settings-request-local

        // tag::get-settings-request-masterTimeout
        request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
        request.masterNodeTimeout("1m"); // <2>
        // end::get-settings-request-masterTimeout

        // tag::get-settings-execute
        ClusterGetSettingsResponse response = client.cluster().getSettings(request, RequestOptions.DEFAULT); // <1>
        // end::get-settings-execute

        // tag::get-settings-response
        Settings persistentSettings = response.getPersistentSettings(); // <1>
        Settings transientSettings = response.getTransientSettings(); // <2>
        Settings defaultSettings = response.getDefaultSettings(); // <3>
        String settingValue = response.getSetting("cluster.routing.allocation.enable"); // <4>
        // end::get-settings-response

        assertThat(defaultSettings.size(), greaterThan(0));
    }

    public void testClusterGetSettingsAsync() throws InterruptedException {
        RestHighLevelClient client = highLevelClient();

        ClusterGetSettingsRequest request = new ClusterGetSettingsRequest();

        // tag::get-settings-execute-listener
        ActionListener<ClusterGetSettingsResponse> listener =
            new ActionListener<ClusterGetSettingsResponse>() {
                @Override
                public void onResponse(ClusterGetSettingsResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::get-settings-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::get-settings-execute-async
        client.cluster().getSettingsAsync(request, RequestOptions.DEFAULT, listener); // <1>
        // end::get-settings-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    @SuppressWarnings("unused")
    public void testClusterHealth() throws IOException {
        RestHighLevelClient client = highLevelClient();
        client.indices().create(new CreateIndexRequest("index"), RequestOptions.DEFAULT);
        {
            // tag::health-request
            ClusterHealthRequest request = new ClusterHealthRequest();
            // end::health-request
        }
        {
            // tag::health-request-indices-ctr
            ClusterHealthRequest request = new ClusterHealthRequest("index1", "index2");
            // end::health-request-indices-ctr
        }
        {
            // tag::health-request-indices-setter
            ClusterHealthRequest request = new ClusterHealthRequest();
            request.indices("index1", "index2");
            // end::health-request-indices-setter
        }
        ClusterHealthRequest request = new ClusterHealthRequest();

        // tag::health-request-timeout
        request.timeout(TimeValue.timeValueSeconds(50)); // <1>
        request.timeout("50s"); // <2>
        // end::health-request-timeout

        // tag::health-request-master-timeout
        request.masterNodeTimeout(TimeValue.timeValueSeconds(20)); // <1>
        request.masterNodeTimeout("20s"); // <2>
        // end::health-request-master-timeout

        // tag::health-request-wait-status
        request.waitForStatus(ClusterHealthStatus.YELLOW); // <1>
        request.waitForYellowStatus(); // <2>
        // end::health-request-wait-status

        // tag::health-request-wait-events
        request.waitForEvents(Priority.NORMAL); // <1>
        // end::health-request-wait-events

        // tag::health-request-level
        request.level(ClusterHealthRequest.Level.SHARDS); // <1>
        // end::health-request-level

        // tag::health-request-wait-relocation
        request.waitForNoRelocatingShards(true); // <1>
        // end::health-request-wait-relocation

        // tag::health-request-wait-initializing
        request.waitForNoInitializingShards(true); // <1>
        // end::health-request-wait-initializing

        // tag::health-request-wait-nodes
        request.waitForNodes("2"); // <1>
        request.waitForNodes(">=2"); // <2>
        request.waitForNodes("le(2)"); // <3>
        // end::health-request-wait-nodes

        // tag::health-request-wait-active
        request.waitForActiveShards(ActiveShardCount.ALL); // <1>
        request.waitForActiveShards(1); // <2>
        // end::health-request-wait-active

        // tag::health-request-local
        request.local(true); // <1>
        // end::health-request-local

        // tag::health-execute
        ClusterHealthResponse response = client.cluster().health(request, RequestOptions.DEFAULT);
        // end::health-execute

        assertThat(response.isTimedOut(), equalTo(false));
        assertThat(response.status(), equalTo(RestStatus.OK));
        assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
        assertThat(response, notNullValue());
        // tag::health-response-general
        String clusterName = response.getClusterName(); // <1>
        ClusterHealthStatus status = response.getStatus(); // <2>
        // end::health-response-general

        // tag::health-response-request-status
        boolean timedOut = response.isTimedOut(); // <1>
        RestStatus restStatus = response.status(); // <2>
        // end::health-response-request-status

        // tag::health-response-nodes
        int numberOfNodes = response.getNumberOfNodes(); // <1>
        int numberOfDataNodes = response.getNumberOfDataNodes(); // <2>
        // end::health-response-nodes

        {
            // tag::health-response-shards
            int activeShards = response.getActiveShards(); // <1>
            int activePrimaryShards = response.getActivePrimaryShards(); // <2>
            int relocatingShards = response.getRelocatingShards(); // <3>
            int initializingShards = response.getInitializingShards(); // <4>
            int unassignedShards = response.getUnassignedShards(); // <5>
            int delayedUnassignedShards = response.getDelayedUnassignedShards(); // <6>
            double activeShardsPercent = response.getActiveShardsPercent(); // <7>
            // end::health-response-shards
        }

        // tag::health-response-task
        TimeValue taskMaxWaitingTime = response.getTaskMaxWaitingTime(); // <1>
        int numberOfPendingTasks = response.getNumberOfPendingTasks(); // <2>
        int numberOfInFlightFetch = response.getNumberOfInFlightFetch(); // <3>
        // end::health-response-task

        // tag::health-response-indices
        Map<String, ClusterIndexHealth> indices = response.getIndices(); // <1>
        // end::health-response-indices

        {
            // tag::health-response-index
            ClusterIndexHealth index = indices.get("index"); // <1>
            ClusterHealthStatus indexStatus = index.getStatus();
            int numberOfShards = index.getNumberOfShards();
            int numberOfReplicas = index.getNumberOfReplicas();
            int activeShards = index.getActiveShards();
            int activePrimaryShards = index.getActivePrimaryShards();
            int initializingShards = index.getInitializingShards();
            int relocatingShards = index.getRelocatingShards();
            int unassignedShards = index.getUnassignedShards();
            // end::health-response-index

            // tag::health-response-shard-details
            Map<Integer, ClusterShardHealth> shards = index.getShards(); // <1>
            ClusterShardHealth shardHealth = shards.get(0);
            int shardId = shardHealth.getShardId();
            ClusterHealthStatus shardStatus = shardHealth.getStatus();
            int active = shardHealth.getActiveShards();
            int initializing = shardHealth.getInitializingShards();
            int unassigned = shardHealth.getUnassignedShards();
            int relocating = shardHealth.getRelocatingShards();
            boolean primaryActive = shardHealth.isPrimaryActive();
            // end::health-response-shard-details
        }
    }

    public void testClusterHealthAsync() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            ClusterHealthRequest request = new ClusterHealthRequest();

            // tag::health-execute-listener
            ActionListener<ClusterHealthResponse> listener =
                new ActionListener<ClusterHealthResponse>() {
                    @Override
                    public void onResponse(ClusterHealthResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::health-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::health-execute-async
            client.cluster().healthAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::health-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testRemoteInfo() throws Exception {
        setupRemoteClusterConfig("local_cluster");

        RestHighLevelClient client = highLevelClient();

        // tag::remote-info-request
        RemoteInfoRequest request = new RemoteInfoRequest();
        // end::remote-info-request

        // tag::remote-info-execute
        RemoteInfoResponse response = client.cluster().remoteInfo(request, RequestOptions.DEFAULT); // <1>
        // end::remote-info-execute

        // tag::remote-info-response
        List<RemoteConnectionInfo> infos = response.getInfos();
        // end::remote-info-response

        assertThat(infos.size(), greaterThan(0));
    }

    public void testRemoteInfoAsync() throws Exception {
        setupRemoteClusterConfig("local_cluster");

        RestHighLevelClient client = highLevelClient();

        // tag::remote-info-request
        RemoteInfoRequest request = new RemoteInfoRequest();
        // end::remote-info-request

        // tag::remote-info-execute-listener
        ActionListener<RemoteInfoResponse> listener =
            new ActionListener<>() {
                @Override
                public void onResponse(RemoteInfoResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::remote-info-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::health-execute-async
        client.cluster().remoteInfoAsync(request, RequestOptions.DEFAULT, listener); // <1>
        // end::health-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    public void testGetComponentTemplates() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            Template template = new Template(Settings.builder().put("index.number_of_replicas", 3).build(), null, null);
            ComponentTemplate componentTemplate = new ComponentTemplate(template, null, null);
            PutComponentTemplateRequest putComponentTemplateRequest = new PutComponentTemplateRequest().name("ct1")
                .componentTemplate(componentTemplate);
            client.cluster().putComponentTemplate(putComponentTemplateRequest, RequestOptions.DEFAULT);

            assertTrue(client.cluster().putComponentTemplate(putComponentTemplateRequest, RequestOptions.DEFAULT).isAcknowledged());
        }

        // tag::get-component-templates-request
        GetComponentTemplatesRequest request = new GetComponentTemplatesRequest("ct1"); // <1>
        // end::get-component-templates-request

        // tag::get-component-templates-request-masterTimeout
        request.setMasterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
        request.setMasterNodeTimeout("1m"); // <2>
        // end::get-component-templates-request-masterTimeout

        // tag::get-component-templates-execute
        GetComponentTemplatesResponse getTemplatesResponse = client.cluster().getComponentTemplate(request, RequestOptions.DEFAULT);
        // end::get-component-templates-execute

        // tag::get-component-templates-response
        Map<String, ComponentTemplate> templates = getTemplatesResponse.getComponentTemplates(); // <1>
        // end::get-component-templates-response

        assertThat(templates.size(), is(1));
        assertThat(templates.get("ct1"), is(notNullValue()));

        // tag::get-component-templates-execute-listener
        ActionListener<GetComponentTemplatesResponse> listener =
            new ActionListener<GetComponentTemplatesResponse>() {
                @Override
                public void onResponse(GetComponentTemplatesResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::get-component-templates-execute-listener

        // Replace the empty listener by a blocking listener in test
        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::get-component-templates-execute-async
        client.cluster().getComponentTemplateAsync(request, RequestOptions.DEFAULT, listener); // <1>
        // end::get-component-templates-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

    public void testPutComponentTemplate() throws Exception {
        RestHighLevelClient client = highLevelClient();

        {
            // tag::put-component-template-request
            PutComponentTemplateRequest request = new PutComponentTemplateRequest()
                .name("ct1"); // <1>

            Settings settings = Settings.builder()
                .put("index.number_of_shards", 3)
                .put("index.number_of_replicas", 1)
                .build();
            String mappingJson = """
                {
                  "properties": {
                    "message": {
                      "type": "text"
                    }
                  }
                }""";
            AliasMetadata twitterAlias = AliasMetadata.builder("twitter_alias").build();
            Template template = new Template(settings, new CompressedXContent(mappingJson), Map.of("twitter_alias", twitterAlias)); // <2>

            request.componentTemplate(new ComponentTemplate(template, null, null));
            assertTrue(client.cluster().putComponentTemplate(request, RequestOptions.DEFAULT).isAcknowledged());
            // end::put-component-template-request
        }

        {
            // tag::put-component-template-request-version
            PutComponentTemplateRequest request = new PutComponentTemplateRequest()
                .name("ct1");
            Settings settings = Settings.builder()
                .put("index.number_of_replicas", 3)
                .build();
            Template template = new Template(settings, null, null);

            request.componentTemplate(new ComponentTemplate(template, 3L, null)); // <1>
            assertTrue(client.cluster().putComponentTemplate(request, RequestOptions.DEFAULT).isAcknowledged());
            // end::put-component-template-request-version

            // tag::put-component-template-request-create
            request.create(true); // <1>
            // end::put-component-template-request-create

            // tag::put-component-template-request-masterTimeout
            request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1>
            // end::put-component-template-request-masterTimeout

            request.create(false); // make test happy

            // tag::put-component-template-request-execute
            AcknowledgedResponse putComponentTemplateResponse = client.cluster().putComponentTemplate(request, RequestOptions.DEFAULT);
            // end::put-component-template-request-execute

            // tag::put-component-template-response
            boolean acknowledged = putComponentTemplateResponse.isAcknowledged(); // <1>
            // end::put-component-template-response
            assertTrue(acknowledged);

            // tag::put-component-template-execute-listener
            ActionListener<AcknowledgedResponse> listener =
                new ActionListener<AcknowledgedResponse>() {
                    @Override
                    public void onResponse(AcknowledgedResponse putComponentTemplateResponse) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::put-component-template-execute-listener

            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::put-component-template-execute-async
            client.cluster().putComponentTemplateAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::put-component-template-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testDeleteComponentTemplate() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            PutComponentTemplateRequest request = new PutComponentTemplateRequest().name("ct1");

            Settings settings = Settings.builder().put("index.number_of_shards", 3).put("index.number_of_replicas", 1).build();
            String mappingJson = """
                {
                  "properties": {
                    "message": {
                      "type": "text"
                    }
                  }
                }""";
            AliasMetadata twitterAlias = AliasMetadata.builder("twitter_alias").build();
            Template template = new Template(settings, new CompressedXContent(mappingJson), Map.of("twitter_alias", twitterAlias));

            request.componentTemplate(new ComponentTemplate(template, null, null));
            assertTrue(client.cluster().putComponentTemplate(request, RequestOptions.DEFAULT).isAcknowledged());
        }

        // tag::delete-component-template-request
        DeleteComponentTemplateRequest deleteRequest = new DeleteComponentTemplateRequest("ct1"); // <1>
        // end::delete-component-template-request

        // tag::delete-component-template-request-masterTimeout
        deleteRequest.setMasterTimeout(TimeValue.timeValueMinutes(1)); // <1>
        // end::delete-component-template-request-masterTimeout

        // tag::delete-component-template-execute
        AcknowledgedResponse deleteTemplateAcknowledge = client.cluster().deleteComponentTemplate(deleteRequest, RequestOptions.DEFAULT);
        // end::delete-component-template-execute

        // tag::delete-component-template-response
        boolean acknowledged = deleteTemplateAcknowledge.isAcknowledged(); // <1>
        // end::delete-component-template-response
        assertThat(acknowledged, equalTo(true));

        {
            PutComponentTemplateRequest request = new PutComponentTemplateRequest().name("ct1");

            Settings settings = Settings.builder().put("index.number_of_shards", 3).put("index.number_of_replicas", 1).build();
            Template template = new Template(settings, null, null);
            request.componentTemplate(new ComponentTemplate(template, null, null));
            assertTrue(client.cluster().putComponentTemplate(request, RequestOptions.DEFAULT).isAcknowledged());
        }

        // tag::delete-component-template-execute-listener
        ActionListener<AcknowledgedResponse> listener =
            new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
        // end::delete-component-template-execute-listener

        final CountDownLatch latch = new CountDownLatch(1);
        listener = new LatchedActionListener<>(listener, latch);

        // tag::delete-component-template-execute-async
        client.cluster().deleteComponentTemplateAsync(deleteRequest, RequestOptions.DEFAULT, listener); // <1>
        // end::delete-component-template-execute-async

        assertTrue(latch.await(30L, TimeUnit.SECONDS));
    }

}
@@ -1,325 +0,0 @@
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.core.AcknowledgedResponse;
|
||||
import org.elasticsearch.client.enrich.DeletePolicyRequest;
|
||||
import org.elasticsearch.client.enrich.ExecutePolicyRequest;
|
||||
import org.elasticsearch.client.enrich.ExecutePolicyResponse;
|
||||
import org.elasticsearch.client.enrich.GetPolicyRequest;
|
||||
import org.elasticsearch.client.enrich.GetPolicyResponse;
|
||||
import org.elasticsearch.client.enrich.NamedPolicy;
|
||||
import org.elasticsearch.client.enrich.PutPolicyRequest;
|
||||
import org.elasticsearch.client.enrich.StatsRequest;
|
||||
import org.elasticsearch.client.enrich.StatsResponse;
|
||||
import org.elasticsearch.client.enrich.StatsResponse.CoordinatorStats;
|
||||
import org.elasticsearch.client.enrich.StatsResponse.ExecutingPolicy;
|
||||
import org.elasticsearch.client.indices.CreateIndexRequest;
|
||||
import org.junit.After;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
@SuppressWarnings("removal")
|
||||
public class EnrichDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@After
|
||||
public void cleanup() {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
DeletePolicyRequest deletePolicyRequest = new DeletePolicyRequest("users-policy");
|
||||
try {
|
||||
client.enrich().deletePolicy(deletePolicyRequest, RequestOptions.DEFAULT);
|
||||
} catch (Exception e) {
|
||||
// ignore... it is ok if policy has already been removed
|
||||
}
|
||||
}
|
||||
|
||||
public void testPutPolicy() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest("users").mapping(
|
||||
Map.of("properties", Map.of("email", Map.of("type", "keyword")))
|
||||
);
|
||||
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
|
||||
|
||||
// tag::enrich-put-policy-request
|
||||
PutPolicyRequest putPolicyRequest = new PutPolicyRequest(
|
||||
"users-policy", "match", List.of("users"),
|
||||
"email", List.of("address", "zip", "city", "state"));
|
||||
// end::enrich-put-policy-request
|
||||
|
||||
// tag::enrich-put-policy-execute
|
||||
AcknowledgedResponse putPolicyResponse =
|
||||
client.enrich().putPolicy(putPolicyRequest, RequestOptions.DEFAULT);
|
||||
// end::enrich-put-policy-execute
|
||||
|
||||
// tag::enrich-put-policy-response
|
||||
boolean isAcknowledged =
|
||||
putPolicyResponse.isAcknowledged(); // <1>
|
||||
// end::enrich-put-policy-response
|
||||
|
||||
// tag::enrich-put-policy-execute-listener
|
||||
ActionListener<AcknowledgedResponse> listener = new ActionListener<>() {
|
||||
@Override
|
||||
public void onResponse(AcknowledgedResponse response) { // <1>
|
||||
boolean isAcknowledged = response.isAcknowledged();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::enrich-put-policy-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::enrich-put-policy-execute-async
|
||||
client.enrich().putPolicyAsync(putPolicyRequest,
|
||||
RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::enrich-put-policy-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testDeletePolicy() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest("users").mapping(
|
||||
Map.of("properties", Map.of("email", Map.of("type", "keyword")))
|
||||
);
|
||||
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
|
||||
|
||||
// Add a policy, so that it can be deleted:
|
||||
PutPolicyRequest putPolicyRequest = new PutPolicyRequest(
|
||||
"users-policy",
|
||||
"match",
|
||||
List.of("users"),
|
||||
"email",
|
||||
List.of("address", "zip", "city", "state")
|
||||
);
|
||||
client.enrich().putPolicy(putPolicyRequest, RequestOptions.DEFAULT);
|
||||
}
|
||||
|
||||
// tag::enrich-delete-policy-request
|
||||
DeletePolicyRequest deletePolicyRequest =
|
||||
new DeletePolicyRequest("users-policy");
|
||||
// end::enrich-delete-policy-request
|
||||
|
||||
// tag::enrich-delete-policy-execute
|
||||
AcknowledgedResponse deletePolicyResponse = client.enrich()
|
||||
.deletePolicy(deletePolicyRequest, RequestOptions.DEFAULT);
|
||||
// end::enrich-delete-policy-execute
|
||||
|
||||
// tag::enrich-delete-policy-response
|
||||
boolean isAcknowledged =
|
||||
deletePolicyResponse.isAcknowledged(); // <1>
|
||||
// end::enrich-delete-policy-response
|
||||
|
||||
// tag::enrich-delete-policy-execute-listener
|
||||
ActionListener<AcknowledgedResponse> listener = new ActionListener<>() {
|
||||
@Override
|
||||
public void onResponse(AcknowledgedResponse response) { // <1>
|
||||
boolean isAcknowledged = response.isAcknowledged();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::enrich-delete-policy-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::enrich-delete-policy-execute-async
|
||||
client.enrich().deletePolicyAsync(deletePolicyRequest,
|
||||
RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::enrich-delete-policy-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testGetPolicy() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest("users").mapping(
|
||||
Map.of("properties", Map.of("email", Map.of("type", "keyword")))
|
||||
);
|
||||
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
|
||||
|
||||
PutPolicyRequest putPolicyRequest = new PutPolicyRequest(
|
||||
"users-policy",
|
||||
"match",
|
||||
List.of("users"),
|
||||
"email",
|
||||
List.of("address", "zip", "city", "state")
|
||||
);
|
||||
client.enrich().putPolicy(putPolicyRequest, RequestOptions.DEFAULT);
|
||||
|
||||
// tag::enrich-get-policy-request
|
||||
GetPolicyRequest getPolicyRequest = new GetPolicyRequest("users-policy");
|
||||
// end::enrich-get-policy-request
|
||||
|
||||
// tag::enrich-get-policy-execute
|
||||
GetPolicyResponse getPolicyResponse =
|
||||
client.enrich().getPolicy(getPolicyRequest, RequestOptions.DEFAULT);
|
||||
// end::enrich-get-policy-execute
|
||||
|
||||
// tag::enrich-get-policy-response
|
||||
List<NamedPolicy> policies = getPolicyResponse.getPolicies(); // <1>
|
||||
NamedPolicy policy = policies.get(0);
|
||||
// end::enrich-get-policy-response
|
||||
|
||||
// tag::enrich-get-policy-execute-listener
|
||||
ActionListener<GetPolicyResponse> listener = new ActionListener<>() {
|
||||
@Override
|
||||
public void onResponse(GetPolicyResponse response) { // <1>
|
||||
List<NamedPolicy> policies = response.getPolicies();
|
||||
NamedPolicy policy = policies.get(0);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::enrich-get-policy-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::enrich-get-policy-execute-async
|
||||
client.enrich().getPolicyAsync(getPolicyRequest,
|
||||
RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::enrich-get-policy-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testStats() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
// tag::enrich-stats-request
|
||||
StatsRequest statsRequest = new StatsRequest();
|
||||
// end::enrich-stats-request
|
||||
|
||||
// tag::enrich-stats-execute
|
||||
StatsResponse statsResponse =
|
||||
client.enrich().stats(statsRequest, RequestOptions.DEFAULT);
|
||||
// end::enrich-stats-execute
|
||||
|
||||
// tag::enrich-stats-response
|
||||
List<ExecutingPolicy> executingPolicies =
|
||||
statsResponse.getExecutingPolicies(); // <1>
|
||||
List<CoordinatorStats> coordinatorStats =
|
||||
statsResponse.getCoordinatorStats(); // <2>
|
||||
// end::enrich-stats-response
|
||||
|
||||
// tag::enrich-stats-execute-listener
|
||||
ActionListener<StatsResponse> listener = new ActionListener<>() {
|
||||
@Override
|
||||
public void onResponse(StatsResponse response) { // <1>
|
||||
List<ExecutingPolicy> executingPolicies =
|
||||
statsResponse.getExecutingPolicies();
|
||||
List<CoordinatorStats> coordinatorStats =
|
||||
statsResponse.getCoordinatorStats();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::enrich-stats-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::enrich-stats-execute-async
|
||||
client.enrich().statsAsync(statsRequest, RequestOptions.DEFAULT,
|
||||
listener); // <1>
|
||||
// end::enrich-stats-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testExecutePolicy() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest("users").mapping(
|
||||
Map.of("properties", Map.of("email", Map.of("type", "keyword")))
|
||||
);
|
||||
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
|
||||
PutPolicyRequest putPolicyRequest = new PutPolicyRequest(
|
||||
"users-policy",
|
||||
"match",
|
||||
List.of("users"),
|
||||
"email",
|
||||
List.of("address", "zip", "city", "state")
|
||||
);
|
||||
client.enrich().putPolicy(putPolicyRequest, RequestOptions.DEFAULT);
|
||||
}
|
||||
|
||||
// tag::enrich-execute-policy-request
|
||||
ExecutePolicyRequest request =
|
||||
new ExecutePolicyRequest("users-policy");
|
||||
// end::enrich-execute-policy-request
|
||||
|
||||
// tag::enrich-execute-policy-execute
|
||||
ExecutePolicyResponse response =
|
||||
client.enrich().executePolicy(request, RequestOptions.DEFAULT);
|
||||
// end::enrich-execute-policy-execute
|
||||
|
||||
// tag::enrich-execute-policy-response
|
||||
ExecutePolicyResponse.ExecutionStatus status =
|
||||
response.getExecutionStatus();
|
||||
// end::enrich-execute-policy-response
|
||||
|
||||
// tag::enrich-execute-policy-execute-listener
|
||||
ActionListener<ExecutePolicyResponse> listener = new ActionListener<>() {
|
||||
@Override
|
||||
public void onResponse(ExecutePolicyResponse response) { // <1>
|
||||
ExecutePolicyResponse.ExecutionStatus status =
|
||||
response.getExecutionStatus();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::enrich-execute-policy-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::enrich-execute-policy-execute-async
|
||||
client.enrich().executePolicyAsync(request, RequestOptions.DEFAULT,
|
||||
listener); // <1>
|
||||
// end::enrich-execute-policy-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
}
@@ -1,112 +0,0 @@
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.apache.http.client.methods.HttpPost;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.graph.Connection;
|
||||
import org.elasticsearch.client.graph.GraphExploreRequest;
|
||||
import org.elasticsearch.client.graph.GraphExploreResponse;
|
||||
import org.elasticsearch.client.graph.Hop;
|
||||
import org.elasticsearch.client.graph.Vertex;
|
||||
import org.elasticsearch.client.graph.VertexRequest;
|
||||
import org.elasticsearch.core.SuppressForbidden;
|
||||
import org.elasticsearch.index.query.TermQueryBuilder;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
|
||||
@SuppressWarnings("removal")
|
||||
public class GraphDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@Before
|
||||
public void indexDocuments() throws IOException {
|
||||
// Create chain of doc IDs across indices 1->2->3
|
||||
Request doc1 = new Request(HttpPut.METHOD_NAME, "/index1/_doc/1");
|
||||
doc1.setJsonEntity("""
|
||||
{"participants":[1,2], "text":"let's start projectx", "attachment_md5":"324FHDGHFDG4564"}""");
|
||||
client().performRequest(doc1);
|
||||
|
||||
Request doc2 = new Request(HttpPut.METHOD_NAME, "/index2/_doc/2");
|
||||
doc2.setJsonEntity("""
|
||||
{"participants":[2,3,4], "text":"got something you both may be interested in"}""");
|
||||
client().performRequest(doc2);
|
||||
|
||||
client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh"));
|
||||
}
|
||||
|
||||
@SuppressForbidden(reason = "system out is ok for a documentation example")
|
||||
public void testExplore() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
// tag::x-pack-graph-explore-request
|
||||
GraphExploreRequest request = new GraphExploreRequest();
|
||||
request.indices("index1", "index2");
|
||||
request.useSignificance(false);
|
||||
TermQueryBuilder startingQuery = new TermQueryBuilder("text", "projectx");
|
||||
|
||||
Hop hop1 = request.createNextHop(startingQuery); // <1>
|
||||
VertexRequest people = hop1.addVertexRequest("participants"); // <2>
|
||||
people.minDocCount(1);
|
||||
VertexRequest files = hop1.addVertexRequest("attachment_md5");
|
||||
files.minDocCount(1);
|
||||
|
||||
Hop hop2 = request.createNextHop(null); // <3>
|
||||
VertexRequest vr2 = hop2.addVertexRequest("participants");
|
||||
vr2.minDocCount(5);
|
||||
|
||||
GraphExploreResponse exploreResponse = client.graph().explore(request, RequestOptions.DEFAULT); // <4>
|
||||
// end::x-pack-graph-explore-request
|
||||
|
||||
// tag::x-pack-graph-explore-response
|
||||
Collection<Vertex> v = exploreResponse.getVertices();
|
||||
Collection<Connection> c = exploreResponse.getConnections();
|
||||
for (Vertex vertex : v) {
|
||||
System.out.println(vertex.getField() + ":" + vertex.getTerm() + // <1>
|
||||
" discovered at hop depth " + vertex.getHopDepth());
|
||||
}
|
||||
for (Connection link : c) {
|
||||
System.out.println(link.getFrom() + " -> " + link.getTo() // <2>
|
||||
+ " evidenced by " + link.getDocCount() + " docs");
|
||||
}
|
||||
// end::x-pack-graph-explore-response
|
||||
|
||||
Collection<Vertex> initialVertices = exploreResponse.getVertices();
|
||||
|
||||
// tag::x-pack-graph-explore-expand
|
||||
GraphExploreRequest expandRequest = new GraphExploreRequest();
|
||||
expandRequest.indices("index1", "index2");
|
||||
|
||||
|
||||
Hop expandHop1 = expandRequest.createNextHop(null); // <1>
|
||||
VertexRequest fromPeople = expandHop1.addVertexRequest("participants"); // <2>
|
||||
for (Vertex vertex : initialVertices) {
|
||||
if (vertex.getField().equals("participants")) {
|
||||
fromPeople.addInclude(vertex.getTerm(), 1f);
|
||||
}
|
||||
}
|
||||
|
||||
Hop expandHop2 = expandRequest.createNextHop(null);
|
||||
VertexRequest newPeople = expandHop2.addVertexRequest("participants"); // <3>
|
||||
for (Vertex vertex : initialVertices) {
|
||||
if (vertex.getField().equals("participants")) {
|
||||
newPeople.addExclude(vertex.getTerm());
|
||||
}
|
||||
}
|
||||
|
||||
GraphExploreResponse expandResponse = client.graph().explore(expandRequest, RequestOptions.DEFAULT);
|
||||
// end::x-pack-graph-explore-expand
|
||||
|
||||
}
|
||||
|
||||
}
|
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,418 +0,0 @@
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.ingest.DeletePipelineRequest;
|
||||
import org.elasticsearch.action.ingest.GetPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.GetPipelineResponse;
|
||||
import org.elasticsearch.action.ingest.PutPipelineRequest;
|
||||
import org.elasticsearch.action.ingest.SimulateDocumentBaseResult;
|
||||
import org.elasticsearch.action.ingest.SimulateDocumentResult;
|
||||
import org.elasticsearch.action.ingest.SimulateDocumentVerboseResult;
|
||||
import org.elasticsearch.action.ingest.SimulatePipelineRequest;
|
||||
import org.elasticsearch.action.ingest.SimulatePipelineResponse;
|
||||
import org.elasticsearch.action.ingest.SimulateProcessorResult;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.core.TimeValue;
|
||||
import org.elasticsearch.ingest.PipelineConfiguration;
|
||||
import org.elasticsearch.xcontent.XContentType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* This class is used to generate the Java Ingest API documentation.
|
||||
* You need to wrap your code between two tags like:
|
||||
* // tag::example
|
||||
* // end::example
|
||||
*
|
||||
* Where example is your tag name.
|
||||
*
|
||||
* Then in the documentation, you can extract what is between tag and end tags with
|
||||
* ["source","java",subs="attributes,callouts,macros"]
|
||||
* --------------------------------------------------
|
||||
* include-tagged::{doc-tests}/IngestClientDocumentationIT.java[example]
|
||||
* --------------------------------------------------
|
||||
*
|
||||
* The column width of the code block is 84. If the code contains a line longer
|
||||
* than 84, the line will be cut and a horizontal scroll bar will be displayed.
|
||||
* (the code indentation of the tag is not included in the width)
|
||||
*/
|
||||
@SuppressWarnings("removal")
|
||||
public class IngestClientDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
public void testPutPipeline() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
// tag::put-pipeline-request
|
||||
String source = """
|
||||
{
|
||||
"description": "my set of processors",
|
||||
"processors": [
|
||||
{
|
||||
"set": {
|
||||
"field": "foo",
|
||||
"value": "bar"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
""";
|
||||
PutPipelineRequest request = new PutPipelineRequest(
|
||||
"my-pipeline-id", // <1>
|
||||
new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <2>
|
||||
XContentType.JSON // <3>
|
||||
);
|
||||
// end::put-pipeline-request
|
||||
|
||||
// tag::put-pipeline-request-timeout
|
||||
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
|
||||
request.timeout("2m"); // <2>
|
||||
// end::put-pipeline-request-timeout
|
||||
|
||||
// tag::put-pipeline-request-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::put-pipeline-request-masterTimeout
|
||||
|
||||
// tag::put-pipeline-execute
|
||||
AcknowledgedResponse response = client.ingest().putPipeline(request, RequestOptions.DEFAULT); // <1>
|
||||
// end::put-pipeline-execute
|
||||
|
||||
// tag::put-pipeline-response
|
||||
boolean acknowledged = response.isAcknowledged(); // <1>
|
||||
// end::put-pipeline-response
|
||||
assertTrue(acknowledged);
|
||||
}
|
||||
}
|
||||
|
||||
public void testPutPipelineAsync() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
String source = """
|
||||
{
|
||||
"description": "my set of processors",
|
||||
"processors": [ { "set": { "field": "foo", "value": "bar" } } ]
|
||||
}""";
|
||||
PutPipelineRequest request = new PutPipelineRequest(
|
||||
"my-pipeline-id",
|
||||
new BytesArray(source.getBytes(StandardCharsets.UTF_8)),
|
||||
XContentType.JSON
|
||||
);
|
||||
|
||||
// tag::put-pipeline-execute-listener
|
||||
ActionListener<AcknowledgedResponse> listener =
|
||||
new ActionListener<AcknowledgedResponse>() {
|
||||
@Override
|
||||
public void onResponse(AcknowledgedResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::put-pipeline-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::put-pipeline-execute-async
|
||||
client.ingest().putPipelineAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::put-pipeline-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public void testGetPipeline() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
createPipeline("my-pipeline-id");
|
||||
}
|
||||
|
||||
{
|
||||
// tag::get-pipeline-request
|
||||
GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id"); // <1>
|
||||
// end::get-pipeline-request
|
||||
|
||||
// tag::get-pipeline-request-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::get-pipeline-request-masterTimeout
|
||||
|
||||
// tag::get-pipeline-execute
|
||||
GetPipelineResponse response = client.ingest().getPipeline(request, RequestOptions.DEFAULT); // <1>
|
||||
// end::get-pipeline-execute
|
||||
|
||||
// tag::get-pipeline-response
|
||||
boolean successful = response.isFound(); // <1>
|
||||
List<PipelineConfiguration> pipelines = response.pipelines(); // <2>
|
||||
for(PipelineConfiguration pipeline: pipelines) {
|
||||
Map<String, Object> config = pipeline.getConfigAsMap(); // <3>
|
||||
}
|
||||
// end::get-pipeline-response
|
||||
|
||||
assertTrue(successful);
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetPipelineAsync() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
createPipeline("my-pipeline-id");
|
||||
}
|
||||
|
||||
{
|
||||
GetPipelineRequest request = new GetPipelineRequest("my-pipeline-id");
|
||||
|
||||
// tag::get-pipeline-execute-listener
|
||||
ActionListener<GetPipelineResponse> listener =
|
||||
new ActionListener<GetPipelineResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetPipelineResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::get-pipeline-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::get-pipeline-execute-async
|
||||
client.ingest().getPipelineAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::get-pipeline-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDeletePipeline() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
createPipeline("my-pipeline-id");
|
||||
}
|
||||
|
||||
{
|
||||
// tag::delete-pipeline-request
|
||||
DeletePipelineRequest request = new DeletePipelineRequest("my-pipeline-id"); // <1>
|
||||
// end::delete-pipeline-request
|
||||
|
||||
// tag::delete-pipeline-request-timeout
|
||||
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
|
||||
request.timeout("2m"); // <2>
|
||||
// end::delete-pipeline-request-timeout
|
||||
|
||||
// tag::delete-pipeline-request-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::delete-pipeline-request-masterTimeout
|
||||
|
||||
// tag::delete-pipeline-execute
|
||||
AcknowledgedResponse response = client.ingest().deletePipeline(request, RequestOptions.DEFAULT); // <1>
|
||||
// end::delete-pipeline-execute
|
||||
|
||||
// tag::delete-pipeline-response
|
||||
boolean acknowledged = response.isAcknowledged(); // <1>
|
||||
// end::delete-pipeline-response
|
||||
assertTrue(acknowledged);
|
||||
}
|
||||
}
|
||||
|
||||
public void testDeletePipelineAsync() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
createPipeline("my-pipeline-id");
|
||||
}
|
||||
|
||||
{
|
||||
DeletePipelineRequest request = new DeletePipelineRequest("my-pipeline-id");
|
||||
|
||||
// tag::delete-pipeline-execute-listener
|
||||
ActionListener<AcknowledgedResponse> listener =
|
||||
new ActionListener<AcknowledgedResponse>() {
|
||||
@Override
|
||||
public void onResponse(AcknowledgedResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::delete-pipeline-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::delete-pipeline-execute-async
|
||||
client.ingest().deletePipelineAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::delete-pipeline-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testSimulatePipeline() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
// tag::simulate-pipeline-request
|
||||
String source = """
|
||||
{
|
||||
"pipeline": {
|
||||
"description": "_description",
|
||||
"processors": [ { "set": { "field": "field2", "value": "_value" } } ]
|
||||
},
|
||||
"docs": [
|
||||
{
|
||||
"_index": "index",
|
||||
"_id": "id",
|
||||
"_source": {
|
||||
"foo": "bar"
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index": "index",
|
||||
"_id": "id",
|
||||
"_source": {
|
||||
"foo": "rab"
|
||||
}
|
||||
}
|
||||
]
|
||||
}""";
|
||||
SimulatePipelineRequest request = new SimulatePipelineRequest(
|
||||
new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <1>
|
||||
XContentType.JSON // <2>
|
||||
);
|
||||
// end::simulate-pipeline-request
|
||||
|
||||
// tag::simulate-pipeline-request-pipeline-id
|
||||
request.setId("my-pipeline-id"); // <1>
|
||||
// end::simulate-pipeline-request-pipeline-id
|
||||
|
||||
// For testing we set this back to null
|
||||
request.setId(null);
|
||||
|
||||
// tag::simulate-pipeline-request-verbose
|
||||
request.setVerbose(true); // <1>
|
||||
// end::simulate-pipeline-request-verbose
|
||||
|
||||
// tag::simulate-pipeline-execute
|
||||
SimulatePipelineResponse response = client.ingest().simulate(request, RequestOptions.DEFAULT); // <1>
|
||||
// end::simulate-pipeline-execute
|
||||
|
||||
// tag::simulate-pipeline-response
|
||||
for (SimulateDocumentResult result: response.getResults()) { // <1>
|
||||
if (request.isVerbose()) {
|
||||
assert result instanceof SimulateDocumentVerboseResult;
|
||||
SimulateDocumentVerboseResult verboseResult = (SimulateDocumentVerboseResult)result; // <2>
|
||||
for (SimulateProcessorResult processorResult: verboseResult.getProcessorResults()) { // <3>
|
||||
processorResult.getIngestDocument(); // <4>
|
||||
processorResult.getFailure(); // <5>
|
||||
}
|
||||
} else {
|
||||
assert result instanceof SimulateDocumentBaseResult;
|
||||
SimulateDocumentBaseResult baseResult = (SimulateDocumentBaseResult)result; // <6>
|
||||
baseResult.getIngestDocument(); // <7>
|
||||
baseResult.getFailure(); // <8>
|
||||
}
|
||||
}
|
||||
// end::simulate-pipeline-response
|
||||
assert (response.getResults().size() > 0);
|
||||
}
|
||||
}
|
||||
|
||||
public void testSimulatePipelineAsync() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
String source = """
|
||||
{
|
||||
"pipeline": {
|
||||
"description": "_description",
|
||||
"processors": [ { "set": { "field": "field2", "value": "_value" } } ]
|
||||
},
|
||||
"docs": [
|
||||
{
|
||||
"_index": "index",
|
||||
"_id": "id",
|
||||
"_source": {
|
||||
"foo": "bar"
|
||||
}
|
||||
},
|
||||
{
|
||||
"_index": "index",
|
||||
"_id": "id",
|
||||
"_source": {
|
||||
"foo": "rab"
|
||||
}
|
||||
}
|
||||
]
|
||||
}""";
|
||||
SimulatePipelineRequest request = new SimulatePipelineRequest(
|
||||
new BytesArray(source.getBytes(StandardCharsets.UTF_8)),
|
||||
XContentType.JSON
|
||||
);
|
||||
|
||||
// tag::simulate-pipeline-execute-listener
|
||||
ActionListener<SimulatePipelineResponse> listener =
|
||||
new ActionListener<SimulatePipelineResponse>() {
|
||||
@Override
|
||||
public void onResponse(SimulatePipelineResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::simulate-pipeline-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::simulate-pipeline-execute-async
|
||||
client.ingest().simulateAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::simulate-pipeline-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
}
@@ -1,366 +0,0 @@
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.Build;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.license.DeleteLicenseRequest;
|
||||
import org.elasticsearch.client.license.GetBasicStatusResponse;
|
||||
import org.elasticsearch.client.license.GetLicenseRequest;
|
||||
import org.elasticsearch.client.license.GetLicenseResponse;
|
||||
import org.elasticsearch.client.license.GetTrialStatusResponse;
|
||||
import org.elasticsearch.client.license.LicensesStatus;
|
||||
import org.elasticsearch.client.license.PutLicenseRequest;
|
||||
import org.elasticsearch.client.license.PutLicenseResponse;
|
||||
import org.elasticsearch.client.license.StartBasicRequest;
|
||||
import org.elasticsearch.client.license.StartBasicResponse;
|
||||
import org.elasticsearch.client.license.StartTrialRequest;
|
||||
import org.elasticsearch.client.license.StartTrialResponse;
|
||||
import org.elasticsearch.core.Booleans;
|
||||
import org.junit.After;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.elasticsearch.client.LicenseIT.putTrialLicense;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.endsWith;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
import static org.hamcrest.Matchers.startsWith;
|
||||
import static org.hamcrest.core.Is.is;
|
||||
|
||||
/**
|
||||
* Documentation for Licensing APIs in the high level java client.
|
||||
* Code wrapped in {@code tag} and {@code end} tags is included in the docs.
|
||||
*/
|
||||
@SuppressWarnings("removal")
|
||||
public class LicensingDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@BeforeClass
|
||||
public static void checkForSnapshot() {
|
||||
assumeTrue("Trial license used to rollback is only valid when tested against snapshot/test builds", Build.CURRENT.isSnapshot());
|
||||
}
|
||||
|
||||
@After
|
||||
public void rollbackToTrial() throws IOException {
|
||||
putTrialLicense();
|
||||
}
|
||||
|
||||
public void testLicense() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
String license = """
|
||||
{
|
||||
"license": {
|
||||
"uid": "893361dc-9749-4997-93cb-802e3d7fa4a8",
|
||||
"type": "gold",
|
||||
"issue_date_in_millis": 1411948800000,
|
||||
"expiry_date_in_millis": 1914278399999,
|
||||
"max_nodes": 1,
|
||||
"issued_to": "issued_to",
|
||||
"issuer": "issuer",
|
||||
"signature": "AAAAAgAAAA3U8+YmnvwC+CWsV/mRAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2\
|
||||
MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeG\
|
||||
wxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhm\
|
||||
Uk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacU\
|
||||
YyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQBe8GfzDm6T537Iuuvjetb3xK5dvg0K\
|
||||
5NQapv+rczWcQFxgCuzbF8plkgetP1aAGZP4uRESDQPMlOCsx4d0UqqAm9f7GbBQ3l93P+PogInPFeEH9NvOmaAQovmxVM9SE6DsDqlX4cXSO+bgWpXPTd2LmpoQc1\
|
||||
fXd6BZ8GeuyYpVHVKp9hVU0tAYjw6HzYOE7+zuO1oJYOxElqy66AnIfkvHrvni+flym3tE7tDTgsDRaz7W3iBhaqiSntEqabEkvHdPHQdSR99XGaEvnHO1paK01/35\
|
||||
iZF6OXHsF7CCj+558GRXiVxzueOe7TsGSSt8g7YjZwV9bRCyU7oB4B/nidgI"
|
||||
}
|
||||
}""";
|
||||
{
|
||||
//tag::put-license-execute
|
||||
PutLicenseRequest request = new PutLicenseRequest();
|
||||
request.setLicenseDefinition(license); // <1>
|
||||
request.setAcknowledge(false); // <2>
|
||||
|
||||
PutLicenseResponse response = client.license().putLicense(request, RequestOptions.DEFAULT);
|
||||
//end::put-license-execute
|
||||
|
||||
//tag::put-license-response
|
||||
LicensesStatus status = response.status(); // <1>
|
||||
assertEquals(status, LicensesStatus.VALID); // <2>
|
||||
boolean acknowledged = response.isAcknowledged(); // <3>
|
||||
String acknowledgeHeader = response.acknowledgeHeader(); // <4>
|
||||
Map<String, String[]> acknowledgeMessages = response.acknowledgeMessages(); // <5>
|
||||
//end::put-license-response
|
||||
|
||||
assertFalse(acknowledged); // Should fail because we are trying to downgrade from platinum trial to gold
|
||||
assertThat(acknowledgeHeader, startsWith("This license update requires acknowledgement."));
|
||||
assertThat(acknowledgeMessages.keySet(), not(hasSize(0)));
|
||||
}
|
||||
{
|
||||
PutLicenseRequest request = new PutLicenseRequest();
|
||||
// tag::put-license-execute-listener
|
||||
ActionListener<PutLicenseResponse> listener = new ActionListener<PutLicenseResponse>() {
|
||||
@Override
|
||||
public void onResponse(PutLicenseResponse putLicenseResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::put-license-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::put-license-execute-async
|
||||
client.license().putLicenseAsync(
|
||||
request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::put-license-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
// we cannot actually delete the license, otherwise the remaining tests won't work
|
||||
if (Booleans.isTrue("true")) {
|
||||
return;
|
||||
}
|
||||
{
|
||||
//tag::delete-license-execute
|
||||
DeleteLicenseRequest request = new DeleteLicenseRequest();
|
||||
|
||||
AcknowledgedResponse response = client.license().deleteLicense(request, RequestOptions.DEFAULT);
|
||||
//end::delete-license-execute
|
||||
|
||||
//tag::delete-license-response
|
||||
boolean acknowledged = response.isAcknowledged(); // <1>
|
||||
//end::delete-license-response
|
||||
|
||||
assertTrue(acknowledged);
|
||||
}
|
||||
{
|
||||
DeleteLicenseRequest request = new DeleteLicenseRequest();
|
||||
// tag::delete-license-execute-listener
|
||||
ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
|
||||
@Override
|
||||
public void onResponse(AcknowledgedResponse deleteLicenseResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::delete-license-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::delete-license-execute-async
|
||||
client.license().deleteLicenseAsync(
|
||||
request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::delete-license-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
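// A sketch (not from the original test, assuming the same `client` and `license` string) of
// the acknowledge round trip implied by the assertions above: when putLicense comes back
// unacknowledged, inspect the per-feature messages and resubmit with acknowledge(true) if
// the warnings are acceptable.
PutLicenseRequest retry = new PutLicenseRequest();
retry.setLicenseDefinition(license);
retry.setAcknowledge(false);
PutLicenseResponse first = client.license().putLicense(retry, RequestOptions.DEFAULT);
if (first.isAcknowledged() == false) {
    first.acknowledgeMessages().forEach((feature, messages) -> {
        // surface the warnings (e.g. features that would be disabled) before proceeding
    });
    retry.setAcknowledge(true);
    PutLicenseResponse second = client.license().putLicense(retry, RequestOptions.DEFAULT);
}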
|
||||
|
||||
public void testGetLicense() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::get-license-execute
|
||||
GetLicenseRequest request = new GetLicenseRequest();
|
||||
|
||||
GetLicenseResponse response = client.license().getLicense(request, RequestOptions.DEFAULT);
|
||||
//end::get-license-execute
|
||||
|
||||
//tag::get-license-response
|
||||
String currentLicense = response.getLicenseDefinition(); // <1>
|
||||
//end::get-license-response
|
||||
|
||||
assertThat(currentLicense, containsString("trial"));
|
||||
assertThat(currentLicense, containsString("ntegTest"));
|
||||
}
|
||||
{
|
||||
GetLicenseRequest request = new GetLicenseRequest();
|
||||
// tag::get-license-execute-listener
|
||||
ActionListener<GetLicenseResponse> listener = new ActionListener<GetLicenseResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetLicenseResponse indexResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::get-license-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::get-license-execute-async
|
||||
client.license().getLicenseAsync(
|
||||
request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::get-license-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
{
|
||||
GetLicenseRequest request = new GetLicenseRequest();
|
||||
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
|
||||
// Make sure that it still works in other formats
|
||||
builder.addHeader("Accept", randomFrom("application/smile", "application/cbor"));
|
||||
RequestOptions options = builder.build();
|
||||
GetLicenseResponse response = client.license().getLicense(request, options);
|
||||
String currentLicense = response.getLicenseDefinition();
|
||||
assertThat(currentLicense, startsWith("{"));
|
||||
assertThat(currentLicense, containsString("trial"));
|
||||
assertThat(currentLicense, containsString("ntegTest"));
|
||||
assertThat(currentLicense, endsWith("}"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testStartTrial() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
// tag::start-trial-execute
|
||||
StartTrialRequest request = new StartTrialRequest(true); // <1>
|
||||
|
||||
StartTrialResponse response = client.license().startTrial(request, RequestOptions.DEFAULT);
|
||||
// end::start-trial-execute
|
||||
|
||||
// tag::start-trial-response
|
||||
boolean acknowledged = response.isAcknowledged(); // <1>
|
||||
boolean trialWasStarted = response.isTrialWasStarted(); // <2>
|
||||
String licenseType = response.getLicenseType(); // <3>
|
||||
String errorMessage = response.getErrorMessage(); // <4>
|
||||
String acknowledgeHeader = response.getAcknowledgeHeader(); // <5>
|
||||
Map<String, String[]> acknowledgeMessages = response.getAcknowledgeMessages(); // <6>
|
||||
// end::start-trial-response
|
||||
|
||||
assertTrue(acknowledged);
|
||||
assertFalse(trialWasStarted);
|
||||
assertThat(licenseType, nullValue());
|
||||
assertThat(errorMessage, is("Operation failed: Trial was already activated."));
|
||||
assertThat(acknowledgeHeader, nullValue());
|
||||
assertThat(acknowledgeMessages, nullValue());
|
||||
}
|
||||
|
||||
{
|
||||
StartTrialRequest request = new StartTrialRequest();
|
||||
|
||||
// tag::start-trial-execute-listener
|
||||
ActionListener<StartTrialResponse> listener = new ActionListener<StartTrialResponse>() {
|
||||
@Override
|
||||
public void onResponse(StartTrialResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::start-trial-execute-listener
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::start-trial-execute-async
|
||||
client.license().startTrialAsync(request, RequestOptions.DEFAULT, listener);
|
||||
// end::start-trial-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testPostStartBasic() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::start-basic-execute
|
||||
StartBasicRequest request = new StartBasicRequest();
|
||||
|
||||
StartBasicResponse response = client.license().startBasic(request, RequestOptions.DEFAULT);
|
||||
//end::start-basic-execute
|
||||
|
||||
//tag::start-basic-response
|
||||
boolean acknowledged = response.isAcknowledged(); // <1>
|
||||
boolean basicStarted = response.isBasicStarted(); // <2>
|
||||
String errorMessage = response.getErrorMessage(); // <3>
|
||||
String acknowledgeMessage = response.getAcknowledgeMessage(); // <4>
|
||||
Map<String, String[]> acknowledgeMessages = response.getAcknowledgeMessages(); // <5>
|
||||
//end::start-basic-response
|
||||
}
|
||||
{
|
||||
StartBasicRequest request = new StartBasicRequest();
|
||||
// tag::start-basic-listener
|
||||
ActionListener<StartBasicResponse> listener = new ActionListener<StartBasicResponse>() {
|
||||
@Override
|
||||
public void onResponse(StartBasicResponse indexResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::start-basic-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::start-basic-execute-async
|
||||
client.license().startBasicAsync(
|
||||
request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::start-basic-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
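// A short sketch (assuming the same `client`) of acting on the start-basic response fields
// read above; getErrorMessage() explains a refusal, for example when basic is already active.
StartBasicResponse basicResponse = client.license().startBasic(new StartBasicRequest(), RequestOptions.DEFAULT);
if (basicResponse.isBasicStarted() == false) {
    String reason = basicResponse.getErrorMessage();
    Map<String, String[]> basicWarnings = basicResponse.getAcknowledgeMessages();
}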
|
||||
|
||||
public void testGetTrialStatus() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::get-trial-status-execute
|
||||
GetTrialStatusResponse response = client.license().getTrialStatus(RequestOptions.DEFAULT);
|
||||
//end::get-trial-status-execute
|
||||
|
||||
//tag::get-trial-status-response
|
||||
boolean eligibleToStartTrial = response.isEligibleToStartTrial(); // <1>
|
||||
//end::get-trial-status-response
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetBasicStatus() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::get-basic-status-execute
|
||||
GetBasicStatusResponse response = client.license().getBasicStatus(RequestOptions.DEFAULT);
|
||||
//end::get-basic-status-execute
|
||||
|
||||
//tag::get-basic-status-response
|
||||
boolean eligibleToStartbasic = response.isEligibleToStartBasic(); // <1>
|
||||
//end::get-basic-status-response
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,106 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.migration.DeprecationInfoRequest;
|
||||
import org.elasticsearch.client.migration.DeprecationInfoResponse;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* This class is used to generate the Java Migration API documentation.
|
||||
* You need to wrap your code between two tags like:
|
||||
* // tag::example
|
||||
* // end::example
|
||||
*
|
||||
* Where example is your tag name.
|
||||
*
|
||||
* Then in the documentation, you can extract what is between tag and end tags with
|
||||
* ["source","java",subs="attributes,callouts,macros"]
|
||||
* --------------------------------------------------
|
||||
* include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[example]
|
||||
* --------------------------------------------------
|
||||
*
|
||||
* The column width of the code block is 84. If the code contains a line longer
|
||||
* than 84, the line will be cut and a horizontal scroll bar will be displayed.
|
||||
* (the code indentation of the tag is not included in the width)
|
||||
*/
|
||||
@SuppressWarnings("removal")
|
||||
public class MigrationClientDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
public void testGetDeprecationInfo() throws IOException, InterruptedException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
createIndex("test", Settings.EMPTY);
|
||||
|
||||
//tag::get-deprecation-info-request
|
||||
List<String> indices = new ArrayList<>();
|
||||
indices.add("test");
|
||||
DeprecationInfoRequest deprecationInfoRequest = new DeprecationInfoRequest(indices); // <1>
|
||||
//end::get-deprecation-info-request
|
||||
|
||||
// tag::get-deprecation-info-execute
|
||||
DeprecationInfoResponse deprecationInfoResponse =
|
||||
client.migration().getDeprecationInfo(deprecationInfoRequest, RequestOptions.DEFAULT);
|
||||
// end::get-deprecation-info-execute
|
||||
|
||||
// tag::get-deprecation-info-response
|
||||
List<DeprecationInfoResponse.DeprecationIssue> clusterIssues =
|
||||
deprecationInfoResponse.getClusterSettingsIssues(); // <1>
|
||||
List<DeprecationInfoResponse.DeprecationIssue> nodeIssues =
|
||||
deprecationInfoResponse.getNodeSettingsIssues(); // <2>
|
||||
Map<String, List<DeprecationInfoResponse.DeprecationIssue>> indexIssues =
|
||||
deprecationInfoResponse.getIndexSettingsIssues(); // <3>
|
||||
List<DeprecationInfoResponse.DeprecationIssue> mlIssues =
|
||||
deprecationInfoResponse.getMlSettingsIssues(); // <4>
|
||||
// end::get-deprecation-info-response
|
||||
|
||||
// tag::get-deprecation-info-execute-listener
|
||||
ActionListener<DeprecationInfoResponse> listener =
|
||||
new ActionListener<DeprecationInfoResponse>() {
|
||||
@Override
|
||||
public void onResponse(DeprecationInfoResponse deprecationInfoResponse1) { // <1>
|
||||
List<DeprecationInfoResponse.DeprecationIssue> clusterIssues =
|
||||
deprecationInfoResponse.getClusterSettingsIssues();
|
||||
List<DeprecationInfoResponse.DeprecationIssue> nodeIssues =
|
||||
deprecationInfoResponse.getNodeSettingsIssues();
|
||||
Map<String, List<DeprecationInfoResponse.DeprecationIssue>> indexIssues =
|
||||
deprecationInfoResponse.getIndexSettingsIssues();
|
||||
List<DeprecationInfoResponse.DeprecationIssue> mlIssues =
|
||||
deprecationInfoResponse.getMlSettingsIssues();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::get-deprecation-info-execute-listener
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::get-deprecation-info-execute-async
|
||||
client.migration().getDeprecationInfoAsync(deprecationInfoRequest,
|
||||
RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::get-deprecation-info-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
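// A sketch (same `client`, same "test" index as the setup above) that tallies every category
// of deprecation issue returned by the API into a single count.
DeprecationInfoRequest sketchRequest = new DeprecationInfoRequest(List.of("test"));
DeprecationInfoResponse sketchResponse = client.migration().getDeprecationInfo(sketchRequest, RequestOptions.DEFAULT);
int totalIssues = sketchResponse.getClusterSettingsIssues().size()
    + sketchResponse.getNodeSettingsIssues().size()
    + sketchResponse.getMlSettingsIssues().size()
    + sketchResponse.getIndexSettingsIssues().values().stream().mapToInt(List::size).sum();
// a non-zero total means something should be addressed before upgrading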
|
|
@@ -1,109 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.delete.DeleteRequest;
|
||||
import org.elasticsearch.action.delete.DeleteResponse;
|
||||
import org.elasticsearch.action.get.GetRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.Request;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.xcontent.XContentType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* This class is used to generate the documentation for the
|
||||
* docs/java-rest/high-level/migration.asciidoc page.
|
||||
*
|
||||
* You need to wrap your code between two tags like:
|
||||
* // tag::example[]
|
||||
* // end::example[]
|
||||
*
|
||||
* Where example is your tag name.
|
||||
*
|
||||
* Then in the documentation, you can extract what is between tag and end tags with
|
||||
* ["source","java",subs="attributes,callouts,macros"]
|
||||
* --------------------------------------------------
|
||||
* include-tagged::{doc-tests}/MigrationDocumentationIT.java[example]
|
||||
* --------------------------------------------------
|
||||
*/
|
||||
@SuppressWarnings("removal")
|
||||
public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
public void testClusterHealth() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::migration-cluster-health
|
||||
Request request = new Request("GET", "/_cluster/health");
|
||||
request.addParameter("wait_for_status", "green"); // <1>
|
||||
Response response = client.getLowLevelClient().performRequest(request); // <2>
|
||||
|
||||
ClusterHealthStatus healthStatus;
|
||||
try (InputStream is = response.getEntity().getContent()) { // <3>
|
||||
Map<String, Object> map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <4>
|
||||
healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <5>
|
||||
}
|
||||
|
||||
if (healthStatus != ClusterHealthStatus.GREEN) {
|
||||
// <6>
|
||||
}
|
||||
//end::migration-cluster-health
|
||||
assertSame(ClusterHealthStatus.GREEN, healthStatus);
|
||||
}
|
||||
}
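// The same low-level pattern, sketched for a single index; "my-index" is an illustrative
// name, and wait_for_status/timeout are standard cluster health parameters.
Request indexHealth = new Request("GET", "/_cluster/health/my-index");
indexHealth.addParameter("wait_for_status", "yellow");
indexHealth.addParameter("timeout", "30s");
Response indexHealthResponse = client.getLowLevelClient().performRequest(indexHealth);
try (InputStream is = indexHealthResponse.getEntity().getContent()) {
    Map<String, Object> map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true);
    ClusterHealthStatus indexStatus = ClusterHealthStatus.fromString((String) map.get("status"));
}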
|
||||
|
||||
public void testRequests() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::migration-request-ctor
|
||||
IndexRequest request = new IndexRequest("index").id("id"); // <1>
|
||||
request.source("{\"field\":\"value\"}", XContentType.JSON);
|
||||
//end::migration-request-ctor
|
||||
|
||||
//tag::migration-request-ctor-execution
|
||||
IndexResponse response = client.index(request, RequestOptions.DEFAULT);
|
||||
//end::migration-request-ctor-execution
|
||||
assertEquals(RestStatus.CREATED, response.status());
|
||||
}
|
||||
{
|
||||
//tag::migration-request-async-execution
|
||||
DeleteRequest request = new DeleteRequest("index", "id"); // <1>
|
||||
client.deleteAsync(request, RequestOptions.DEFAULT, new ActionListener<DeleteResponse>() { // <2>
|
||||
@Override
|
||||
public void onResponse(DeleteResponse deleteResponse) {
|
||||
// <3>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <4>
|
||||
}
|
||||
});
|
||||
//end::migration-request-async-execution
|
||||
assertBusy(() -> assertFalse(client.exists(new GetRequest("index", "id"), RequestOptions.DEFAULT)));
|
||||
}
|
||||
{
|
||||
//tag::migration-request-sync-execution
|
||||
DeleteRequest request = new DeleteRequest("index", "id");
|
||||
DeleteResponse response = client.delete(request, RequestOptions.DEFAULT); // <1>
|
||||
//end::migration-request-sync-execution
|
||||
assertEquals(RestStatus.NOT_FOUND, response.status());
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,195 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.core.MainResponse;
|
||||
import org.elasticsearch.client.xpack.XPackInfoRequest;
|
||||
import org.elasticsearch.client.xpack.XPackInfoResponse;
|
||||
import org.elasticsearch.client.xpack.XPackInfoResponse.BuildInfo;
|
||||
import org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo;
|
||||
import org.elasticsearch.client.xpack.XPackInfoResponse.LicenseInfo;
|
||||
import org.elasticsearch.client.xpack.XPackUsageRequest;
|
||||
import org.elasticsearch.client.xpack.XPackUsageResponse;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.time.Instant;
|
||||
import java.util.EnumSet;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
/**
|
||||
* Documentation for miscellaneous APIs in the high level java client.
|
||||
* Code wrapped in {@code tag} and {@code end} tags is included in the docs.
|
||||
*/
|
||||
@SuppressWarnings("removal")
|
||||
public class MiscellaneousDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
public void testMain() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::main-execute
|
||||
MainResponse response = client.info(RequestOptions.DEFAULT);
|
||||
//end::main-execute
|
||||
//tag::main-response
|
||||
String clusterName = response.getClusterName();
|
||||
String clusterUuid = response.getClusterUuid();
|
||||
String nodeName = response.getNodeName();
|
||||
MainResponse.Version version = response.getVersion();
|
||||
String buildDate = version.getBuildDate();
|
||||
String buildFlavor = version.getBuildFlavor();
|
||||
String buildHash = version.getBuildHash();
|
||||
String buildType = version.getBuildType();
|
||||
String luceneVersion = version.getLuceneVersion();
|
||||
String minimumIndexCompatibilityVersion = version.getMinimumIndexCompatibilityVersion();
|
||||
String minimumWireCompatibilityVersion = version.getMinimumWireCompatibilityVersion();
|
||||
String number = version.getNumber();
|
||||
//end::main-response
|
||||
assertNotNull(clusterName);
|
||||
assertNotNull(clusterUuid);
|
||||
assertNotNull(nodeName);
|
||||
assertNotNull(version);
|
||||
assertNotNull(buildDate);
|
||||
assertNotNull(buildFlavor);
|
||||
assertNotNull(buildHash);
|
||||
assertNotNull(buildType);
|
||||
assertNotNull(luceneVersion);
|
||||
assertNotNull(minimumIndexCompatibilityVersion);
|
||||
assertNotNull(minimumWireCompatibilityVersion);
|
||||
assertNotNull(number);
|
||||
}
|
||||
}
|
||||
|
||||
public void testPing() throws IOException {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
//tag::ping-execute
|
||||
boolean response = client.ping(RequestOptions.DEFAULT);
|
||||
//end::ping-execute
|
||||
assertTrue(response);
|
||||
}
|
||||
|
||||
public void testXPackInfo() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::x-pack-info-execute
|
||||
XPackInfoRequest request = new XPackInfoRequest();
|
||||
request.setVerbose(true); // <1>
|
||||
request.setCategories(EnumSet.of( // <2>
|
||||
XPackInfoRequest.Category.BUILD,
|
||||
XPackInfoRequest.Category.LICENSE,
|
||||
XPackInfoRequest.Category.FEATURES));
|
||||
XPackInfoResponse response = client.xpack().info(request, RequestOptions.DEFAULT);
|
||||
//end::x-pack-info-execute
|
||||
|
||||
//tag::x-pack-info-response
|
||||
BuildInfo build = response.getBuildInfo(); // <1>
|
||||
LicenseInfo license = response.getLicenseInfo(); // <2>
|
||||
assertThat(license.getExpiryDate(), is(greaterThan(Instant.now().toEpochMilli()))); // <3>
|
||||
FeatureSetsInfo features = response.getFeatureSetsInfo(); // <4>
|
||||
//end::x-pack-info-response
|
||||
|
||||
assertNotNull(response.getBuildInfo());
|
||||
assertNotNull(response.getLicenseInfo());
|
||||
assertNotNull(response.getFeatureSetsInfo());
|
||||
}
|
||||
{
|
||||
XPackInfoRequest request = new XPackInfoRequest();
|
||||
// tag::x-pack-info-execute-listener
|
||||
ActionListener<XPackInfoResponse> listener = new ActionListener<XPackInfoResponse>() {
|
||||
@Override
|
||||
public void onResponse(XPackInfoResponse indexResponse) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-info-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-info-execute-async
|
||||
client.xpack().infoAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-info-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testXPackUsage() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
{
|
||||
//tag::x-pack-usage-execute
|
||||
XPackUsageRequest request = new XPackUsageRequest();
|
||||
XPackUsageResponse response = client.xpack().usage(request, RequestOptions.DEFAULT);
|
||||
//end::x-pack-usage-execute
|
||||
|
||||
//tag::x-pack-usage-response
|
||||
Map<String, Map<String, Object>> usages = response.getUsages();
|
||||
Map<String, Object> monitoringUsage = usages.get("monitoring");
|
||||
assertThat(monitoringUsage.get("available"), is(true));
|
||||
assertThat(monitoringUsage.get("enabled"), is(true));
|
||||
assertThat(monitoringUsage.get("collection_enabled"), is(false));
|
||||
//end::x-pack-usage-response
|
||||
}
|
||||
{
|
||||
XPackUsageRequest request = new XPackUsageRequest();
|
||||
// tag::x-pack-usage-execute-listener
|
||||
ActionListener<XPackUsageResponse> listener = new ActionListener<XPackUsageResponse>() {
|
||||
@Override
|
||||
public void onResponse(XPackUsageResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-usage-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-usage-execute-async
|
||||
client.xpack().usageAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-usage-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
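// A defensive sketch for reading one feature's usage block; the "security" key is an
// illustrative assumption, since the map layout depends on which features are installed.
XPackUsageResponse usage = client.xpack().usage(new XPackUsageRequest(), RequestOptions.DEFAULT);
Map<String, Object> securityUsage = usage.getUsages().getOrDefault("security", Map.of());
boolean securityEnabled = Boolean.TRUE.equals(securityUsage.get("enabled"));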
|
||||
|
||||
public void testInitializationFromClientBuilder() throws IOException {
|
||||
//tag::rest-high-level-client-init
|
||||
RestHighLevelClient client = new RestHighLevelClient(
|
||||
RestClient.builder(
|
||||
new HttpHost("localhost", 9200, "http"),
|
||||
new HttpHost("localhost", 9201, "http")));
|
||||
//end::rest-high-level-client-init
|
||||
|
||||
//tag::rest-high-level-client-close
|
||||
client.close();
|
||||
//end::rest-high-level-client-close
|
||||
}
|
||||
}
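// A sketch of the same initialisation wrapped in try-with-resources, so the underlying
// connection pool is always released even on failure; the host and port are illustrative.
try (RestHighLevelClient sketchClient = new RestHighLevelClient(
        RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
    boolean up = sketchClient.ping(RequestOptions.DEFAULT);
}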
|
File diff suppressed because it is too large
|
@@ -1,672 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License
|
||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
||||
* Side Public License, v 1.
|
||||
*/
|
||||
package org.elasticsearch.client.documentation;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
|
||||
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.bulk.BulkResponse;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
|
||||
import org.elasticsearch.client.RequestOptions;
|
||||
import org.elasticsearch.client.RestHighLevelClient;
|
||||
import org.elasticsearch.client.RollupClient;
|
||||
import org.elasticsearch.client.core.AcknowledgedResponse;
|
||||
import org.elasticsearch.client.rollup.DeleteRollupJobRequest;
|
||||
import org.elasticsearch.client.rollup.GetRollupCapsRequest;
|
||||
import org.elasticsearch.client.rollup.GetRollupCapsResponse;
|
||||
import org.elasticsearch.client.rollup.GetRollupIndexCapsRequest;
|
||||
import org.elasticsearch.client.rollup.GetRollupIndexCapsResponse;
|
||||
import org.elasticsearch.client.rollup.GetRollupJobRequest;
|
||||
import org.elasticsearch.client.rollup.GetRollupJobResponse;
|
||||
import org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper;
|
||||
import org.elasticsearch.client.rollup.GetRollupJobResponse.RollupIndexerJobStats;
|
||||
import org.elasticsearch.client.rollup.GetRollupJobResponse.RollupJobStatus;
|
||||
import org.elasticsearch.client.rollup.PutRollupJobRequest;
|
||||
import org.elasticsearch.client.rollup.RollableIndexCaps;
|
||||
import org.elasticsearch.client.rollup.RollupJobCaps;
|
||||
import org.elasticsearch.client.rollup.StartRollupJobRequest;
|
||||
import org.elasticsearch.client.rollup.StartRollupJobResponse;
|
||||
import org.elasticsearch.client.rollup.StopRollupJobRequest;
|
||||
import org.elasticsearch.client.rollup.StopRollupJobResponse;
|
||||
import org.elasticsearch.client.rollup.job.config.DateHistogramGroupConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.GroupConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.HistogramGroupConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.MetricConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.RollupJobConfig;
|
||||
import org.elasticsearch.client.rollup.job.config.TermsGroupConfig;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.core.TimeValue;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
|
||||
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
|
||||
import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation;
|
||||
import org.elasticsearch.search.builder.SearchSourceBuilder;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.hamcrest.Matchers.closeTo;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.oneOf;
|
||||
|
||||
@SuppressWarnings("removal")
|
||||
public class RollupDocumentationIT extends ESRestHighLevelClientTestCase {
|
||||
|
||||
@Before
|
||||
public void setUpDocs() throws IOException {
|
||||
final BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
|
||||
for (int i = 0; i < 50; i++) {
|
||||
final IndexRequest indexRequest = new IndexRequest("docs");
|
||||
indexRequest.source(
|
||||
jsonBuilder().startObject()
|
||||
.field("timestamp", String.format(Locale.ROOT, "2018-01-01T00:%02d:00Z", i))
|
||||
.field("hostname", 0)
|
||||
.field("datacenter", 0)
|
||||
.field("temperature", i)
|
||||
.field("voltage", 0)
|
||||
.field("load", 0)
|
||||
.field("net_in", 0)
|
||||
.field("net_out", 0)
|
||||
.endObject()
|
||||
);
|
||||
bulkRequest.add(indexRequest);
|
||||
}
|
||||
BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT);
|
||||
assertEquals(RestStatus.OK, bulkResponse.status());
|
||||
assertFalse(bulkResponse.hasFailures());
|
||||
|
||||
RefreshResponse refreshResponse = highLevelClient().indices().refresh(new RefreshRequest("docs"), RequestOptions.DEFAULT);
|
||||
assertEquals(0, refreshResponse.getFailedShards());
|
||||
}
|
||||
|
||||
public void testCreateRollupJob() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
final String indexPattern = "docs";
|
||||
final String rollupIndex = "rollup";
|
||||
final String cron = "*/1 * * * * ?";
|
||||
final int pageSize = 100;
|
||||
final TimeValue timeout = null;
|
||||
|
||||
//tag::x-pack-rollup-put-rollup-job-group-config
|
||||
DateHistogramGroupConfig dateHistogram =
|
||||
new DateHistogramGroupConfig("timestamp", DateHistogramInterval.HOUR, new DateHistogramInterval("7d"), "UTC"); // <1>
|
||||
TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter"); // <2>
|
||||
HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out"); // <3>
|
||||
|
||||
GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms); // <4>
|
||||
//end::x-pack-rollup-put-rollup-job-group-config
|
||||
|
||||
//tag::x-pack-rollup-put-rollup-job-metrics-config
|
||||
List<MetricConfig> metrics = new ArrayList<>(); // <1>
|
||||
metrics.add(new MetricConfig("temperature", Arrays.asList("min", "max", "sum"))); // <2>
|
||||
metrics.add(new MetricConfig("voltage", Arrays.asList("avg", "value_count"))); // <3>
|
||||
//end::x-pack-rollup-put-rollup-job-metrics-config
|
||||
{
|
||||
String id = "job_1";
|
||||
|
||||
//tag::x-pack-rollup-put-rollup-job-config
|
||||
RollupJobConfig config = new RollupJobConfig(id, // <1>
|
||||
indexPattern, // <2>
|
||||
rollupIndex, // <3>
|
||||
cron, // <4>
|
||||
pageSize, // <5>
|
||||
groups, // <6>
|
||||
metrics, // <7>
|
||||
timeout); // <8>
|
||||
//end::x-pack-rollup-put-rollup-job-config
|
||||
|
||||
//tag::x-pack-rollup-put-rollup-job-request
|
||||
PutRollupJobRequest request = new PutRollupJobRequest(config); // <1>
|
||||
//end::x-pack-rollup-put-rollup-job-request
|
||||
|
||||
//tag::x-pack-rollup-put-rollup-job-execute
|
||||
AcknowledgedResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT);
|
||||
//end::x-pack-rollup-put-rollup-job-execute
|
||||
|
||||
//tag::x-pack-rollup-put-rollup-job-response
|
||||
boolean acknowledged = response.isAcknowledged(); // <1>
|
||||
//end::x-pack-rollup-put-rollup-job-response
|
||||
assertTrue(acknowledged);
|
||||
}
|
||||
{
|
||||
String id = "job_2";
|
||||
RollupJobConfig config = new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout);
|
||||
PutRollupJobRequest request = new PutRollupJobRequest(config);
|
||||
// tag::x-pack-rollup-put-rollup-job-execute-listener
|
||||
ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
|
||||
@Override
|
||||
public void onResponse(AcknowledgedResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-rollup-put-rollup-job-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-rollup-put-rollup-job-execute-async
|
||||
client.rollup().putRollupJobAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-rollup-put-rollup-job-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
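// A compact sketch of job creation reusing the groups/metrics built above; the id and cron
// expression here are illustrative values, not ones used by the tests.
RollupJobConfig sketchConfig = new RollupJobConfig("job_docs", "docs", "rollup",
    "*/30 * * * * ?", 100, groups, metrics, null);
AcknowledgedResponse sketchAck = client.rollup().putRollupJob(
    new PutRollupJobRequest(sketchConfig), RequestOptions.DEFAULT);
boolean created = sketchAck.isAcknowledged();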
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public void testGetRollupJob() throws Exception {
|
||||
testCreateRollupJob();
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
// tag::x-pack-rollup-get-rollup-job-request
|
||||
GetRollupJobRequest getAll = new GetRollupJobRequest(); // <1>
|
||||
GetRollupJobRequest getJob = new GetRollupJobRequest("job_1"); // <2>
|
||||
// end::x-pack-rollup-get-rollup-job-request
|
||||
|
||||
// tag::x-pack-rollup-get-rollup-job-execute
|
||||
GetRollupJobResponse response = client.rollup().getRollupJob(getJob, RequestOptions.DEFAULT);
|
||||
// end::x-pack-rollup-get-rollup-job-execute
|
||||
|
||||
// tag::x-pack-rollup-get-rollup-job-response
|
||||
assertThat(response.getJobs(), hasSize(1));
|
||||
JobWrapper job = response.getJobs().get(0); // <1>
|
||||
RollupJobConfig config = job.getJob();
|
||||
RollupJobStatus status = job.getStatus();
|
||||
RollupIndexerJobStats stats = job.getStats();
|
||||
// end::x-pack-rollup-get-rollup-job-response
|
||||
assertNotNull(config);
|
||||
assertNotNull(status);
|
||||
assertNotNull(stats);
|
||||
|
||||
// tag::x-pack-rollup-get-rollup-job-execute-listener
|
||||
ActionListener<GetRollupJobResponse> listener = new ActionListener<GetRollupJobResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetRollupJobResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-rollup-get-rollup-job-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-rollup-get-rollup-job-execute-async
|
||||
client.rollup().getRollupJobAsync(getJob, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-rollup-get-rollup-job-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
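// A sketch that lists every job with the no-argument request and reads the same wrapper
// pieces (config, status, stats) accessed above; it assumes the same `client`.
GetRollupJobResponse allJobs = client.rollup().getRollupJob(new GetRollupJobRequest(), RequestOptions.DEFAULT);
for (JobWrapper wrapper : allJobs.getJobs()) {
    RollupJobConfig jobConfig = wrapper.getJob();
    RollupJobStatus jobStatus = wrapper.getStatus();
    RollupIndexerJobStats jobStats = wrapper.getStats();
}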
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public void testStartRollupJob() throws Exception {
|
||||
testCreateRollupJob();
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
String id = "job_1";
|
||||
// tag::rollup-start-job-request
|
||||
StartRollupJobRequest request = new StartRollupJobRequest(id); // <1>
|
||||
// end::rollup-start-job-request
|
||||
try {
|
||||
// tag::rollup-start-job-execute
|
||||
RollupClient rc = client.rollup();
|
||||
StartRollupJobResponse response = rc.startRollupJob(request, RequestOptions.DEFAULT);
|
||||
// end::rollup-start-job-execute
|
||||
// tag::rollup-start-job-response
|
||||
response.isAcknowledged(); // <1>
|
||||
// end::rollup-start-job-response
|
||||
} catch (Exception e) {
|
||||
// Swallow any exception, this test does not verify that the job actually starts.
|
||||
}
|
||||
// stop job to prevent spamming exceptions on next start request
|
||||
StopRollupJobRequest stopRequest = new StopRollupJobRequest(id);
|
||||
stopRequest.waitForCompletion();
|
||||
stopRequest.timeout(TimeValue.timeValueSeconds(10));
|
||||
|
||||
StopRollupJobResponse response = client.rollup().stopRollupJob(stopRequest, RequestOptions.DEFAULT);
|
||||
assertTrue(response.isAcknowledged());
|
||||
|
||||
// tag::rollup-start-job-execute-listener
|
||||
ActionListener<StartRollupJobResponse> listener = new ActionListener<StartRollupJobResponse>() {
|
||||
@Override
|
||||
public void onResponse(StartRollupJobResponse response) {
|
||||
// <1>
|
||||
}
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::rollup-start-job-execute-listener
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
// tag::rollup-start-job-execute-async
|
||||
RollupClient rc = client.rollup();
|
||||
rc.startRollupJobAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::rollup-start-job-execute-async
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
|
||||
// stop job so it can correctly be deleted by the test teardown
|
||||
response = rc.stopRollupJob(stopRequest, RequestOptions.DEFAULT);
|
||||
assertTrue(response.isAcknowledged());
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public void testStopRollupJob() throws Exception {
|
||||
testCreateRollupJob();
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
String id = "job_1";
|
||||
// tag::rollup-stop-job-request
|
||||
StopRollupJobRequest request = new StopRollupJobRequest(id); // <1>
|
||||
request.waitForCompletion(true); // <2>
|
||||
request.timeout(TimeValue.timeValueSeconds(10)); // <3>
|
||||
// end::rollup-stop-job-request
|
||||
|
||||
try {
|
||||
// tag::rollup-stop-job-execute
|
||||
RollupClient rc = client.rollup();
|
||||
StopRollupJobResponse response = rc.stopRollupJob(request, RequestOptions.DEFAULT);
|
||||
// end::rollup-stop-job-execute
|
||||
|
||||
// tag::rollup-stop-job-response
|
||||
response.isAcknowledged(); // <1>
|
||||
// end::rollup-stop-job-response
|
||||
} catch (Exception e) {
|
||||
// Swallow any exception, this test does not test actually cancelling.
|
||||
}
|
||||
|
||||
// tag::rollup-stop-job-execute-listener
|
||||
ActionListener<StopRollupJobResponse> listener = new ActionListener<StopRollupJobResponse>() {
|
||||
@Override
|
||||
public void onResponse(StopRollupJobResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::rollup-stop-job-execute-listener
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::rollup-stop-job-execute-async
|
||||
RollupClient rc = client.rollup();
|
||||
rc.stopRollupJobAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::rollup-stop-job-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
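// A sketch of a blocking stop, combining the request options shown above so the caller only
// returns once the job has fully stopped; the 30 second timeout is illustrative.
StopRollupJobRequest blockingStop = new StopRollupJobRequest("job_1");
blockingStop.waitForCompletion(true);
blockingStop.timeout(TimeValue.timeValueSeconds(30));
StopRollupJobResponse stopped = client.rollup().stopRollupJob(blockingStop, RequestOptions.DEFAULT);
boolean stoppedAcknowledged = stopped.isAcknowledged();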
|
||||
|
||||
public void testSearch() throws Exception {
|
||||
// Setup a rollup index to query
|
||||
testCreateRollupJob();
|
||||
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
// tag::search-request
|
||||
SearchRequest request = new SearchRequest();
|
||||
request.source(new SearchSourceBuilder()
|
||||
.size(0)
|
||||
.aggregation(new MaxAggregationBuilder("max_temperature")
|
||||
.field("temperature")));
|
||||
// end::search-request
|
||||
|
||||
// tag::search-execute
|
||||
SearchResponse response =
|
||||
client.rollup().search(request, RequestOptions.DEFAULT);
|
||||
// end::search-execute
|
||||
|
||||
// tag::search-response
|
||||
NumericMetricsAggregation.SingleValue maxTemperature =
|
||||
response.getAggregations().get("max_temperature");
|
||||
assertThat(maxTemperature.value(), closeTo(49.0, .00001));
|
||||
// end::search-response
|
||||
|
||||
ActionListener<SearchResponse> listener;
|
||||
// tag::search-execute-listener
|
||||
listener = new ActionListener<SearchResponse>() {
|
||||
@Override
|
||||
public void onResponse(SearchResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::search-execute-listener
|
||||
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::search-execute-async
|
||||
client.rollup().searchAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::search-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public void testGetRollupCaps() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
DateHistogramGroupConfig dateHistogram = new DateHistogramGroupConfig.FixedInterval(
|
||||
"timestamp",
|
||||
DateHistogramInterval.HOUR,
|
||||
new DateHistogramInterval("7d"),
|
||||
"UTC"
|
||||
); // <1>
|
||||
TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter");
|
||||
HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out");
|
||||
GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms);
|
||||
List<MetricConfig> metrics = new ArrayList<>(); // <1>
|
||||
metrics.add(new MetricConfig("temperature", Arrays.asList("min", "max", "sum")));
|
||||
metrics.add(new MetricConfig("voltage", Arrays.asList("avg", "value_count")));
|
||||
|
||||
//tag::x-pack-rollup-get-rollup-caps-setup
|
||||
final String indexPattern = "docs";
|
||||
final String rollupIndexName = "rollup";
|
||||
final String cron = "*/1 * * * * ?";
|
||||
final int pageSize = 100;
|
||||
final TimeValue timeout = null;
|
||||
|
||||
String id = "job_1";
|
||||
RollupJobConfig config = new RollupJobConfig(id, indexPattern, rollupIndexName, cron,
|
||||
pageSize, groups, metrics, timeout);
|
||||
|
||||
PutRollupJobRequest request = new PutRollupJobRequest(config);
|
||||
AcknowledgedResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT);
|
||||
|
||||
boolean acknowledged = response.isAcknowledged();
|
||||
//end::x-pack-rollup-get-rollup-caps-setup
|
||||
assertTrue(acknowledged);
|
||||
|
||||
ClusterHealthRequest healthRequest = new ClusterHealthRequest(config.getRollupIndex()).waitForYellowStatus();
|
||||
ClusterHealthResponse healthResponse = client.cluster().health(healthRequest, RequestOptions.DEFAULT);
|
||||
assertFalse(healthResponse.isTimedOut());
|
||||
assertThat(healthResponse.getStatus(), oneOf(ClusterHealthStatus.YELLOW, ClusterHealthStatus.GREEN));
|
||||
|
||||
// Now that the job is created, we should have a rollup index with metadata.
|
||||
// We can test out the caps API now.
|
||||
|
||||
//tag::x-pack-rollup-get-rollup-caps-request
|
||||
GetRollupCapsRequest getRollupCapsRequest = new GetRollupCapsRequest("docs");
|
||||
//end::x-pack-rollup-get-rollup-caps-request
|
||||
|
||||
//tag::x-pack-rollup-get-rollup-caps-execute
|
||||
GetRollupCapsResponse capsResponse = client.rollup().getRollupCapabilities(getRollupCapsRequest, RequestOptions.DEFAULT);
|
||||
//end::x-pack-rollup-get-rollup-caps-execute
|
||||
|
||||
//tag::x-pack-rollup-get-rollup-caps-response
|
||||
Map<String, RollableIndexCaps> rolledPatterns = capsResponse.getJobs();
|
||||
|
||||
RollableIndexCaps docsPattern = rolledPatterns.get("docs");
|
||||
|
||||
// indexName will be "docs" in this case... the index pattern that we rolled up
|
||||
String indexName = docsPattern.getIndexName();
|
||||
|
||||
// Each index pattern can have multiple jobs that rolled it up, so `getJobCaps()`
|
||||
// returns a list of jobs that rolled up the pattern
|
||||
List<RollupJobCaps> rollupJobs = docsPattern.getJobCaps();
|
||||
RollupJobCaps jobCaps = rollupJobs.get(0);
|
||||
|
||||
// jobID is the identifier we used when we created the job (e.g. `job1`)
|
||||
String jobID = jobCaps.getJobID();
|
||||
|
||||
// rollupIndex is the location where the job stored its rollup docs (e.g. `rollup`)
|
||||
String rollupIndex = jobCaps.getRollupIndex();
|
||||
|
||||
// indexPattern is the same as the indexName that we retrieved earlier, redundant info
|
||||
assert jobCaps.getIndexPattern().equals(indexName);
|
||||
|
||||
// Finally, fieldCaps are the capabilities of individual fields in the config
|
||||
// The key is the field name, and the value is a RollupFieldCaps object which
|
||||
// provides more info.
|
||||
Map<String, RollupJobCaps.RollupFieldCaps> fieldCaps = jobCaps.getFieldCaps();
|
||||
|
||||
// If we retrieve the "timestamp" field, it returns a list of maps. Each list
|
||||
// item represents a different aggregation that can be run against the "timestamp"
|
||||
// field, and any additional details specific to that agg (interval, etc)
|
||||
List<Map<String, Object>> timestampCaps = fieldCaps.get("timestamp").getAggs();
|
||||
logger.error(timestampCaps.get(0).toString());
|
||||
assert timestampCaps.get(0).toString().equals("{agg=date_histogram, fixed_interval=1h, delay=7d, time_zone=UTC}");
|
||||
|
||||
// In contrast to the timestamp field, the temperature field has multiple aggs configured
|
||||
List<Map<String, Object>> temperatureCaps = fieldCaps.get("temperature").getAggs();
|
||||
assert temperatureCaps.toString().equals("[{agg=min}, {agg=max}, {agg=sum}]");
|
||||
//end::x-pack-rollup-get-rollup-caps-response
|
||||
|
||||
assertThat(indexName, equalTo("docs"));
|
||||
assertThat(jobID, equalTo("job_1"));
|
||||
assertThat(rollupIndex, equalTo("rollup"));
|
||||
assertThat(fieldCaps.size(), equalTo(8));
|
||||
|
||||
// tag::x-pack-rollup-get-rollup-caps-execute-listener
|
||||
ActionListener<GetRollupCapsResponse> listener = new ActionListener<GetRollupCapsResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetRollupCapsResponse response) {
|
||||
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-rollup-get-rollup-caps-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-rollup-get-rollup-caps-execute-async
|
||||
client.rollup().getRollupCapabilitiesAsync(getRollupCapsRequest, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-rollup-get-rollup-caps-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
public void testGetRollupIndexCaps() throws Exception {
RestHighLevelClient client = highLevelClient();

DateHistogramGroupConfig dateHistogram = new DateHistogramGroupConfig.FixedInterval(
"timestamp",
DateHistogramInterval.HOUR,
new DateHistogramInterval("7d"),
"UTC"
); // <1>
TermsGroupConfig terms = new TermsGroupConfig("hostname", "datacenter");
HistogramGroupConfig histogram = new HistogramGroupConfig(5L, "load", "net_in", "net_out");
GroupConfig groups = new GroupConfig(dateHistogram, histogram, terms);
List<MetricConfig> metrics = new ArrayList<>(); // <1>
metrics.add(new MetricConfig("temperature", Arrays.asList("min", "max", "sum")));
metrics.add(new MetricConfig("voltage", Arrays.asList("avg", "value_count")));

//tag::x-pack-rollup-get-rollup-index-caps-setup
final String indexPattern = "docs";
final String rollupIndexName = "rollup";
final String cron = "*/1 * * * * ?";
final int pageSize = 100;
final TimeValue timeout = null;

String id = "job_1";
RollupJobConfig config = new RollupJobConfig(id, indexPattern, rollupIndexName, cron,
pageSize, groups, metrics, timeout);

PutRollupJobRequest request = new PutRollupJobRequest(config);
AcknowledgedResponse response = client.rollup().putRollupJob(request, RequestOptions.DEFAULT);

boolean acknowledged = response.isAcknowledged();
//end::x-pack-rollup-get-rollup-index-caps-setup
assertTrue(acknowledged);

ClusterHealthRequest healthRequest = new ClusterHealthRequest(config.getRollupIndex()).waitForYellowStatus();
ClusterHealthResponse healthResponse = client.cluster().health(healthRequest, RequestOptions.DEFAULT);
assertFalse(healthResponse.isTimedOut());
assertThat(healthResponse.getStatus(), oneOf(ClusterHealthStatus.YELLOW, ClusterHealthStatus.GREEN));

// Now that the job is created, we should have a rollup index with metadata.
// We can test out the caps API now.

//tag::x-pack-rollup-get-rollup-index-caps-request
GetRollupIndexCapsRequest getRollupIndexCapsRequest = new GetRollupIndexCapsRequest("rollup");
//end::x-pack-rollup-get-rollup-index-caps-request

//tag::x-pack-rollup-get-rollup-index-caps-execute
GetRollupIndexCapsResponse capsResponse = client.rollup()
.getRollupIndexCapabilities(getRollupIndexCapsRequest, RequestOptions.DEFAULT);
//end::x-pack-rollup-get-rollup-index-caps-execute

//tag::x-pack-rollup-get-rollup-index-caps-response
Map<String, RollableIndexCaps> rolledPatterns = capsResponse.getJobs();

RollableIndexCaps docsPattern = rolledPatterns.get("rollup");

// indexName will be "rollup", the target index we requested
String indexName = docsPattern.getIndexName();

// Each index pattern can have multiple jobs that rolled it up, so `getJobCaps()`
// returns a list of jobs that rolled up the pattern
List<RollupJobCaps> rollupJobs = docsPattern.getJobCaps();
RollupJobCaps jobCaps = rollupJobs.get(0);

// jobID is the identifier we used when we created the job (e.g. `job_1`)
String jobID = jobCaps.getJobID();

// rollupIndex is the location where the job stored its rollup docs (e.g. `rollup`)
String rollupIndex = jobCaps.getRollupIndex();

// Finally, fieldCaps are the capabilities of individual fields in the config
// The key is the field name, and the value is a RollupFieldCaps object which
// provides more info.
Map<String, RollupJobCaps.RollupFieldCaps> fieldCaps = jobCaps.getFieldCaps();

// If we retrieve the "timestamp" field, it returns a list of maps. Each list
// item represents a different aggregation that can be run against the "timestamp"
// field, and any additional details specific to that agg (interval, etc)
List<Map<String, Object>> timestampCaps = fieldCaps.get("timestamp").getAggs();
logger.error(timestampCaps.get(0).toString());
assert timestampCaps.get(0).toString().equals("{agg=date_histogram, fixed_interval=1h, delay=7d, time_zone=UTC}");

// In contrast to the timestamp field, the temperature field has multiple aggs configured
List<Map<String, Object>> temperatureCaps = fieldCaps.get("temperature").getAggs();
assert temperatureCaps.toString().equals("[{agg=min}, {agg=max}, {agg=sum}]");
//end::x-pack-rollup-get-rollup-index-caps-response

assertThat(indexName, equalTo("rollup"));
assertThat(jobID, equalTo("job_1"));
assertThat(rollupIndex, equalTo("rollup"));
assertThat(fieldCaps.size(), equalTo(8));

// tag::x-pack-rollup-get-rollup-index-caps-execute-listener
ActionListener<GetRollupIndexCapsResponse> listener = new ActionListener<GetRollupIndexCapsResponse>() {
@Override
public void onResponse(GetRollupIndexCapsResponse response) {

// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::x-pack-rollup-get-rollup-index-caps-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::x-pack-rollup-get-rollup-index-caps-execute-async
client.rollup().getRollupIndexCapabilitiesAsync(getRollupIndexCapsRequest, RequestOptions.DEFAULT, listener); // <1>
// end::x-pack-rollup-get-rollup-index-caps-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}

@SuppressWarnings("unused")
public void testDeleteRollupJob() throws Exception {
RestHighLevelClient client = highLevelClient();

String id = "job_2";

// tag::rollup-delete-job-request
DeleteRollupJobRequest request = new DeleteRollupJobRequest(id); // <1>
// end::rollup-delete-job-request
try {
// tag::rollup-delete-job-execute
AcknowledgedResponse response = client.rollup().deleteRollupJob(request, RequestOptions.DEFAULT);
// end::rollup-delete-job-execute

// tag::rollup-delete-job-response
response.isAcknowledged(); // <1>
// end::rollup-delete-job-response
} catch (Exception e) {
// Swallow any exception, this test does not test actually cancelling.
}

// tag::rollup-delete-job-execute-listener
ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse response) {
boolean acknowledged = response.isAcknowledged(); // <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::rollup-delete-job-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::rollup-delete-job-execute-async
client.rollup().deleteRollupJobAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::rollup-delete-job-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}
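
Every asynchronous variant in these documentation tests follows the same pattern: the listener shown in the docs is wrapped in a LatchedActionListener so the test can block until the call completes. A condensed sketch of that pattern (illustrative, not part of the removed file; AcknowledgedResponse stands in for whatever response type the call returns, and ActionListener.wrap is an assumption here, since the removed tests spell the listener out as an anonymous class):

ActionListener<AcknowledgedResponse> docListener = ActionListener.wrap(
    response -> { /* the success handling the docs mark as <1> */ },
    e -> { /* the failure handling the docs mark as <2> */ });
final CountDownLatch latch = new CountDownLatch(1);
// LatchedActionListener counts the latch down on success and on failure alike
ActionListener<AcknowledgedResponse> testListener = new LatchedActionListener<>(docListener, latch);
// ... pass testListener to the *Async variant of the client call ...
assertTrue(latch.await(30L, TimeUnit.SECONDS));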
File diff suppressed because it is too large
@@ -1,188 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.client.searchable_snapshots.CachesStatsRequest;
import org.elasticsearch.client.searchable_snapshots.CachesStatsResponse;
import org.elasticsearch.client.searchable_snapshots.CachesStatsResponse.NodeCachesStats;
import org.elasticsearch.client.searchable_snapshots.MountSnapshotRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.RestoreInfo;
import org.elasticsearch.xcontent.XContentType;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.is;

@SuppressWarnings("removal")
public class SearchableSnapshotsDocumentationIT extends ESRestHighLevelClientTestCase {

public void testMountSnapshot() throws IOException, InterruptedException {
final RestHighLevelClient client = highLevelClient();
{
final CreateIndexRequest request = new CreateIndexRequest("index");
final CreateIndexResponse response = client.indices().create(request, RequestOptions.DEFAULT);
assertTrue(response.isAcknowledged());
}

{
final IndexRequest request = new IndexRequest("index").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.source("{}", XContentType.JSON);
final IndexResponse response = client.index(request, RequestOptions.DEFAULT);
assertThat(response.status(), is(RestStatus.CREATED));
}

{
final PutRepositoryRequest request = new PutRepositoryRequest("repository");
request.settings("{\"location\": \".\"}", XContentType.JSON);
request.type(FsRepository.TYPE);
final AcknowledgedResponse response = client.snapshot().createRepository(request, RequestOptions.DEFAULT);
assertTrue(response.isAcknowledged());
}

{
final CreateSnapshotRequest request = new CreateSnapshotRequest("repository", "snapshot").waitForCompletion(true);
final CreateSnapshotResponse response = client.snapshot().create(request, RequestOptions.DEFAULT);
assertThat(response.getSnapshotInfo().status(), is(RestStatus.OK));
}

// tag::searchable-snapshots-mount-snapshot-request
final MountSnapshotRequest request = new MountSnapshotRequest(
"repository", // <1>
"snapshot", // <2>
"index" // <3>
);
request.masterTimeout(TimeValue.timeValueSeconds(30)); // <4>
request.waitForCompletion(true); // <5>
request.storage(MountSnapshotRequest.Storage.FULL_COPY); // <6>
request.renamedIndex("renamed_index"); // <7>
final Settings indexSettings = Settings.builder()
.put("index.number_of_replicas", 0)
.build();
request.indexSettings(indexSettings); // <8>
request.ignoredIndexSettings(
new String[]{"index.refresh_interval"}); // <9>
// end::searchable-snapshots-mount-snapshot-request

// tag::searchable-snapshots-mount-snapshot-execute
final RestoreSnapshotResponse response = client
.searchableSnapshots()
.mountSnapshot(request, RequestOptions.DEFAULT);
// end::searchable-snapshots-mount-snapshot-execute

// tag::searchable-snapshots-mount-snapshot-response
final RestoreInfo restoreInfo = response.getRestoreInfo(); // <1>
// end::searchable-snapshots-mount-snapshot-response

// tag::searchable-snapshots-mount-snapshot-execute-listener
ActionListener<RestoreSnapshotResponse> listener =
new ActionListener<RestoreSnapshotResponse>() {

@Override
public void onResponse(
final RestoreSnapshotResponse response) { // <1>
final RestoreInfo restoreInfo = response.getRestoreInfo();
}

@Override
public void onFailure(final Exception e) {
// <2>
}

};
// end::searchable-snapshots-mount-snapshot-execute-listener

final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::searchable-snapshots-mount-snapshot-execute-async
client.searchableSnapshots().mountSnapshotAsync(
request,
RequestOptions.DEFAULT,
listener // <1>
);
// end::searchable-snapshots-mount-snapshot-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}

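Stripped of assertions and doc callouts, the flow this test exercises is: create an index, snapshot it, then mount the snapshot as a searchable-snapshot index. A condensed, illustrative sketch using only the calls shown above; the "repository", "snapshot" and "index" names are the ones this test creates:

MountSnapshotRequest mount = new MountSnapshotRequest("repository", "snapshot", "index");
mount.waitForCompletion(true);
RestoreSnapshotResponse mounted = client.searchableSnapshots()
    .mountSnapshot(mount, RequestOptions.DEFAULT);
RestoreInfo info = mounted.getRestoreInfo();
// with waitForCompletion(true) the mount behaves like a synchronous restore
assert info.failedShards() == 0;
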
public void testCachesStatsSnapshot() throws Exception {
final RestHighLevelClient client = highLevelClient();

// tag::searchable-snapshots-caches-stats-request
CachesStatsRequest request = new CachesStatsRequest(); // <1>
request = new CachesStatsRequest( // <2>
"eerrtBMtQEisohZzxBLUSw",
"klksqQSSzASDqDMLQ"
);
// end::searchable-snapshots-caches-stats-request

// tag::searchable-snapshots-caches-stats-execute
final CachesStatsResponse response = client
.searchableSnapshots()
.cacheStats(request, RequestOptions.DEFAULT);
// end::searchable-snapshots-caches-stats-execute

// tag::searchable-snapshots-caches-stats-response
final List<NodeCachesStats> nodeCachesStats =
response.getNodeCachesStats(); // <1>
// end::searchable-snapshots-caches-stats-response

// tag::searchable-snapshots-caches-stats-execute-listener
ActionListener<CachesStatsResponse> listener =
new ActionListener<CachesStatsResponse>() {

@Override
public void onResponse(final CachesStatsResponse response) {
// <1>
}

@Override
public void onFailure(final Exception e) {
// <2>
}
};
// end::searchable-snapshots-caches-stats-execute-listener

final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::searchable-snapshots-caches-stats-execute-async
client.searchableSnapshots().cacheStatsAsync(
request,
RequestOptions.DEFAULT,
listener // <1>
);
// end::searchable-snapshots-caches-stats-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}

}
File diff suppressed because it is too large
@@ -1,898 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryRequest;
import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.clone.CloneSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStats;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Booleans;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.RestoreInfo;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotShardFailure;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.xcontent.XContentType;

import java.io.IOException;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.equalTo;

/**
 * This class is used to generate the Java Snapshot API documentation.
 * You need to wrap your code between two tags like:
 * // tag::example
 * // end::example
 *
 * Where example is your tag name.
 *
 * Then in the documentation, you can extract what is between tag and end tags with
 * ["source","java",subs="attributes,callouts,macros"]
 * --------------------------------------------------
 * include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[example]
 * --------------------------------------------------
 *
 * The column width of the code block is 84. If the code contains a line longer
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
 * (the code indentation of the tag is not included in the width)
 */
@SuppressWarnings("removal")
public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase {

private static final String repositoryName = "test_repository";
private static final String snapshotName = "test_snapshot";
private static final String indexName = "test_index";

@Override
protected boolean waitForAllSnapshotsWiped() {
return true;
}

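The class Javadoc above describes the convention these files exist for: snippets are fenced with // tag:: and // end:: comments and pulled into the asciidoc pages via include-tagged::. A hypothetical illustration (the my-example tag, method and repository name below are not from the removed file):

public void demoTagConvention() {
    // tag::my-example
    PutRepositoryRequest request = new PutRepositoryRequest("my_repository"); // <1>
    // end::my-example
    // Only the line between the markers ends up in the rendered documentation;
    // the <1> callout is resolved by the asciidoc "callouts" substitution.
}
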
public void testSnapshotCreateRepository() throws IOException {
RestHighLevelClient client = highLevelClient();

// tag::create-repository-request
PutRepositoryRequest request = new PutRepositoryRequest();
// end::create-repository-request

// tag::create-repository-create-settings
String locationKey = FsRepository.LOCATION_SETTING.getKey();
String locationValue = ".";
String compressKey = FsRepository.COMPRESS_SETTING.getKey();
boolean compressValue = true;

Settings settings = Settings.builder()
.put(locationKey, locationValue)
.put(compressKey, compressValue)
.build(); // <1>
// end::create-repository-create-settings

// tag::create-repository-request-repository-settings
request.settings(settings); // <1>
// end::create-repository-request-repository-settings

{
// tag::create-repository-settings-builder
Settings.Builder settingsBuilder = Settings.builder()
.put(locationKey, locationValue)
.put(compressKey, compressValue);
request.settings(settingsBuilder); // <1>
// end::create-repository-settings-builder
}
{
// tag::create-repository-settings-map
Map<String, Object> map = new HashMap<>();
map.put(locationKey, locationValue);
map.put(compressKey, compressValue);
request.settings(map); // <1>
// end::create-repository-settings-map
}
{
// tag::create-repository-settings-source
request.settings("""
{"location": ".", "compress": "true"}
""", XContentType.JSON); // <1>
// end::create-repository-settings-source
}

// tag::create-repository-request-name
request.name(repositoryName); // <1>
// end::create-repository-request-name
// tag::create-repository-request-type
request.type(FsRepository.TYPE); // <1>
// end::create-repository-request-type

// tag::create-repository-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::create-repository-request-masterTimeout
// tag::create-repository-request-timeout
request.timeout(TimeValue.timeValueMinutes(1)); // <1>
request.timeout("1m"); // <2>
// end::create-repository-request-timeout
// tag::create-repository-request-verify
request.verify(true); // <1>
// end::create-repository-request-verify

// tag::create-repository-execute
AcknowledgedResponse response = client.snapshot().createRepository(request, RequestOptions.DEFAULT);
// end::create-repository-execute

// tag::create-repository-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::create-repository-response
assertTrue(acknowledged);
}

public void testSnapshotCreateRepositoryAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
PutRepositoryRequest request = new PutRepositoryRequest(repositoryName);

// tag::create-repository-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse putRepositoryResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::create-repository-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::create-repository-execute-async
client.snapshot().createRepositoryAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::create-repository-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

public void testSnapshotGetRepository() throws IOException {
RestHighLevelClient client = highLevelClient();

createTestRepositories();

// tag::get-repository-request
GetRepositoriesRequest request = new GetRepositoriesRequest();
// end::get-repository-request

// tag::get-repository-request-repositories
String [] repositories = new String[] {repositoryName};
request.repositories(repositories); // <1>
// end::get-repository-request-repositories
// tag::get-repository-request-local
request.local(true); // <1>
// end::get-repository-request-local
// tag::get-repository-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::get-repository-request-masterTimeout

// tag::get-repository-execute
GetRepositoriesResponse response = client.snapshot().getRepository(request, RequestOptions.DEFAULT);
// end::get-repository-execute

// tag::get-repository-response
List<RepositoryMetadata> repositoryMetadataResponse = response.repositories();
// end::get-repository-response
assertThat(1, equalTo(repositoryMetadataResponse.size()));
assertThat(repositoryName, equalTo(repositoryMetadataResponse.get(0).name()));
}

public void testSnapshotGetRepositoryAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
GetRepositoriesRequest request = new GetRepositoriesRequest();

// tag::get-repository-execute-listener
ActionListener<GetRepositoriesResponse> listener =
new ActionListener<GetRepositoriesResponse>() {
@Override
public void onResponse(GetRepositoriesResponse getRepositoriesResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::get-repository-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::get-repository-execute-async
client.snapshot().getRepositoryAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::get-repository-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

public void testRestoreSnapshot() throws IOException {
RestHighLevelClient client = highLevelClient();

createTestRepositories();
createTestIndex();
createTestSnapshots();

// tag::restore-snapshot-request
RestoreSnapshotRequest request = new RestoreSnapshotRequest(repositoryName, snapshotName);
// end::restore-snapshot-request
// we need to restore as a different index name

// tag::restore-snapshot-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::restore-snapshot-request-masterTimeout

// tag::restore-snapshot-request-waitForCompletion
request.waitForCompletion(true); // <1>
// end::restore-snapshot-request-waitForCompletion

// tag::restore-snapshot-request-partial
request.partial(false); // <1>
// end::restore-snapshot-request-partial

// tag::restore-snapshot-request-include-global-state
request.includeGlobalState(false); // <1>
// end::restore-snapshot-request-include-global-state

// tag::restore-snapshot-request-include-aliases
request.includeAliases(false); // <1>
// end::restore-snapshot-request-include-aliases

// tag::restore-snapshot-request-indices
request.indices("test_index"); // <1>
// end::restore-snapshot-request-indices

String restoredIndexName = "restored_index";
// tag::restore-snapshot-request-rename
request.renamePattern("test_(.+)"); // <1>
request.renameReplacement("restored_$1"); // <2>
// end::restore-snapshot-request-rename

// tag::restore-snapshot-request-index-settings
request.indexSettings( // <1>
Settings.builder()
.put("index.number_of_replicas", 0)
.build());

request.ignoreIndexSettings("index.refresh_interval", "index.search.idle.after"); // <2>
request.indicesOptions(new IndicesOptions( // <3>
EnumSet.of(IndicesOptions.Option.IGNORE_UNAVAILABLE),
EnumSet.of(IndicesOptions.WildcardStates.OPEN)));
// end::restore-snapshot-request-index-settings

// tag::restore-snapshot-execute
RestoreSnapshotResponse response = client.snapshot().restore(request, RequestOptions.DEFAULT);
// end::restore-snapshot-execute

// tag::restore-snapshot-response
RestoreInfo restoreInfo = response.getRestoreInfo();
List<String> indices = restoreInfo.indices(); // <1>
// end::restore-snapshot-response
assertEquals(Collections.singletonList(restoredIndexName), indices);
assertEquals(0, restoreInfo.failedShards());
assertTrue(restoreInfo.successfulShards() > 0);
}

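A worked view of the rename above (illustrative, not from the removed file): renamePattern and renameReplacement behave like a regex substitution applied to each restored index name, which is how "test_index" becomes the "restored_index" the assertions expect:

String renamed = "test_index".replaceAll("test_(.+)", "restored_$1");
// "test_(.+)" matches "test_index" with group(1) = "index",
// so "restored_$1" expands to "restored_index"
assert renamed.equals("restored_index");
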
public void testRestoreSnapshotAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
RestoreSnapshotRequest request = new RestoreSnapshotRequest();

// tag::restore-snapshot-execute-listener
ActionListener<RestoreSnapshotResponse> listener =
new ActionListener<RestoreSnapshotResponse>() {
@Override
public void onResponse(RestoreSnapshotResponse restoreSnapshotResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::restore-snapshot-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::restore-snapshot-execute-async
client.snapshot().restoreAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::restore-snapshot-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

public void testSnapshotDeleteRepository() throws IOException {
RestHighLevelClient client = highLevelClient();

createTestRepositories();

// tag::delete-repository-request
DeleteRepositoryRequest request = new DeleteRepositoryRequest(repositoryName);
// end::delete-repository-request

// tag::delete-repository-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::delete-repository-request-masterTimeout
// tag::delete-repository-request-timeout
request.timeout(TimeValue.timeValueMinutes(1)); // <1>
request.timeout("1m"); // <2>
// end::delete-repository-request-timeout

// tag::delete-repository-execute
AcknowledgedResponse response = client.snapshot().deleteRepository(request, RequestOptions.DEFAULT);
// end::delete-repository-execute

// tag::delete-repository-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::delete-repository-response
assertTrue(acknowledged);
}

public void testSnapshotDeleteRepositoryAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
DeleteRepositoryRequest request = new DeleteRepositoryRequest();

// tag::delete-repository-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse deleteRepositoryResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::delete-repository-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::delete-repository-execute-async
client.snapshot().deleteRepositoryAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::delete-repository-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

public void testSnapshotVerifyRepository() throws IOException {
RestHighLevelClient client = highLevelClient();
createTestRepositories();

// tag::verify-repository-request
VerifyRepositoryRequest request = new VerifyRepositoryRequest(repositoryName);
// end::verify-repository-request

// tag::verify-repository-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::verify-repository-request-masterTimeout
// tag::verify-repository-request-timeout
request.timeout(TimeValue.timeValueMinutes(1)); // <1>
request.timeout("1m"); // <2>
// end::verify-repository-request-timeout

// tag::verify-repository-execute
VerifyRepositoryResponse response = client.snapshot().verifyRepository(request, RequestOptions.DEFAULT);
// end::verify-repository-execute

// tag::verify-repository-response
List<VerifyRepositoryResponse.NodeView> repositoryMetadataResponse = response.getNodes();
// end::verify-repository-response
assertThat(1, equalTo(repositoryMetadataResponse.size()));
final boolean async = Booleans.parseBoolean(System.getProperty("tests.rest.async", "false"));
if (async) {
assertThat("asyncIntegTest-0", equalTo(repositoryMetadataResponse.get(0).getName()));
} else {
assertThat("integTest-0", equalTo(repositoryMetadataResponse.get(0).getName()));
}
}

public void testSnapshotVerifyRepositoryAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
VerifyRepositoryRequest request = new VerifyRepositoryRequest(repositoryName);

// tag::verify-repository-execute-listener
ActionListener<VerifyRepositoryResponse> listener =
new ActionListener<VerifyRepositoryResponse>() {
@Override
public void onResponse(VerifyRepositoryResponse verifyRepositoryRestResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::verify-repository-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::verify-repository-execute-async
client.snapshot().verifyRepositoryAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::verify-repository-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

public void testSnapshotCreate() throws IOException {
RestHighLevelClient client = highLevelClient();

CreateIndexRequest createIndexRequest = new CreateIndexRequest("test-index0");
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
createIndexRequest = new CreateIndexRequest("test-index1");
client.indices().create(createIndexRequest, RequestOptions.DEFAULT);

createTestRepositories();

// tag::create-snapshot-request
CreateSnapshotRequest request = new CreateSnapshotRequest();
// end::create-snapshot-request

// tag::create-snapshot-request-repositoryName
request.repository(repositoryName); // <1>
// end::create-snapshot-request-repositoryName
// tag::create-snapshot-request-snapshotName
request.snapshot(snapshotName); // <1>
// end::create-snapshot-request-snapshotName
// tag::create-snapshot-request-indices
request.indices("test-index0", "test-index1"); // <1>
// end::create-snapshot-request-indices
// tag::create-snapshot-request-indicesOptions
request.indicesOptions(IndicesOptions.fromOptions(false, false, true, true)); // <1>
// end::create-snapshot-request-indicesOptions
// tag::create-snapshot-request-partial
request.partial(false); // <1>
// end::create-snapshot-request-partial
// tag::create-snapshot-request-includeGlobalState
request.includeGlobalState(true); // <1>
// end::create-snapshot-request-includeGlobalState

// tag::create-snapshot-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::create-snapshot-request-masterTimeout
// tag::create-snapshot-request-waitForCompletion
request.waitForCompletion(true); // <1>
// end::create-snapshot-request-waitForCompletion

// tag::create-snapshot-execute
CreateSnapshotResponse response = client.snapshot().create(request, RequestOptions.DEFAULT);
// end::create-snapshot-execute

// tag::create-snapshot-response
RestStatus status = response.status(); // <1>
// end::create-snapshot-response

assertEquals(RestStatus.OK, status);

// tag::create-snapshot-response-snapshot-info
SnapshotInfo snapshotInfo = response.getSnapshotInfo(); // <1>
// end::create-snapshot-response-snapshot-info

assertNotNull(snapshotInfo);
}

public void testSnapshotCreateAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
CreateSnapshotRequest request = new CreateSnapshotRequest(repositoryName, snapshotName);

// tag::create-snapshot-execute-listener
ActionListener<CreateSnapshotResponse> listener =
new ActionListener<CreateSnapshotResponse>() {
@Override
public void onResponse(CreateSnapshotResponse createSnapshotResponse) {
// <1>
}

@Override
public void onFailure(Exception exception) {
// <2>
}
};
// end::create-snapshot-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::create-snapshot-execute-async
client.snapshot().createAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::create-snapshot-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

@SuppressWarnings("unused")
public void testSnapshotGetSnapshots() throws IOException {
RestHighLevelClient client = highLevelClient();

createTestRepositories();
createTestIndex();
createTestSnapshots();

// tag::get-snapshots-request
GetSnapshotsRequest request = new GetSnapshotsRequest();
// end::get-snapshots-request

// tag::get-snapshots-request-repositoryName
request.repositories(repositoryName); // <1>
// end::get-snapshots-request-repositoryName

// tag::get-snapshots-request-snapshots
String[] snapshots = { snapshotName };
request.snapshots(snapshots); // <1>
// end::get-snapshots-request-snapshots

// tag::get-snapshots-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::get-snapshots-request-masterTimeout

// tag::get-snapshots-request-verbose
request.verbose(true); // <1>
// end::get-snapshots-request-verbose

// tag::get-snapshots-request-ignore-unavailable
request.ignoreUnavailable(false); // <1>
// end::get-snapshots-request-ignore-unavailable

// tag::get-snapshots-execute
GetSnapshotsResponse response = client.snapshot().get(request, RequestOptions.DEFAULT);
// end::get-snapshots-execute

// tag::get-snapshots-response
List<SnapshotInfo> snapshotsInfos = response.getSnapshots();
SnapshotInfo snapshotInfo = snapshotsInfos.get(0);
RestStatus restStatus = snapshotInfo.status(); // <1>
SnapshotId snapshotId = snapshotInfo.snapshotId(); // <2>
SnapshotState snapshotState = snapshotInfo.state(); // <3>
List<SnapshotShardFailure> snapshotShardFailures = snapshotInfo.shardFailures(); // <4>
long startTime = snapshotInfo.startTime(); // <5>
long endTime = snapshotInfo.endTime(); // <6>
// end::get-snapshots-response
assertEquals(1, snapshotsInfos.size());
}

public void testSnapshotGetSnapshotsAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
GetSnapshotsRequest request = new GetSnapshotsRequest(repositoryName);

// tag::get-snapshots-execute-listener
ActionListener<GetSnapshotsResponse> listener =
new ActionListener<GetSnapshotsResponse>() {
@Override
public void onResponse(GetSnapshotsResponse getSnapshotsResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::get-snapshots-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::get-snapshots-execute-async
client.snapshot().getAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::get-snapshots-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

public void testSnapshotSnapshotsStatus() throws IOException {
RestHighLevelClient client = highLevelClient();
createTestRepositories();
createTestIndex();
createTestSnapshots();

// tag::snapshots-status-request
SnapshotsStatusRequest request = new SnapshotsStatusRequest();
// end::snapshots-status-request

// tag::snapshots-status-request-repository
request.repository(repositoryName); // <1>
// end::snapshots-status-request-repository
// tag::snapshots-status-request-snapshots
String [] snapshots = new String[] {snapshotName};
request.snapshots(snapshots); // <1>
// end::snapshots-status-request-snapshots
// tag::snapshots-status-request-ignoreUnavailable
request.ignoreUnavailable(true); // <1>
// end::snapshots-status-request-ignoreUnavailable
// tag::snapshots-status-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::snapshots-status-request-masterTimeout

// tag::snapshots-status-execute
SnapshotsStatusResponse response = client.snapshot().status(request, RequestOptions.DEFAULT);
// end::snapshots-status-execute

// tag::snapshots-status-response
List<SnapshotStatus> snapshotStatusesResponse = response.getSnapshots();
SnapshotStatus snapshotStatus = snapshotStatusesResponse.get(0); // <1>
SnapshotsInProgress.State snapshotState = snapshotStatus.getState(); // <2>
SnapshotStats shardStats = snapshotStatus.getIndices().get(indexName).getShards().get(0).getStats(); // <3>
// end::snapshots-status-response
assertThat(snapshotStatusesResponse.size(), equalTo(1));
assertThat(snapshotStatusesResponse.get(0).getSnapshot().getRepository(), equalTo(SnapshotClientDocumentationIT.repositoryName));
assertThat(snapshotStatusesResponse.get(0).getSnapshot().getSnapshotId().getName(), equalTo(snapshotName));
assertThat(snapshotState.completed(), equalTo(true));
}

public void testSnapshotSnapshotsStatusAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
SnapshotsStatusRequest request = new SnapshotsStatusRequest();

// tag::snapshots-status-execute-listener
ActionListener<SnapshotsStatusResponse> listener =
new ActionListener<SnapshotsStatusResponse>() {
@Override
public void onResponse(SnapshotsStatusResponse snapshotsStatusResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::snapshots-status-execute-listener

// Replace the empty listener with a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::snapshots-status-execute-async
client.snapshot().statusAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::snapshots-status-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

public void testSnapshotDeleteSnapshot() throws IOException {
RestHighLevelClient client = highLevelClient();

createTestRepositories();
createTestIndex();
createTestSnapshots();

// tag::delete-snapshot-request
DeleteSnapshotRequest request = new DeleteSnapshotRequest(repositoryName);
request.snapshots(snapshotName);
// end::delete-snapshot-request

// tag::delete-snapshot-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::delete-snapshot-request-masterTimeout

// tag::delete-snapshot-execute
AcknowledgedResponse response = client.snapshot().delete(request, RequestOptions.DEFAULT);
// end::delete-snapshot-execute

// tag::delete-snapshot-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::delete-snapshot-response
assertTrue(acknowledged);
}

public void testSnapshotDeleteSnapshotAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
DeleteSnapshotRequest request = new DeleteSnapshotRequest();

// tag::delete-snapshot-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse deleteSnapshotResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::delete-snapshot-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::delete-snapshot-execute-async
client.snapshot().deleteAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::delete-snapshot-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

public void testCloneSnapshot() throws IOException {
RestHighLevelClient client = highLevelClient();

createTestRepositories();
createTestIndex();
createTestSnapshots();

String sourceSnapshotName = snapshotName;
String targetSnapshotName = snapshotName + "_clone";
String[] indices = new String[] { indexName };

// tag::clone-snapshot-request
CloneSnapshotRequest request = new CloneSnapshotRequest(repositoryName, sourceSnapshotName, targetSnapshotName, indices);
// end::clone-snapshot-request

// tag::clone-snapshot-request-indices
request.indices("test_index"); // <1>
// end::clone-snapshot-request-indices

// tag::clone-snapshot-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
request.masterNodeTimeout("1m"); // <2>
// end::clone-snapshot-request-masterTimeout

// tag::clone-snapshot-request-index-settings
request.indicesOptions(new IndicesOptions(
EnumSet.of(IndicesOptions.Option.IGNORE_UNAVAILABLE), // <1>
EnumSet.of(
IndicesOptions.WildcardStates.OPEN,
IndicesOptions.WildcardStates.CLOSED,
IndicesOptions.WildcardStates.HIDDEN))
);
// end::clone-snapshot-request-index-settings

// tag::clone-snapshot-execute
AcknowledgedResponse response = client.snapshot().clone(request, RequestOptions.DEFAULT);
// end::clone-snapshot-execute

// tag::clone-snapshot-response
boolean acknowledged = response.isAcknowledged(); // <1>
// end::clone-snapshot-response
assertTrue(acknowledged);
}

public void testCloneSnapshotAsync() throws InterruptedException {
RestHighLevelClient client = highLevelClient();
{
String targetSnapshot = snapshotName + "_clone";
CloneSnapshotRequest request = new CloneSnapshotRequest(
repositoryName,
snapshotName,
targetSnapshot,
new String[] { indexName }
);

// tag::clone-snapshot-execute-listener
ActionListener<AcknowledgedResponse> listener =
new ActionListener<AcknowledgedResponse>() {
@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::clone-snapshot-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::clone-snapshot-execute-async
client.snapshot().cloneAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::clone-snapshot-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
}

private void createTestRepositories() throws IOException {
PutRepositoryRequest request = new PutRepositoryRequest(repositoryName);
request.type(FsRepository.TYPE);
request.settings("{\"location\": \".\"}", XContentType.JSON);
assertTrue(highLevelClient().snapshot().createRepository(request, RequestOptions.DEFAULT).isAcknowledged());
}

private void createTestIndex() throws IOException {
createIndex(indexName, Settings.EMPTY);
}

private void createTestSnapshots() throws IOException {
Request createSnapshot = new Request("put", String.format(Locale.ROOT, "_snapshot/%s/%s", repositoryName, snapshotName));
createSnapshot.addParameter("wait_for_completion", "true");
createSnapshot.setJsonEntity("{\"indices\":\"" + indexName + "\"}");
Response response = highLevelClient().getLowLevelClient().performRequest(createSnapshot);
// check that the request went ok without parsing JSON here. When using the high level client, check acknowledgement instead.
assertEquals(200, response.getStatusLine().getStatusCode());
}
}
@@ -1,308 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptRequest;
import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse;
import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.StoredScriptSource;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentType;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;

/**
 * This class is used to generate the Java Stored Scripts API documentation.
 * You need to wrap your code between two tags like:
 * // tag::example
 * // end::example
 *
 * Where example is your tag name.
 *
 * Then in the documentation, you can extract what is between tag and end tags with
 * ["source","java",subs="attributes,callouts,macros"]
 * --------------------------------------------------
 * include-tagged::{doc-tests}/StoredScriptsDocumentationIT.java[example]
 * --------------------------------------------------
 *
 * The column width of the code block is 84. If the code contains a line longer
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
 * (the code indentation of the tag is not included in the width)
 */
@SuppressWarnings("removal")
public class StoredScriptsDocumentationIT extends ESRestHighLevelClientTestCase {

@SuppressWarnings("unused")
public void testGetStoredScript() throws Exception {
RestHighLevelClient client = highLevelClient();

final StoredScriptSource scriptSource = new StoredScriptSource(
"painless",
"Math.log(_score * 2) + params.my_modifier",
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())
);

putStoredScript("calculate-score", scriptSource);

{
// tag::get-stored-script-request
GetStoredScriptRequest request = new GetStoredScriptRequest("calculate-score"); // <1>
// end::get-stored-script-request

// tag::get-stored-script-request-masterTimeout
request.masterNodeTimeout(TimeValue.timeValueSeconds(50)); // <1>
request.masterNodeTimeout("50s"); // <2>
// end::get-stored-script-request-masterTimeout

// tag::get-stored-script-execute
GetStoredScriptResponse getResponse = client.getScript(request, RequestOptions.DEFAULT);
// end::get-stored-script-execute

// tag::get-stored-script-response
StoredScriptSource storedScriptSource = getResponse.getSource(); // <1>

String lang = storedScriptSource.getLang(); // <2>
String source = storedScriptSource.getSource(); // <3>
Map<String, String> options = storedScriptSource.getOptions(); // <4>
// end::get-stored-script-response

assertThat(storedScriptSource, equalTo(scriptSource));

// tag::get-stored-script-execute-listener
ActionListener<GetStoredScriptResponse> listener =
new ActionListener<GetStoredScriptResponse>() {
@Override
public void onResponse(GetStoredScriptResponse response) {
// <1>
}

@Override
public void onFailure(Exception e) {
// <2>
}
};
// end::get-stored-script-execute-listener

// Replace the empty listener by a blocking listener in test
final CountDownLatch latch = new CountDownLatch(1);
listener = new LatchedActionListener<>(listener, latch);

// tag::get-stored-script-execute-async
client.getScriptAsync(request, RequestOptions.DEFAULT, listener); // <1>
// end::get-stored-script-execute-async

assertTrue(latch.await(30L, TimeUnit.SECONDS));
}

}

@SuppressWarnings("unused")
|
||||
public void testDeleteStoredScript() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
final StoredScriptSource scriptSource = new StoredScriptSource(
|
||||
"painless",
|
||||
"Math.log(_score * 2) + params.my_modifier",
|
||||
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType())
|
||||
);
|
||||
|
||||
putStoredScript("calculate-score", scriptSource);
|
||||
|
||||
// tag::delete-stored-script-request
|
||||
DeleteStoredScriptRequest deleteRequest = new DeleteStoredScriptRequest("calculate-score"); // <1>
|
||||
// end::delete-stored-script-request
|
||||
|
||||
// tag::delete-stored-script-request-masterTimeout
|
||||
deleteRequest.masterNodeTimeout(TimeValue.timeValueSeconds(50)); // <1>
|
||||
deleteRequest.masterNodeTimeout("50s"); // <2>
|
||||
// end::delete-stored-script-request-masterTimeout
|
||||
|
||||
// tag::delete-stored-script-request-timeout
|
||||
deleteRequest.timeout(TimeValue.timeValueSeconds(60)); // <1>
|
||||
deleteRequest.timeout("60s"); // <2>
|
||||
// end::delete-stored-script-request-timeout
|
||||
|
||||
// tag::delete-stored-script-execute
|
||||
AcknowledgedResponse deleteResponse = client.deleteScript(deleteRequest, RequestOptions.DEFAULT);
|
||||
// end::delete-stored-script-execute
|
||||
|
||||
// tag::delete-stored-script-response
|
||||
boolean acknowledged = deleteResponse.isAcknowledged();// <1>
|
||||
// end::delete-stored-script-response
|
||||
|
||||
putStoredScript("calculate-score", scriptSource);
|
||||
|
||||
// tag::delete-stored-script-execute-listener
|
||||
ActionListener<AcknowledgedResponse> listener =
|
||||
new ActionListener<AcknowledgedResponse>() {
|
||||
@Override
|
||||
public void onResponse(AcknowledgedResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::delete-stored-script-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::delete-stored-script-execute-async
|
||||
client.deleteScriptAsync(deleteRequest, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::delete-stored-script-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
public void testPutScript() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
// tag::put-stored-script-request
|
||||
PutStoredScriptRequest request = new PutStoredScriptRequest();
|
||||
request.id("id"); // <1>
|
||||
request.content(new BytesArray("""
|
||||
{
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"source": "Math.log(_score * 2) + params.multiplier"
|
||||
}
|
||||
}
|
||||
"""
|
||||
), XContentType.JSON); // <2>
|
||||
// end::put-stored-script-request
|
||||
|
||||
// tag::put-stored-script-context
|
||||
request.context("context"); // <1>
|
||||
// end::put-stored-script-context
|
||||
|
||||
// tag::put-stored-script-timeout
|
||||
request.timeout(TimeValue.timeValueMinutes(2)); // <1>
|
||||
request.timeout("2m"); // <2>
|
||||
// end::put-stored-script-timeout
|
||||
|
||||
// tag::put-stored-script-masterTimeout
|
||||
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
|
||||
request.masterNodeTimeout("1m"); // <2>
|
||||
// end::put-stored-script-masterTimeout
|
||||
}
|
||||
|
||||
{
|
||||
PutStoredScriptRequest request = new PutStoredScriptRequest();
|
||||
request.id("id");
|
||||
|
||||
// tag::put-stored-script-content-painless
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
builder.startObject();
|
||||
{
|
||||
builder.startObject("script");
|
||||
{
|
||||
builder.field("lang", "painless");
|
||||
builder.field("source", "Math.log(_score * 2) + params.multiplier");
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
request.content(BytesReference.bytes(builder), XContentType.JSON); // <1>
|
||||
// end::put-stored-script-content-painless
|
||||
|
||||
// tag::put-stored-script-execute
|
||||
AcknowledgedResponse putStoredScriptResponse = client.putScript(request, RequestOptions.DEFAULT);
|
||||
// end::put-stored-script-execute
|
||||
|
||||
// tag::put-stored-script-response
|
||||
boolean acknowledged = putStoredScriptResponse.isAcknowledged(); // <1>
|
||||
// end::put-stored-script-response
|
||||
|
||||
assertTrue(acknowledged);
|
||||
|
||||
// tag::put-stored-script-execute-listener
|
||||
ActionListener<AcknowledgedResponse> listener =
|
||||
new ActionListener<AcknowledgedResponse>() {
|
||||
@Override
|
||||
public void onResponse(AcknowledgedResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::put-stored-script-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::put-stored-script-execute-async
|
||||
client.putScriptAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::put-stored-script-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
{
|
||||
PutStoredScriptRequest request = new PutStoredScriptRequest();
|
||||
request.id("id");
|
||||
|
||||
// tag::put-stored-script-content-mustache
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
builder.startObject();
|
||||
{
|
||||
builder.startObject("script");
|
||||
{
|
||||
builder.field("lang", "mustache");
|
||||
builder.field("source", """
|
||||
{"query":{"match":{"title":"{{query_string}}"}}}""");
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
request.content(BytesReference.bytes(builder), XContentType.JSON); // <1>
|
||||
// end::put-stored-script-content-mustache
|
||||
|
||||
client.putScript(request, RequestOptions.DEFAULT);
|
||||
|
||||
Map<String, Object> script = getAsMap("/_scripts/id");
|
||||
assertThat(extractValue("script.lang", script), equalTo("mustache"));
|
||||
assertThat(extractValue("script.source", script), equalTo("""
|
||||
{"query":{"match":{"title":"{{query_string}}"}}}"""));
|
||||
}
|
||||
}
|
||||
|
||||
private void putStoredScript(String id, StoredScriptSource scriptSource) throws IOException {
|
||||
PutStoredScriptRequest request = new PutStoredScriptRequest(id, "score", new BytesArray("{}"), XContentType.JSON, scriptSource);
|
||||
assertAcked(execute(request, highLevelClient()::putScript, highLevelClient()::putScriptAsync));
|
||||
}
|
||||
}
@@ -1,220 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.tasks.CancelTasksRequest;
import org.elasticsearch.client.tasks.CancelTasksResponse;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;

import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static java.util.Collections.emptyList;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.notNullValue;

/**
 * This class is used to generate the Java Tasks API documentation.
 * You need to wrap your code between two tags like:
 * // tag::example
 * // end::example
 *
 * Where example is your tag name.
 *
 * Then in the documentation, you can extract what is between tag and end tags with
 * ["source","java",subs="attributes,callouts,macros"]
 * --------------------------------------------------
 * include-tagged::{doc-tests}/{@link TasksClientDocumentationIT}.java[example]
 * --------------------------------------------------
 *
 * The column width of the code block is 84. If the code contains a line longer
 * than 84, the line will be cut and a horizontal scroll bar will be displayed.
 * (the code indentation of the tag is not included in the width)
 */
@SuppressWarnings("removal")
public class TasksClientDocumentationIT extends ESRestHighLevelClientTestCase {

    @SuppressWarnings("unused")
    public void testListTasks() throws IOException {
        RestHighLevelClient client = highLevelClient();
        {
            // tag::list-tasks-request
            ListTasksRequest request = new ListTasksRequest();
            // end::list-tasks-request

            // tag::list-tasks-request-filter
            request.setActions("cluster:*"); // <1>
            request.setNodes("nodeId1", "nodeId2"); // <2>
            request.setTargetParentTaskId(new TaskId("parentTaskId", 42)); // <3>
            // end::list-tasks-request-filter

            // tag::list-tasks-request-detailed
            request.setDetailed(true); // <1>
            // end::list-tasks-request-detailed

            // tag::list-tasks-request-wait-completion
            request.setWaitForCompletion(true); // <1>
            request.setTimeout(TimeValue.timeValueSeconds(50)); // <2>
            request.setTimeout("50s"); // <3>
            // end::list-tasks-request-wait-completion
        }

        ListTasksRequest request = new ListTasksRequest();

        // tag::list-tasks-execute
        ListTasksResponse response = client.tasks().list(request, RequestOptions.DEFAULT);
        // end::list-tasks-execute

        assertThat(response, notNullValue());

        // tag::list-tasks-response-tasks
        List<TaskInfo> tasks = response.getTasks(); // <1>
        // end::list-tasks-response-tasks

        // tag::list-tasks-response-calc
        Map<String, List<TaskInfo>> perNodeTasks = response.getPerNodeTasks(); // <1>
        List<TaskGroup> groups = response.getTaskGroups(); // <2>
        // end::list-tasks-response-calc

        // tag::list-tasks-response-failures
        List<ElasticsearchException> nodeFailures = response.getNodeFailures(); // <1>
        List<TaskOperationFailure> taskFailures = response.getTaskFailures(); // <2>
        // end::list-tasks-response-failures

        assertThat(response.getNodeFailures(), equalTo(emptyList()));
        assertThat(response.getTaskFailures(), equalTo(emptyList()));
        assertThat(response.getTasks().size(), greaterThanOrEqualTo(2));
    }

    public void testListTasksAsync() throws Exception {
        RestHighLevelClient client = highLevelClient();
        {
            ListTasksRequest request = new ListTasksRequest();

            // tag::list-tasks-execute-listener
            ActionListener<ListTasksResponse> listener =
                new ActionListener<ListTasksResponse>() {
                    @Override
                    public void onResponse(ListTasksResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::list-tasks-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::list-tasks-execute-async
            client.tasks().listAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::list-tasks-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    @SuppressWarnings("unused")
    public void testCancelTasks() throws IOException {
        RestHighLevelClient client = highLevelClient();
        {
            // tag::cancel-tasks-request
            CancelTasksRequest request = new org.elasticsearch.client.tasks.CancelTasksRequest.Builder()
                .withNodesFiltered(List.of("nodeId1", "nodeId2"))
                .withActionsFiltered(List.of("cluster:*"))
                .build();
            // end::cancel-tasks-request

            // tag::cancel-tasks-request-filter
            CancelTasksRequest byTaskIdRequest = new org.elasticsearch.client.tasks.CancelTasksRequest.Builder() // <1>
                .withTaskId(new org.elasticsearch.client.tasks.TaskId("myNode",44L)) // <2>
                .withWaitForCompletion(true) // <3>
                .build(); // <4>
            // end::cancel-tasks-request-filter

        }

        CancelTasksRequest request = new org.elasticsearch.client.tasks.CancelTasksRequest.Builder().build();

        // tag::cancel-tasks-execute
        CancelTasksResponse response = client.tasks().cancel(request, RequestOptions.DEFAULT);
        // end::cancel-tasks-execute

        assertThat(response, notNullValue());

        // tag::cancel-tasks-response-tasks
        List<org.elasticsearch.client.tasks.TaskInfo> tasks = response.getTasks(); // <1>
        // end::cancel-tasks-response-tasks

        // tag::cancel-tasks-response-calc
        Map<String, List<org.elasticsearch.client.tasks.TaskInfo>> perNodeTasks = response.getPerNodeTasks(); // <1>
        List<org.elasticsearch.client.tasks.TaskGroup> groups = response.getTaskGroups(); // <2>
        // end::cancel-tasks-response-calc

        // tag::cancel-tasks-response-failures
        List<org.elasticsearch.client.tasks.ElasticsearchException> nodeFailures = response.getNodeFailures(); // <1>
        List<org.elasticsearch.client.tasks.TaskOperationFailure> taskFailures = response.getTaskFailures(); // <2>
        // end::cancel-tasks-response-failures

        assertThat(response.getNodeFailures(), equalTo(emptyList()));
        assertThat(response.getTaskFailures(), equalTo(emptyList()));
    }

    public void testAsyncCancelTasks() throws InterruptedException {

        RestHighLevelClient client = highLevelClient();
        {
            CancelTasksRequest request = new org.elasticsearch.client.tasks.CancelTasksRequest.Builder().build();

            // tag::cancel-tasks-execute-listener
            ActionListener<CancelTasksResponse> listener =
                new ActionListener<CancelTasksResponse>() {
                    @Override
                    public void onResponse(CancelTasksResponse response) {
                        // <1>
                    }
                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::cancel-tasks-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::cancel-tasks-execute-async
            client.tasks().cancelAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::cancel-tasks-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }
}
@@ -1,97 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.textstructure.FindStructureRequest;
import org.elasticsearch.client.textstructure.FindStructureResponse;
import org.elasticsearch.client.textstructure.structurefinder.TextStructure;

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

@SuppressWarnings("removal")
public class TextStructureClientDocumentationIT extends ESRestHighLevelClientTestCase {

    public void testFindStructure() throws Exception {
        RestHighLevelClient client = highLevelClient();

        Path anInterestingFile = createTempFile();
        String contents = """
            {"logger":"controller","timestamp":1478261151445,"level":"INFO","pid":42,"thread":"0x7fff7d2a8000","message":"message 1",\
            "class":"ml","method":"core::SomeNoiseMaker","file":"Noisemaker.cc","line":333}
            {"logger":"controller","timestamp":1478261151445,"level":"INFO","pid":42,"thread":"0x7fff7d2a8000","message":"message 2",\
            "class":"ml","method":"core::SomeNoiseMaker","file":"Noisemaker.cc","line":333}
            """;
        Files.write(anInterestingFile, Collections.singleton(contents), StandardCharsets.UTF_8);

        {
            // tag::find-structure-request
            FindStructureRequest request = new FindStructureRequest(); // <1>
            request.setSample(Files.readAllBytes(anInterestingFile)); // <2>
            // end::find-structure-request

            // tag::find-structure-request-options
            request.setLinesToSample(500); // <1>
            request.setExplain(true); // <2>
            // end::find-structure-request-options

            // tag::find-structure-execute
            FindStructureResponse response = client
                .textStructure()
                .findStructure(
                    request,
                    RequestOptions.DEFAULT
                );
            // end::find-structure-execute

            // tag::find-structure-response
            TextStructure structure = response.getFileStructure(); // <1>
            // end::find-structure-response
            assertEquals(2, structure.getNumLinesAnalyzed());
        }
        {
            // tag::find-structure-execute-listener
            ActionListener<FindStructureResponse> listener = new ActionListener<>() {
                @Override
                public void onResponse(FindStructureResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            // end::find-structure-execute-listener
            FindStructureRequest request = new FindStructureRequest();
            request.setSample(Files.readAllBytes(anInterestingFile));

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::find-structure-execute-async
            client
                .textStructure()
                .findStructureAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::find-structure-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

}
@@ -1,749 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.core.AcknowledgedResponse;
import org.elasticsearch.client.core.PageParams;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.client.transform.DeleteTransformRequest;
import org.elasticsearch.client.transform.GetTransformRequest;
import org.elasticsearch.client.transform.GetTransformResponse;
import org.elasticsearch.client.transform.GetTransformStatsRequest;
import org.elasticsearch.client.transform.GetTransformStatsResponse;
import org.elasticsearch.client.transform.PreviewTransformRequest;
import org.elasticsearch.client.transform.PreviewTransformResponse;
import org.elasticsearch.client.transform.PutTransformRequest;
import org.elasticsearch.client.transform.StartTransformRequest;
import org.elasticsearch.client.transform.StartTransformResponse;
import org.elasticsearch.client.transform.StopTransformRequest;
import org.elasticsearch.client.transform.StopTransformResponse;
import org.elasticsearch.client.transform.UpdateTransformRequest;
import org.elasticsearch.client.transform.UpdateTransformResponse;
import org.elasticsearch.client.transform.transforms.DestConfig;
import org.elasticsearch.client.transform.transforms.NodeAttributes;
import org.elasticsearch.client.transform.transforms.QueryConfig;
import org.elasticsearch.client.transform.transforms.RetentionPolicyConfig;
import org.elasticsearch.client.transform.transforms.SettingsConfig;
import org.elasticsearch.client.transform.transforms.SourceConfig;
import org.elasticsearch.client.transform.transforms.SyncConfig;
import org.elasticsearch.client.transform.transforms.TimeRetentionPolicyConfig;
import org.elasticsearch.client.transform.transforms.TimeSyncConfig;
import org.elasticsearch.client.transform.transforms.TransformConfig;
import org.elasticsearch.client.transform.transforms.TransformConfigUpdate;
import org.elasticsearch.client.transform.transforms.TransformIndexerStats;
import org.elasticsearch.client.transform.transforms.TransformProgress;
import org.elasticsearch.client.transform.transforms.TransformStats;
import org.elasticsearch.client.transform.transforms.pivot.AggregationConfig;
import org.elasticsearch.client.transform.transforms.pivot.GroupConfig;
import org.elasticsearch.client.transform.transforms.pivot.PivotConfig;
import org.elasticsearch.client.transform.transforms.pivot.TermsGroupSource;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.xcontent.XContentBuilder;
import org.junit.After;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;

@SuppressWarnings("removal")
public class TransformDocumentationIT extends ESRestHighLevelClientTestCase {

    private List<String> transformsToClean = new ArrayList<>();

    @After
    public void cleanUpTransforms() throws Exception {
        for (String transformId : transformsToClean) {
            adminHighLevelClient().transform()
                .stopTransform(new StopTransformRequest(transformId, true, TimeValue.timeValueSeconds(20), false), RequestOptions.DEFAULT);
        }

        for (String transformId : transformsToClean) {
            adminHighLevelClient().transform().deleteTransform(new DeleteTransformRequest(transformId), RequestOptions.DEFAULT);
        }

        transformsToClean = new ArrayList<>();
        waitForPendingTasks(adminClient());
    }

    private void createIndex(String indexName) throws IOException {

        XContentBuilder builder = jsonBuilder();
        builder.startObject()
            .startObject("properties")
            .startObject("timestamp")
            .field("type", "date")
            .endObject()
            .startObject("user_id")
            .field("type", "keyword")
            .endObject()
            .startObject("stars")
            .field("type", "integer")
            .endObject()
            .endObject()
            .endObject();

        CreateIndexRequest request = new CreateIndexRequest(indexName);
        request.mapping(builder);
        CreateIndexResponse response = highLevelClient().indices().create(request, RequestOptions.DEFAULT);
        assertTrue(response.isAcknowledged());
    }

    public void testPutTransform() throws IOException, InterruptedException {
        createIndex("source-index");

        RestHighLevelClient client = highLevelClient();

        // tag::put-transform-query-config
        QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder());
        // end::put-transform-query-config
        // tag::put-transform-source-config
        SourceConfig sourceConfig = SourceConfig.builder()
            .setIndex("source-index")
            .setQueryConfig(queryConfig).build();
        // end::put-transform-source-config
        // tag::put-transform-dest-config
        DestConfig destConfig = DestConfig.builder()
            .setIndex("pivot-destination")
            .setPipeline("my-pipeline").build();
        // end::put-transform-dest-config
        destConfig = DestConfig.builder().setIndex("pivot-destination").build();
        // tag::put-transform-group-config
        GroupConfig groupConfig = GroupConfig.builder()
            .groupBy("reviewer", // <1>
                TermsGroupSource.builder().setField("user_id").build()) // <2>
            .build();
        // end::put-transform-group-config
        // tag::put-transform-agg-config
        AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder();
        aggBuilder.addAggregator(
            AggregationBuilders.avg("avg_rating").field("stars")); // <1>
        AggregationConfig aggConfig = new AggregationConfig(aggBuilder);
        // end::put-transform-agg-config
        // tag::put-transform-pivot-config
        PivotConfig pivotConfig = PivotConfig.builder()
            .setGroups(groupConfig) // <1>
            .setAggregationConfig(aggConfig) // <2>
            .build();
        // end::put-transform-pivot-config
        // tag::put-transform-settings-config
        SettingsConfig settings = SettingsConfig.builder()
            .setMaxPageSearchSize(1000) // <1>
            .build();
        // end::put-transform-settings-config
        // tag::put-transform-retention-policy-config
        RetentionPolicyConfig retentionPolicy = TimeRetentionPolicyConfig.builder()
            .setField("time-field") // <1>
            .setMaxAge(TimeValue.timeValueDays(30)) // <2>
            .build();
        // end::put-transform-retention-policy-config
        // tag::put-transform-sync-config
        SyncConfig syncConfig = TimeSyncConfig.builder()
            .setField("time-field") // <1>
            .setDelay(TimeValue.timeValueSeconds(30)) // <2>
            .build();
        // end::put-transform-sync-config
        // tag::put-transform-config
        TransformConfig transformConfig = TransformConfig
            .builder()
            .setId("reviewer-avg-rating") // <1>
            .setSource(sourceConfig) // <2>
            .setDest(destConfig) // <3>
            .setFrequency(TimeValue.timeValueSeconds(15)) // <4>
            .setPivotConfig(pivotConfig) // <5>
            .setDescription("This is my test transform") // <6>
            .setSettings(settings) // <7>
            .setRetentionPolicyConfig(retentionPolicy) // <8>
            .setSyncConfig(syncConfig) // <9>
            .build();
        // end::put-transform-config

        {
            // tag::put-transform-request
            PutTransformRequest request =
                new PutTransformRequest(transformConfig); // <1>
            request.setDeferValidation(false); // <2>
            // end::put-transform-request

            // tag::put-transform-execute
            AcknowledgedResponse response =
                client.transform().putTransform(
                    request, RequestOptions.DEFAULT);
            // end::put-transform-execute
            transformsToClean.add(request.getConfig().getId());

            assertTrue(response.isAcknowledged());
        }
        {
            TransformConfig configWithDifferentId = TransformConfig.builder()
                .setId("reviewer-avg-rating2")
                .setSource(transformConfig.getSource())
                .setDest(transformConfig.getDestination())
                .setPivotConfig(transformConfig.getPivotConfig())
                .build();
            PutTransformRequest request = new PutTransformRequest(configWithDifferentId);

            // tag::put-transform-execute-listener
            ActionListener<AcknowledgedResponse> listener =
                new ActionListener<AcknowledgedResponse>() {
                    @Override
                    public void onResponse(AcknowledgedResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::put-transform-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::put-transform-execute-async
            client.transform().putTransformAsync(
                request, RequestOptions.DEFAULT, listener); // <1>
            // end::put-transform-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
            transformsToClean.add(request.getConfig().getId());
        }
    }

    public void testUpdateTransform() throws IOException, InterruptedException {
        createIndex("source-data");

        RestHighLevelClient client = highLevelClient();
        QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder());
        GroupConfig groupConfig = GroupConfig.builder().groupBy("reviewer", TermsGroupSource.builder().setField("user_id").build()).build();
        AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder();
        aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars"));
        AggregationConfig aggConfig = new AggregationConfig(aggBuilder);
        PivotConfig pivotConfig = PivotConfig.builder().setGroups(groupConfig).setAggregationConfig(aggConfig).build();

        TransformConfig transformConfig = TransformConfig.builder()
            .setId("my-transform-to-update")
            .setSource(SourceConfig.builder().setIndex("source-data").setQueryConfig(queryConfig).build())
            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
            .setPivotConfig(pivotConfig)
            .setSyncConfig(TimeSyncConfig.builder().setField("time-field").setDelay(TimeValue.timeValueSeconds(120)).build())
            .build();

        client.transform().putTransform(new PutTransformRequest(transformConfig), RequestOptions.DEFAULT);
        transformsToClean.add(transformConfig.getId());

        // tag::update-transform-config
        TransformConfigUpdate update = TransformConfigUpdate
            .builder()
            .setSource(SourceConfig.builder()
                .setIndex("source-data")
                .build()) // <1>
            .setDest(DestConfig.builder()
                .setIndex("pivot-dest")
                .build()) // <2>
            .setFrequency(TimeValue.timeValueSeconds(15)) // <3>
            .setSyncConfig(TimeSyncConfig.builder()
                .setField("time-field")
                .setDelay(TimeValue.timeValueSeconds(120))
                .build()) // <4>
            .setDescription("This is my updated transform") // <5>
            .setRetentionPolicyConfig(TimeRetentionPolicyConfig.builder()
                .setField("time-field")
                .setMaxAge(TimeValue.timeValueDays(30))
                .build()) // <6>
            .build();
        // end::update-transform-config

        {
            // tag::update-transform-request
            UpdateTransformRequest request =
                new UpdateTransformRequest(
                    update, // <1>
                    "my-transform-to-update"); // <2>
            request.setDeferValidation(false); // <3>
            // end::update-transform-request

            // tag::update-transform-execute
            UpdateTransformResponse response =
                client.transform().updateTransform(request,
                    RequestOptions.DEFAULT);
            TransformConfig updatedConfig =
                response.getTransformConfiguration();
            // end::update-transform-execute

            assertThat(updatedConfig.getDescription(), equalTo("This is my updated transform"));
        }
        {
            UpdateTransformRequest request = new UpdateTransformRequest(update, "my-transform-to-update");

            // tag::update-transform-execute-listener
            ActionListener<UpdateTransformResponse> listener =
                new ActionListener<UpdateTransformResponse>() {
                    @Override
                    public void onResponse(UpdateTransformResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::update-transform-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::update-transform-execute-async
            client.transform().updateTransformAsync(
                request, RequestOptions.DEFAULT, listener); // <1>
            // end::update-transform-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testStartStop() throws IOException, InterruptedException {
        createIndex("source-data");

        RestHighLevelClient client = highLevelClient();

        QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder());
        GroupConfig groupConfig = GroupConfig.builder().groupBy("reviewer", TermsGroupSource.builder().setField("user_id").build()).build();
        AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder();
        aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars"));
        AggregationConfig aggConfig = new AggregationConfig(aggBuilder);
        PivotConfig pivotConfig = PivotConfig.builder().setGroups(groupConfig).setAggregationConfig(aggConfig).build();

        TransformConfig transformConfig = TransformConfig.builder()
            .setId("mega-transform")
            .setSource(SourceConfig.builder().setIndex("source-data").setQueryConfig(queryConfig).build())
            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
            .setPivotConfig(pivotConfig)
            .build();

        client.transform().putTransform(new PutTransformRequest(transformConfig), RequestOptions.DEFAULT);
        transformsToClean.add(transformConfig.getId());

        {
            // tag::start-transform-request
            StartTransformRequest request =
                new StartTransformRequest("mega-transform"); // <1>
            // end::start-transform-request

            // tag::start-transform-request-options
            request.setTimeout(TimeValue.timeValueSeconds(20)); // <1>
            // end::start-transform-request-options

            // tag::start-transform-execute
            StartTransformResponse response =
                client.transform().startTransform(
                    request, RequestOptions.DEFAULT);
            // end::start-transform-execute

            assertTrue(response.isAcknowledged());
        }
        {
            // tag::stop-transform-request
            StopTransformRequest request =
                new StopTransformRequest("mega-transform"); // <1>
            // end::stop-transform-request

            // tag::stop-transform-request-options
            request.setWaitForCompletion(Boolean.TRUE); // <1>
            request.setTimeout(TimeValue.timeValueSeconds(30)); // <2>
            request.setAllowNoMatch(true); // <3>
            // end::stop-transform-request-options

            // tag::stop-transform-execute
            StopTransformResponse response =
                client.transform().stopTransform(
                    request, RequestOptions.DEFAULT);
            // end::stop-transform-execute

            assertTrue(response.isAcknowledged());
        }
        {
            // tag::start-transform-execute-listener
            ActionListener<StartTransformResponse> listener =
                new ActionListener<StartTransformResponse>() {
                    @Override
                    public void onResponse(
                        StartTransformResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::start-transform-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            StartTransformRequest request = new StartTransformRequest("mega-transform");
            // tag::start-transform-execute-async
            client.transform().startTransformAsync(
                request, RequestOptions.DEFAULT, listener); // <1>
            // end::start-transform-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
        {
            // tag::stop-transform-execute-listener
            ActionListener<StopTransformResponse> listener =
                new ActionListener<StopTransformResponse>() {
                    @Override
                    public void onResponse(
                        StopTransformResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::stop-transform-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            StopTransformRequest request = new StopTransformRequest("mega-transform");
            // tag::stop-transform-execute-async
            client.transform().stopTransformAsync(
                request, RequestOptions.DEFAULT, listener); // <1>
            // end::stop-transform-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testDeleteTransform() throws IOException, InterruptedException {
        createIndex("source-data");

        RestHighLevelClient client = highLevelClient();

        GroupConfig groupConfig = GroupConfig.builder().groupBy("reviewer", TermsGroupSource.builder().setField("user_id").build()).build();
        AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder();
        aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars"));
        AggregationConfig aggConfig = new AggregationConfig(aggBuilder);
        PivotConfig pivotConfig = PivotConfig.builder().setGroups(groupConfig).setAggregationConfig(aggConfig).build();

        TransformConfig transformConfig1 = TransformConfig.builder()
            .setId("mega-transform")
            .setSource(SourceConfig.builder().setIndex("source-data").setQuery(new MatchAllQueryBuilder()).build())
            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
            .setPivotConfig(pivotConfig)
            .build();
        TransformConfig transformConfig2 = TransformConfig.builder()
            .setId("mega-transform2")
            .setSource(SourceConfig.builder().setIndex("source-data").setQuery(new MatchAllQueryBuilder()).build())
            .setDest(DestConfig.builder().setIndex("pivot-dest2").build())
            .setPivotConfig(pivotConfig)
            .build();

        client.transform().putTransform(new PutTransformRequest(transformConfig1), RequestOptions.DEFAULT);
        client.transform().putTransform(new PutTransformRequest(transformConfig2), RequestOptions.DEFAULT);

        {
            // tag::delete-transform-request
            DeleteTransformRequest request =
                new DeleteTransformRequest("mega-transform"); // <1>
            request.setForce(false); // <2>
            // end::delete-transform-request

            // tag::delete-transform-execute
            AcknowledgedResponse response =
                client.transform()
                    .deleteTransform(request, RequestOptions.DEFAULT);
            // end::delete-transform-execute

            assertTrue(response.isAcknowledged());
        }
        {
            // tag::delete-transform-execute-listener
            ActionListener<AcknowledgedResponse> listener =
                new ActionListener<AcknowledgedResponse>() {
                    @Override
                    public void onResponse(AcknowledgedResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::delete-transform-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            DeleteTransformRequest request = new DeleteTransformRequest("mega-transform2");

            // tag::delete-transform-execute-async
            client.transform().deleteTransformAsync(
                request, RequestOptions.DEFAULT, listener); // <1>
            // end::delete-transform-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testPreview() throws IOException, InterruptedException {
        createIndex("source-data");

        RestHighLevelClient client = highLevelClient();

        QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder());
        GroupConfig groupConfig = GroupConfig.builder().groupBy("reviewer", TermsGroupSource.builder().setField("user_id").build()).build();
        AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder();
        aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars"));
        AggregationConfig aggConfig = new AggregationConfig(aggBuilder);
        PivotConfig pivotConfig = PivotConfig.builder().setGroups(groupConfig).setAggregationConfig(aggConfig).build();

        // tag::preview-transform-request
        TransformConfig transformConfig =
            TransformConfig.forPreview(
                SourceConfig.builder()
                    .setIndex("source-data")
                    .setQueryConfig(queryConfig)
                    .build(), // <1>
                pivotConfig); // <2>

        PreviewTransformRequest request =
            new PreviewTransformRequest(transformConfig); // <3>
        // end::preview-transform-request

        {
            // tag::preview-transform-execute
            PreviewTransformResponse response =
                client.transform()
                    .previewTransform(request, RequestOptions.DEFAULT);
            // end::preview-transform-execute

            assertNotNull(response.getDocs());
            assertNotNull(response.getMappings());
        }
        {
            // tag::preview-transform-execute-listener
            ActionListener<PreviewTransformResponse> listener =
                new ActionListener<PreviewTransformResponse>() {
                    @Override
                    public void onResponse(PreviewTransformResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::preview-transform-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::preview-transform-execute-async
            client.transform().previewTransformAsync(
                request, RequestOptions.DEFAULT, listener); // <1>
            // end::preview-transform-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testGetStats() throws IOException, InterruptedException {
        createIndex("source-data");

        RestHighLevelClient client = highLevelClient();

        GroupConfig groupConfig = GroupConfig.builder().groupBy("reviewer", TermsGroupSource.builder().setField("user_id").build()).build();
        AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder();
        aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars"));
        AggregationConfig aggConfig = new AggregationConfig(aggBuilder);
        PivotConfig pivotConfig = PivotConfig.builder().setGroups(groupConfig).setAggregationConfig(aggConfig).build();

        String id = "statisitcal-transform";
        TransformConfig transformConfig = TransformConfig.builder()
            .setId(id)
            .setSource(SourceConfig.builder().setIndex("source-data").setQuery(new MatchAllQueryBuilder()).build())
            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
            .setPivotConfig(pivotConfig)
            .build();
        client.transform().putTransform(new PutTransformRequest(transformConfig), RequestOptions.DEFAULT);
        transformsToClean.add(id);

        // tag::get-transform-stats-request
        GetTransformStatsRequest request =
            new GetTransformStatsRequest(id); // <1>
        // end::get-transform-stats-request

        // tag::get-transform-stats-request-options
        request.setPageParams(new PageParams(0, 100)); // <1>
        request.setAllowNoMatch(true); // <2>
        // end::get-transform-stats-request-options

        {
            // tag::get-transform-stats-execute
            GetTransformStatsResponse response =
                client.transform()
                    .getTransformStats(request, RequestOptions.DEFAULT);
            // end::get-transform-stats-execute

            assertThat(response.getTransformsStats(), hasSize(1));

            // tag::get-transform-stats-response
            TransformStats stats =
                response.getTransformsStats().get(0); // <1>
            TransformStats.State state =
                stats.getState(); // <2>
            TransformIndexerStats indexerStats =
                stats.getIndexerStats(); // <3>
            TransformProgress progress =
                stats.getCheckpointingInfo()
                    .getNext().getCheckpointProgress(); // <4>
            NodeAttributes node =
                stats.getNode(); // <5>
            // end::get-transform-stats-response

            assertEquals(TransformStats.State.STOPPED, state);
            assertNotNull(indexerStats);
            assertNull(progress);
        }
        {
            // tag::get-transform-stats-execute-listener
            ActionListener<GetTransformStatsResponse> listener =
                new ActionListener<GetTransformStatsResponse>() {
                    @Override
                    public void onResponse(
                        GetTransformStatsResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::get-transform-stats-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::get-transform-stats-execute-async
            client.transform().getTransformStatsAsync(
                request, RequestOptions.DEFAULT, listener); // <1>
            // end::get-transform-stats-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testGetTransform() throws IOException, InterruptedException {
        createIndex("source-data");

        GroupConfig groupConfig = GroupConfig.builder().groupBy("reviewer", TermsGroupSource.builder().setField("user_id").build()).build();
        AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder();
        aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars"));
        AggregationConfig aggConfig = new AggregationConfig(aggBuilder);
        PivotConfig pivotConfig = PivotConfig.builder().setGroups(groupConfig).setAggregationConfig(aggConfig).build();

        TransformConfig putTransformConfig = TransformConfig.builder()
            .setId("mega-transform")
            .setSource(SourceConfig.builder().setIndex("source-data").setQuery(new MatchAllQueryBuilder()).build())
            .setDest(DestConfig.builder().setIndex("pivot-dest").build())
            .setPivotConfig(pivotConfig)
            .build();

        RestHighLevelClient client = highLevelClient();
        client.transform().putTransform(new PutTransformRequest(putTransformConfig), RequestOptions.DEFAULT);
        transformsToClean.add(putTransformConfig.getId());

        {
            // tag::get-transform-request
            GetTransformRequest request =
                new GetTransformRequest("mega-transform"); // <1>
            // end::get-transform-request

            // tag::get-transform-request-options
            request.setPageParams(new PageParams(0, 100)); // <1>
            request.setAllowNoMatch(true); // <2>
            request.setExcludeGenerated(false); // <3>
            // end::get-transform-request-options

            // tag::get-transform-execute
            GetTransformResponse response =
                client.transform()
                    .getTransform(request, RequestOptions.DEFAULT);
            // end::get-transform-execute

            // tag::get-transform-response
            List<TransformConfig> transformConfigs =
                response.getTransformConfigurations();
            // end::get-transform-response

            assertEquals(1, transformConfigs.size());
        }
        {
            // tag::get-transform-execute-listener
            ActionListener<GetTransformResponse> listener =
                new ActionListener<GetTransformResponse>() {
                    @Override
                    public void onResponse(GetTransformResponse response) {
                        // <1>
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // <2>
                    }
                };
            // end::get-transform-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            GetTransformRequest request = new GetTransformRequest("mega-transform");

            // tag::get-transform-execute-async
            client.transform().getTransformAsync(
                request, RequestOptions.DEFAULT, listener); // <1>
            // end::get-transform-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }
}
@@ -1,611 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.watcher.AckWatchRequest;
import org.elasticsearch.client.watcher.AckWatchResponse;
import org.elasticsearch.client.watcher.ActionStatus;
import org.elasticsearch.client.watcher.ActionStatus.AckStatus;
import org.elasticsearch.client.watcher.ActivateWatchRequest;
import org.elasticsearch.client.watcher.ActivateWatchResponse;
import org.elasticsearch.client.watcher.DeactivateWatchRequest;
import org.elasticsearch.client.watcher.DeactivateWatchResponse;
import org.elasticsearch.client.watcher.DeleteWatchRequest;
import org.elasticsearch.client.watcher.DeleteWatchResponse;
import org.elasticsearch.client.watcher.ExecuteWatchRequest;
import org.elasticsearch.client.watcher.ExecuteWatchResponse;
import org.elasticsearch.client.watcher.GetWatchRequest;
import org.elasticsearch.client.watcher.GetWatchResponse;
import org.elasticsearch.client.watcher.PutWatchRequest;
import org.elasticsearch.client.watcher.PutWatchResponse;
import org.elasticsearch.client.watcher.StartWatchServiceRequest;
import org.elasticsearch.client.watcher.StopWatchServiceRequest;
import org.elasticsearch.client.watcher.WatchStatus;
import org.elasticsearch.client.watcher.WatcherStatsRequest;
import org.elasticsearch.client.watcher.WatcherStatsResponse;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xcontent.ObjectPath;
import org.elasticsearch.xcontent.XContentType;

import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.is;

@SuppressWarnings("removal")
public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {

    public void testStartStopWatchService() throws Exception {
        RestHighLevelClient client = highLevelClient();

        {
            //tag::start-watch-service-request
            StartWatchServiceRequest request = new StartWatchServiceRequest();
            //end::start-watch-service-request

            //tag::start-watch-service-execute
            AcknowledgedResponse response = client.watcher().startWatchService(request, RequestOptions.DEFAULT);
            //end::start-watch-service-execute

            //tag::start-watch-service-response
            boolean isAcknowledged = response.isAcknowledged(); // <1>
            //end::start-watch-service-response
        }

        {
            //tag::stop-watch-service-request
            StopWatchServiceRequest request = new StopWatchServiceRequest();
            //end::stop-watch-service-request

            //tag::stop-watch-service-execute
            AcknowledgedResponse response = client.watcher().stopWatchService(request, RequestOptions.DEFAULT);
            //end::stop-watch-service-execute

            //tag::stop-watch-service-response
            boolean isAcknowledged = response.isAcknowledged(); // <1>
            //end::stop-watch-service-response
        }

        {
            StartWatchServiceRequest request = new StartWatchServiceRequest();

            // tag::start-watch-service-execute-listener
            ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            // end::start-watch-service-execute-listener

            CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::start-watch-service-execute-async
            client.watcher().startWatchServiceAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::start-watch-service-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }

        {
            StopWatchServiceRequest request = new StopWatchServiceRequest();

            // tag::stop-watch-service-execute-listener
            ActionListener<AcknowledgedResponse> listener = new ActionListener<AcknowledgedResponse>() {
                @Override
                public void onResponse(AcknowledgedResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            // end::stop-watch-service-execute-listener

            CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::stop-watch-service-execute-async
            client.watcher().stopWatchServiceAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::stop-watch-service-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
        }
    }

    public void testWatcher() throws Exception {
        RestHighLevelClient client = highLevelClient();

        {
            //tag::x-pack-put-watch-execute
            // you can also use the WatchSourceBuilder from org.elasticsearch.plugin:x-pack-core to create a watch programmatically
            BytesReference watch = new BytesArray("""
                {
                  "trigger": { "schedule": { "interval": "10h" } },
                  "input": { "simple": { "foo" : "bar" } },
                  "actions": { "logme": { "logging": { "text": "{{ctx.payload}}" } } }
                }""");
            PutWatchRequest request = new PutWatchRequest("my_watch_id", watch, XContentType.JSON);
            request.setActive(false); // <1>
            PutWatchResponse response = client.watcher().putWatch(request, RequestOptions.DEFAULT);
            //end::x-pack-put-watch-execute

            //tag::x-pack-put-watch-response
            String watchId = response.getId(); // <1>
            boolean isCreated = response.isCreated(); // <2>
            long version = response.getVersion(); // <3>
            //end::x-pack-put-watch-response
        }

        {
            BytesReference watch = new BytesArray("""
                {
                  "trigger": { "schedule": { "interval": "10h" } },
                  "input": { "simple": { "foo" : "bar" } },
                  "actions": { "logme": { "logging": { "text": "{{ctx.payload}}" } } }
                }""");
            PutWatchRequest request = new PutWatchRequest("my_other_watch_id", watch, XContentType.JSON);
            // tag::x-pack-put-watch-execute-listener
            ActionListener<PutWatchResponse> listener = new ActionListener<PutWatchResponse>() {
                @Override
                public void onResponse(PutWatchResponse response) {
                    // <1>
                }

                @Override
                public void onFailure(Exception e) {
                    // <2>
                }
            };
            // end::x-pack-put-watch-execute-listener

            // Replace the empty listener by a blocking listener in test
            final CountDownLatch latch = new CountDownLatch(1);
            listener = new LatchedActionListener<>(listener, latch);

            // tag::x-pack-put-watch-execute-async
            client.watcher().putWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::x-pack-put-watch-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
{
|
||||
// tag::x-pack-execute-watch-by-id
|
||||
ExecuteWatchRequest request = ExecuteWatchRequest.byId("my_watch_id");
|
||||
request.setAlternativeInput("{ \"foo\" : \"bar\" }"); // <1>
|
||||
request.setActionMode("action1", ExecuteWatchRequest.ActionExecutionMode.SIMULATE); // <2>
|
||||
request.setRecordExecution(true); // <3>
|
||||
request.setIgnoreCondition(true); // <4>
|
||||
request.setTriggerData("{\"triggered_time\":\"now\"}"); // <5>
|
||||
request.setDebug(true); // <6>
|
||||
ExecuteWatchResponse response = client.watcher().executeWatch(request, RequestOptions.DEFAULT);
|
||||
// end::x-pack-execute-watch-by-id
|
||||
|
||||
// tag::x-pack-execute-watch-by-id-response
|
||||
String id = response.getRecordId(); // <1>
|
||||
Map<String, Object> watch = response.getRecordAsMap(); // <2>
|
||||
String watch_id = ObjectPath.eval("watch_record.watch_id", watch); // <3>
|
||||
// end::x-pack-execute-watch-by-id-response
|
||||
}
|
||||
|
||||
{
|
||||
ExecuteWatchRequest request = ExecuteWatchRequest.byId("my_watch_id");
|
||||
// tag::x-pack-execute-watch-by-id-execute-listener
|
||||
ActionListener<ExecuteWatchResponse> listener = new ActionListener<ExecuteWatchResponse>() {
|
||||
@Override
|
||||
public void onResponse(ExecuteWatchResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-execute-watch-by-id-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-execute-watch-by-id-execute-async
|
||||
client.watcher().executeWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-execute-watch-by-id-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
{
|
||||
//tag::get-watch-request
|
||||
GetWatchRequest request = new GetWatchRequest("my_watch_id");
|
||||
//end::get-watch-request
|
||||
|
||||
//tag::get-watch-execute
|
||||
GetWatchResponse response = client.watcher().getWatch(request, RequestOptions.DEFAULT);
|
||||
//end::get-watch-execute
|
||||
|
||||
//tag::get-watch-response
|
||||
String watchId = response.getId(); // <1>
|
||||
boolean found = response.isFound(); // <2>
|
||||
long version = response.getVersion(); // <3>
|
||||
WatchStatus status = response.getStatus(); // <4>
|
||||
BytesReference source = response.getSource(); // <5>
|
||||
//end::get-watch-response
|
||||
}
|
||||
|
||||
{
|
||||
GetWatchRequest request = new GetWatchRequest("my_other_watch_id");
|
||||
// tag::get-watch-execute-listener
|
||||
ActionListener<GetWatchResponse> listener = new ActionListener<GetWatchResponse>() {
|
||||
@Override
|
||||
public void onResponse(GetWatchResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::get-watch-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::get-watch-execute-async
|
||||
client.watcher().getWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::get-watch-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
|
||||
{
|
||||
//tag::x-pack-delete-watch-execute
|
||||
DeleteWatchRequest request = new DeleteWatchRequest("my_watch_id");
|
||||
DeleteWatchResponse response = client.watcher().deleteWatch(request, RequestOptions.DEFAULT);
|
||||
//end::x-pack-delete-watch-execute
|
||||
|
||||
//tag::x-pack-delete-watch-response
|
||||
String watchId = response.getId(); // <1>
|
||||
boolean found = response.isFound(); // <2>
|
||||
long version = response.getVersion(); // <3>
|
||||
//end::x-pack-delete-watch-response
|
||||
}
|
||||
|
||||
{
|
||||
DeleteWatchRequest request = new DeleteWatchRequest("my_other_watch_id");
|
||||
// tag::x-pack-delete-watch-execute-listener
|
||||
ActionListener<DeleteWatchResponse> listener = new ActionListener<DeleteWatchResponse>() {
|
||||
@Override
|
||||
public void onResponse(DeleteWatchResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-delete-watch-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-delete-watch-execute-async
|
||||
client.watcher().deleteWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-delete-watch-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testExecuteInlineWatch() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
// tag::x-pack-execute-watch-inline
|
||||
String watchJson = """
|
||||
{
|
||||
"trigger": { "schedule": { "interval": "10h" } },
|
||||
"input": { "none": {} },
|
||||
"actions": { "logme": { "logging": { "text": "{{ctx.payload}}" } } }
|
||||
}""";
|
||||
ExecuteWatchRequest request = ExecuteWatchRequest.inline(watchJson);
|
||||
request.setAlternativeInput("{ \"foo\" : \"bar\" }"); // <1>
|
||||
request.setActionMode("action1", ExecuteWatchRequest.ActionExecutionMode.SIMULATE); // <2>
|
||||
request.setIgnoreCondition(true); // <3>
|
||||
request.setTriggerData("{\"triggered_time\":\"now\"}"); // <4>
|
||||
request.setDebug(true); // <5>
|
||||
ExecuteWatchResponse response = client.watcher().executeWatch(request, RequestOptions.DEFAULT);
|
||||
// end::x-pack-execute-watch-inline
|
||||
|
||||
// tag::x-pack-execute-watch-inline-response
|
||||
String id = response.getRecordId(); // <1>
|
||||
Map<String, Object> watch = response.getRecordAsMap(); // <2>
|
||||
String watch_id = ObjectPath.eval("watch_record.watch_id", watch); // <3>
|
||||
// end::x-pack-execute-watch-inline-response
|
||||
}
|
||||
|
||||
{
|
||||
String watchJson = """
|
||||
{
|
||||
"trigger": { "schedule": { "interval": "10h" } },
|
||||
"input": { "none": {} },
|
||||
"actions": { "logme": { "logging": { "text": "{{ctx.payload}}" } } }
|
||||
}""";
|
||||
ExecuteWatchRequest request = ExecuteWatchRequest.inline(watchJson);
|
||||
// tag::x-pack-execute-watch-inline-execute-listener
|
||||
ActionListener<ExecuteWatchResponse> listener = new ActionListener<ExecuteWatchResponse>() {
|
||||
@Override
|
||||
public void onResponse(ExecuteWatchResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::x-pack-execute-watch-inline-execute-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::x-pack-execute-watch-inline-execute-async
|
||||
client.watcher().executeWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::x-pack-execute-watch-inline-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testAckWatch() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
BytesReference watch = new BytesArray("""
|
||||
{
|
||||
"trigger": { "schedule": { "interval": "10h" } },
|
||||
"input": { "simple": { "foo" : "bar" } },
|
||||
"actions": { "logme": { "logging": { "text": "{{ctx.payload}}" } } }
|
||||
}""");
|
||||
PutWatchRequest putWatchRequest = new PutWatchRequest("my_watch_id", watch, XContentType.JSON);
|
||||
client.watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
|
||||
|
||||
// TODO: use the high-level REST client here once it supports 'execute watch'.
|
||||
Request executeWatchRequest = new Request("POST", "_watcher/watch/my_watch_id/_execute");
|
||||
executeWatchRequest.setJsonEntity("{ \"record_execution\": true }");
|
||||
Response executeResponse = client().performRequest(executeWatchRequest);
|
||||
assertEquals(RestStatus.OK.getStatus(), executeResponse.getStatusLine().getStatusCode());
|
||||
}
|
||||
|
||||
{
|
||||
//tag::ack-watch-request
|
||||
AckWatchRequest request = new AckWatchRequest("my_watch_id", // <1>
|
||||
"logme", "emailme"); // <2>
|
||||
//end::ack-watch-request
|
||||
|
||||
//tag::ack-watch-execute
|
||||
AckWatchResponse response = client.watcher().ackWatch(request, RequestOptions.DEFAULT);
|
||||
//end::ack-watch-execute
|
||||
|
||||
//tag::ack-watch-response
|
||||
WatchStatus watchStatus = response.getStatus();
|
||||
ActionStatus actionStatus = watchStatus.actionStatus("logme"); // <1>
|
||||
AckStatus.State ackState = actionStatus.ackStatus().state(); // <2>
|
||||
//end::ack-watch-response
|
||||
|
||||
assertEquals(AckStatus.State.ACKED, ackState);
|
||||
}
|
||||
|
||||
{
|
||||
AckWatchRequest request = new AckWatchRequest("my_watch_id");
|
||||
// tag::ack-watch-execute-listener
|
||||
ActionListener<AckWatchResponse> listener = new ActionListener<AckWatchResponse>() {
|
||||
@Override
|
||||
public void onResponse(AckWatchResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::ack-watch-execute-listener
|
||||
|
||||
// For testing, replace the empty listener by a blocking listener.
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::ack-watch-execute-async
|
||||
client.watcher().ackWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::ack-watch-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testDeactivateWatch() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
BytesReference watch = new BytesArray("""
|
||||
{
|
||||
"trigger": { "schedule": { "interval": "10h" } },
|
||||
"input": { "simple": { "foo" : "bar" } },
|
||||
"actions": { "logme": { "logging": { "text": "{{ctx.payload}}" } } }
|
||||
}""");
|
||||
PutWatchRequest putWatchRequest = new PutWatchRequest("my_watch_id", watch, XContentType.JSON);
|
||||
client.watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
|
||||
}
|
||||
|
||||
{
|
||||
//tag::deactivate-watch-execute
|
||||
DeactivateWatchRequest request = new DeactivateWatchRequest("my_watch_id");
|
||||
DeactivateWatchResponse response = client.watcher().deactivateWatch(request, RequestOptions.DEFAULT);
|
||||
//end::deactivate-watch-execute
|
||||
|
||||
assertThat(response.getStatus().state().isActive(), is(false));
|
||||
}
|
||||
|
||||
{
|
||||
DeactivateWatchRequest request = new DeactivateWatchRequest("my_watch_id");
|
||||
// tag::deactivate-watch-execute-listener
|
||||
ActionListener<DeactivateWatchResponse> listener = new ActionListener<DeactivateWatchResponse>() {
|
||||
@Override
|
||||
public void onResponse(DeactivateWatchResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::deactivate-watch-execute-listener
|
||||
|
||||
// For testing, replace the empty listener by a blocking listener.
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::deactivate-watch-execute-async
|
||||
client.watcher().deactivateWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::deactivate-watch-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
public void testActivateWatch() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
BytesReference watch = new BytesArray("""
|
||||
{
|
||||
"trigger": { "schedule": { "interval": "10h" } },
|
||||
"input": { "simple": { "foo" : "bar" } },
|
||||
"actions": { "logme": { "logging": { "text": "{{ctx.payload}}" } } }
|
||||
}""");
|
||||
PutWatchRequest request = new PutWatchRequest("my_watch_id", watch, XContentType.JSON);
|
||||
request.setActive(false); // <1>
|
||||
PutWatchResponse response = client.watcher().putWatch(request, RequestOptions.DEFAULT);
|
||||
}
|
||||
|
||||
{
|
||||
//tag::activate-watch-request
|
||||
ActivateWatchRequest request = new ActivateWatchRequest("my_watch_id");
|
||||
ActivateWatchResponse response = client.watcher().activateWatch(request, RequestOptions.DEFAULT);
|
||||
//end::activate-watch-request
|
||||
|
||||
//tag::activate-watch-response
|
||||
WatchStatus watchStatus = response.getStatus(); // <1>
|
||||
//end::activate-watch-response
|
||||
|
||||
assertTrue(watchStatus.state().isActive());
|
||||
}
|
||||
|
||||
{
|
||||
ActivateWatchRequest request = new ActivateWatchRequest("my_watch_id");
|
||||
//tag::activate-watch-request-listener
|
||||
ActionListener<ActivateWatchResponse> listener = new ActionListener<ActivateWatchResponse>() {
|
||||
@Override
|
||||
public void onResponse(ActivateWatchResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
//end::activate-watch-request-listener
|
||||
|
||||
// Replace the empty listener by a blocking listener in test
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
//tag::activate-watch-request-async
|
||||
client.watcher().activateWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
//end::activate-watch-request-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
public void testWatcherStats() throws Exception {
|
||||
RestHighLevelClient client = highLevelClient();
|
||||
|
||||
{
|
||||
//tag::watcher-stats-request
|
||||
WatcherStatsRequest request = new WatcherStatsRequest(true, true);
|
||||
//end::watcher-stats-request
|
||||
|
||||
//tag::watcher-stats-execute
|
||||
WatcherStatsResponse response = client.watcher().watcherStats(request, RequestOptions.DEFAULT);
|
||||
//end::watcher-stats-execute
|
||||
|
||||
//tag::watcher-stats-response
|
||||
List<WatcherStatsResponse.Node> nodes = response.getNodes(); // <1>
|
||||
//end::watcher-stats-response
|
||||
}
|
||||
|
||||
{
|
||||
WatcherStatsRequest request = new WatcherStatsRequest();
|
||||
|
||||
// tag::watcher-stats-execute-listener
|
||||
ActionListener<WatcherStatsResponse> listener = new ActionListener<WatcherStatsResponse>() {
|
||||
@Override
|
||||
public void onResponse(WatcherStatsResponse response) {
|
||||
// <1>
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
// <2>
|
||||
}
|
||||
};
|
||||
// end::watcher-stats-execute-listener
|
||||
|
||||
CountDownLatch latch = new CountDownLatch(1);
|
||||
listener = new LatchedActionListener<>(listener, latch);
|
||||
|
||||
// tag::watcher-stats-execute-async
|
||||
client.watcher().watcherStatsAsync(request, RequestOptions.DEFAULT, listener); // <1>
|
||||
// end::watcher-stats-execute-async
|
||||
|
||||
assertTrue(latch.await(30L, TimeUnit.SECONDS));
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -21,10 +21,8 @@ Javadoc roots used to generate links from Painless's API reference
|
|||
|
||||
ifeval::["{release-state}"=="unreleased"]
|
||||
:elasticsearch-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/elasticsearch/{version}-SNAPSHOT
|
||||
:transport-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/client/transport/{version}-SNAPSHOT
|
||||
:rest-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/client/elasticsearch-rest-client/{version}-SNAPSHOT
|
||||
:rest-client-sniffer-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/client/elasticsearch-rest-client-sniffer/{version}-SNAPSHOT
|
||||
:rest-high-level-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/client/elasticsearch-rest-high-level-client/{version}-SNAPSHOT
|
||||
:mapper-extras-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/mapper-extras-client/{version}-SNAPSHOT
|
||||
:painless-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/painless/lang-painless/{version}-SNAPSHOT
|
||||
:parent-join-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/parent-join-client/{version}-SNAPSHOT
|
||||
|
@ -36,10 +34,8 @@ endif::[]
|
|||
|
||||
ifeval::["{release-state}"!="unreleased"]
|
||||
:elasticsearch-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/elasticsearch/{version}
|
||||
:transport-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/client/transport/{version}
|
||||
:rest-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/client/elasticsearch-rest-client/{version}
|
||||
:rest-client-sniffer-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/client/elasticsearch-rest-client-sniffer/{version}
|
||||
:rest-high-level-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/client/elasticsearch-rest-high-level-client/{version}
|
||||
:mapper-extras-client-javadoc: https://snapshots.elastic.co/javadoc/org/elasticsearch/plugin/mapper-extras-client/{version}
|
||||
:painless-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/painless/lang-painless/{version}
|
||||
:parent-join-client-javadoc: https://artifacts.elastic.co/javadoc/org/elasticsearch/plugin/parent-join-client/{version}
|
||||
|
@ -49,8 +45,6 @@ ifeval::["{release-state}"!="unreleased"]
|
|||
:version_qualified: {bare_version}
|
||||
endif::[]
|
||||
|
||||
:javadoc-client: {rest-high-level-client-javadoc}/org/elasticsearch/client
|
||||
:javadoc-xpack: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack
|
||||
:javadoc-license: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack/license
|
||||
:javadoc-watcher: {rest-high-level-client-javadoc}/org/elasticsearch/protocol/xpack/watcher
@ -1,84 +0,0 @@
|
|||
[[java-rest-high-aggregation-builders]]
|
||||
=== Building Aggregations
|
||||
|
||||
This page lists all the available aggregations with their corresponding `AggregationBuilder` class name and helper method name in the
|
||||
`AggregationBuilders` or `PipelineAggregatorBuilders` utility classes.
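
For example, these helper methods can be chained to build a search request programmatically. The sketch below (assuming an existing `SearchSourceBuilder` import and purely illustrative field names) nests an average metric inside a terms bucket:

["source","java"]
--------------------------------------------------
// Field names "genre" and "rating" are illustrative only.
SearchSourceBuilder source = new SearchSourceBuilder()
    .aggregation(
        AggregationBuilders.terms("genres")           // bucket aggregation
            .field("genre")
            .subAggregation(
                AggregationBuilders.avg("avg_rating") // metric sub-aggregation
                    .field("rating")));
--------------------------------------------------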
|
||||
|
||||
:agg-ref: {elasticsearch-javadoc}/org/elasticsearch/search/aggregations
|
||||
:parentjoin-ref: {parent-join-client-javadoc}/org/elasticsearch/join/aggregations
|
||||
:matrixstats-ref: {matrixstats-client-javadoc}/org/elasticsearch/search/aggregations
|
||||
|
||||
==== Metrics Aggregations
|
||||
[options="header"]
|
||||
|======
|
||||
| Aggregation | AggregationBuilder Class | Method in AggregationBuilders
|
||||
| {ref}/search-aggregations-metrics-avg-aggregation.html[Avg] | {agg-ref}/metrics/AvgAggregationBuilder.html[AvgAggregationBuilder] | {agg-ref}/AggregationBuilders.html#avg-java.lang.String-[AggregationBuilders.avg()]
|
||||
| {ref}/search-aggregations-metrics-cardinality-aggregation.html[Cardinality] | {agg-ref}/metrics/CardinalityAggregationBuilder.html[CardinalityAggregationBuilder] | {agg-ref}/AggregationBuilders.html#cardinality-java.lang.String-[AggregationBuilders.cardinality()]
|
||||
| {ref}/search-aggregations-metrics-extendedstats-aggregation.html[Extended Stats] | {agg-ref}/metrics/ExtendedStatsAggregationBuilder.html[ExtendedStatsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#extendedStats-java.lang.String-[AggregationBuilders.extendedStats()]
|
||||
| {ref}/search-aggregations-metrics-geobounds-aggregation.html[Geo Bounds] | {agg-ref}/metrics/GeoBoundsAggregationBuilder.html[GeoBoundsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#geoBounds-java.lang.String-[AggregationBuilders.geoBounds()]
|
||||
| {ref}/search-aggregations-metrics-geocentroid-aggregation.html[Geo Centroid] | {agg-ref}/metrics/GeoCentroidAggregationBuilder.html[GeoCentroidAggregationBuilder] | {agg-ref}/AggregationBuilders.html#geoCentroid-java.lang.String-[AggregationBuilders.geoCentroid()]
|
||||
| {ref}/search-aggregations-metrics-max-aggregation.html[Max] | {agg-ref}/metrics/MaxAggregationBuilder.html[MaxAggregationBuilder] | {agg-ref}/AggregationBuilders.html#max-java.lang.String-[AggregationBuilders.max()]
|
||||
| {ref}/search-aggregations-metrics-min-aggregation.html[Min] | {agg-ref}/metrics/MinAggregationBuilder.html[MinAggregationBuilder] | {agg-ref}/AggregationBuilders.html#min-java.lang.String-[AggregationBuilders.min()]
|
||||
| {ref}/search-aggregations-metrics-percentile-aggregation.html[Percentiles] | {agg-ref}/metrics/PercentilesAggregationBuilder.html[PercentilesAggregationBuilder] | {agg-ref}/AggregationBuilders.html#percentiles-java.lang.String-[AggregationBuilders.percentiles()]
|
||||
| {ref}/search-aggregations-metrics-percentile-rank-aggregation.html[Percentile Ranks] | {agg-ref}/metrics/PercentileRanksAggregationBuilder.html[PercentileRanksAggregationBuilder] | {agg-ref}/AggregationBuilders.html#percentileRanks-java.lang.String-[AggregationBuilders.percentileRanks()]
|
||||
| {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Scripted Metric] | {agg-ref}/metrics/ScriptedMetricAggregationBuilder.html[ScriptedMetricAggregationBuilder] | {agg-ref}/AggregationBuilders.html#scriptedMetric-java.lang.String-[AggregationBuilders.scriptedMetric()]
|
||||
| {ref}/search-aggregations-metrics-stats-aggregation.html[Stats] | {agg-ref}/metrics/StatsAggregationBuilder.html[StatsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#stats-java.lang.String-[AggregationBuilders.stats()]
|
||||
| {ref}/search-aggregations-metrics-sum-aggregation.html[Sum] | {agg-ref}/metrics/SumAggregationBuilder.html[SumAggregationBuilder] | {agg-ref}/AggregationBuilders.html#sum-java.lang.String-[AggregationBuilders.sum()]
|
||||
| {ref}/search-aggregations-metrics-top-hits-aggregation.html[Top hits] | {agg-ref}/metrics/TopHitsAggregationBuilder.html[TopHitsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#topHits-java.lang.String-[AggregationBuilders.topHits()]
|
||||
| {ref}/search-aggregations-metrics-top-metrics.html[Top Metrics] | {javadoc-client}/analytics/TopMetricsAggregationBuilder.html[TopMetricsAggregationBuilder] | None
|
||||
| {ref}/search-aggregations-metrics-valuecount-aggregation.html[Value Count] | {agg-ref}/metrics/ValueCountAggregationBuilder.html[ValueCountAggregationBuilder] | {agg-ref}/AggregationBuilders.html#count-java.lang.String-[AggregationBuilders.count()]
|
||||
| {ref}/search-aggregations-metrics-string-stats-aggregation.html[String Stats] | {javadoc-client}/analytics/StringStatsAggregationBuilder.html[StringStatsAggregationBuilder] | None
|
||||
|======
|
||||
|
||||
==== Bucket Aggregations
|
||||
[options="header"]
|
||||
|======
|
||||
| Aggregation | AggregationBuilder Class | Method in AggregationBuilders
|
||||
| {ref}/search-aggregations-bucket-adjacency-matrix-aggregation.html[Adjacency Matrix] | {agg-ref}/bucket/adjacency/AdjacencyMatrixAggregationBuilder.html[AdjacencyMatrixAggregationBuilder] | {agg-ref}/AggregationBuilders.html#adjacencyMatrix-java.lang.String-java.util.Map-[AggregationBuilders.adjacencyMatrix()]
|
||||
| {ref}/search-aggregations-bucket-children-aggregation.html[Children] | {parentjoin-ref}/ChildrenAggregationBuilder.html[ChildrenAggregationBuilder] | None
|
||||
| {ref}/search-aggregations-bucket-datehistogram-aggregation.html[Date Histogram] | {agg-ref}/bucket/histogram/DateHistogramAggregationBuilder.html[DateHistogramAggregationBuilder] | {agg-ref}/AggregationBuilders.html#dateHistogram-java.lang.String-[AggregationBuilders.dateHistogram()]
|
||||
| {ref}/search-aggregations-bucket-daterange-aggregation.html[Date Range] | {agg-ref}/bucket/range/DateRangeAggregationBuilder.html[DateRangeAggregationBuilder] | {agg-ref}/AggregationBuilders.html#dateRange-java.lang.String-[AggregationBuilders.dateRange()]
|
||||
| {ref}/search-aggregations-bucket-diversified-sampler-aggregation.html[Diversified Sampler] | {agg-ref}/bucket/sampler/DiversifiedAggregationBuilder.html[DiversifiedAggregationBuilder] | {agg-ref}/AggregationBuilders.html#diversifiedSampler-java.lang.String-[AggregationBuilders.diversifiedSampler()]
|
||||
| {ref}/search-aggregations-bucket-filter-aggregation.html[Filter] | {agg-ref}/bucket/filter/FilterAggregationBuilder.html[FilterAggregationBuilder] | {agg-ref}/AggregationBuilders.html#filter-java.lang.String-org.elasticsearch.index.query.QueryBuilder-[AggregationBuilders.filter()]
|
||||
| {ref}/search-aggregations-bucket-filters-aggregation.html[Filters] | {agg-ref}/bucket/filters/FiltersAggregationBuilder.html[FiltersAggregationBuilder] | {agg-ref}/AggregationBuilders.html#filters-java.lang.String-org.elasticsearch.index.query.QueryBuilder...-[AggregationBuilders.filters()]
|
||||
| {ref}/search-aggregations-bucket-geodistance-aggregation.html[Geo Distance] | {agg-ref}/bucket/range/GeoDistanceAggregationBuilder.html[GeoDistanceAggregationBuilder] | {agg-ref}/AggregationBuilders.html#geoDistance-java.lang.String-org.elasticsearch.common.geo.GeoPoint-[AggregationBuilders.geoDistance()]
|
||||
| {ref}/search-aggregations-bucket-geohashgrid-aggregation.html[GeoHash Grid] | {agg-ref}/bucket/geogrid/GeoGridAggregationBuilder.html[GeoGridAggregationBuilder] | {agg-ref}/AggregationBuilders.html#geohashGrid-java.lang.String-[AggregationBuilders.geohashGrid()]
|
||||
| {ref}/search-aggregations-bucket-global-aggregation.html[Global] | {agg-ref}/bucket/global/GlobalAggregationBuilder.html[GlobalAggregationBuilder] | {agg-ref}/AggregationBuilders.html#global-java.lang.String-[AggregationBuilders.global()]
|
||||
| {ref}/search-aggregations-bucket-histogram-aggregation.html[Histogram] | {agg-ref}/bucket/histogram/HistogramAggregationBuilder.html[HistogramAggregationBuilder] | {agg-ref}/AggregationBuilders.html#histogram-java.lang.String-[AggregationBuilders.histogram()]
|
||||
| {ref}/search-aggregations-bucket-iprange-aggregation.html[IP Range] | {agg-ref}/bucket/range/IpRangeAggregationBuilder.html[IpRangeAggregationBuilder] | {agg-ref}/AggregationBuilders.html#ipRange-java.lang.String-[AggregationBuilders.ipRange()]
|
||||
| {ref}/search-aggregations-bucket-missing-aggregation.html[Missing] | {agg-ref}/bucket/missing/MissingAggregationBuilder.html[MissingAggregationBuilder] | {agg-ref}/AggregationBuilders.html#missing-java.lang.String-[AggregationBuilders.missing()]
|
||||
| {ref}/search-aggregations-bucket-nested-aggregation.html[Nested] | {agg-ref}/bucket/nested/NestedAggregationBuilder.html[NestedAggregationBuilder] | {agg-ref}/AggregationBuilders.html#nested-java.lang.String-java.lang.String-[AggregationBuilders.nested()]
|
||||
| {ref}/search-aggregations-bucket-range-aggregation.html[Range] | {agg-ref}/bucket/range/RangeAggregationBuilder.html[RangeAggregationBuilder] | {agg-ref}/AggregationBuilders.html#range-java.lang.String-[AggregationBuilders.range()]
|
||||
| {ref}/search-aggregations-bucket-reverse-nested-aggregation.html[Reverse nested] | {agg-ref}/bucket/nested/ReverseNestedAggregationBuilder.html[ReverseNestedAggregationBuilder] | {agg-ref}/AggregationBuilders.html#reverseNested-java.lang.String-[AggregationBuilders.reverseNested()]
|
||||
| {ref}/search-aggregations-bucket-sampler-aggregation.html[Sampler] | {agg-ref}/bucket/sampler/SamplerAggregationBuilder.html[SamplerAggregationBuilder] | {agg-ref}/AggregationBuilders.html#sampler-java.lang.String-[AggregationBuilders.sampler()]
|
||||
| {ref}/search-aggregations-bucket-significantterms-aggregation.html[Significant Terms] | {agg-ref}/bucket/significant/SignificantTermsAggregationBuilder.html[SignificantTermsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#significantTerms-java.lang.String-[AggregationBuilders.significantTerms()]
|
||||
| {ref}/search-aggregations-bucket-significanttext-aggregation.html[Significant Text] | {agg-ref}/bucket/significant/SignificantTextAggregationBuilder.html[SignificantTextAggregationBuilder] | {agg-ref}/AggregationBuilders.html#significantText-java.lang.String-java.lang.String-[AggregationBuilders.significantText()]
|
||||
| {ref}/search-aggregations-bucket-terms-aggregation.html[Terms] | {agg-ref}/bucket/terms/TermsAggregationBuilder.html[TermsAggregationBuilder] | {agg-ref}/AggregationBuilders.html#terms-java.lang.String-[AggregationBuilders.terms()]
|
||||
|======
|
||||
|
||||
==== Pipeline Aggregations
|
||||
[options="header"]
|
||||
|======
|
||||
| Pipeline Aggregation | PipelineAggregationBuilder Class | Method in PipelineAggregatorBuilders
|
||||
| {ref}/search-aggregations-pipeline-avg-bucket-aggregation.html[Avg Bucket] | {agg-ref}/pipeline/AvgBucketPipelineAggregationBuilder.html[AvgBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#avgBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.avgBucket()]
|
||||
| {ref}/search-aggregations-pipeline-derivative-aggregation.html[Derivative] | {agg-ref}/pipeline/DerivativePipelineAggregationBuilder.html[DerivativePipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#derivative-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.derivative()]
|
||||
| {ref}/search-aggregations-pipeline-inference-bucket-aggregation.html[Inference] | {javadoc-client}/analytics/InferencePipelineAggregationBuilder.html[InferencePipelineAggregationBuilder] | None
|
||||
| {ref}/search-aggregations-pipeline-max-bucket-aggregation.html[Max Bucket] | {agg-ref}/pipeline/MaxBucketPipelineAggregationBuilder.html[MaxBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#maxBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.maxBucket()]
|
||||
| {ref}/search-aggregations-pipeline-min-bucket-aggregation.html[Min Bucket] | {agg-ref}/pipeline/MinBucketPipelineAggregationBuilder.html[MinBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#minBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.minBucket()]
|
||||
| {ref}/search-aggregations-pipeline-sum-bucket-aggregation.html[Sum Bucket] | {agg-ref}/pipeline/SumBucketPipelineAggregationBuilder.html[SumBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#sumBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.sumBucket()]
|
||||
| {ref}/search-aggregations-pipeline-stats-bucket-aggregation.html[Stats Bucket] | {agg-ref}/pipeline/StatsBucketPipelineAggregationBuilder.html[StatsBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#statsBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.statsBucket()]
|
||||
| {ref}/search-aggregations-pipeline-extended-stats-bucket-aggregation.html[Extended Stats Bucket] | {agg-ref}/pipeline/ExtendedStatsBucketPipelineAggregationBuilder.html[ExtendedStatsBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#extendedStatsBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.extendedStatsBucket()]
|
||||
| {ref}/search-aggregations-pipeline-percentiles-bucket-aggregation.html[Percentiles Bucket] | {agg-ref}/pipeline/PercentilesBucketPipelineAggregationBuilder.html[PercentilesBucketPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#percentilesBucket-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.percentilesBucket()]
|
||||
| {ref}/search-aggregations-pipeline-movfn-aggregation.html[Moving Function] | {agg-ref}/pipeline/MovFnPipelineAggregationBuilder.html[MovFnPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#movingFunction-java.lang.String-org.elasticsearch.script.Script-java.lang.String-int-[PipelineAggregatorBuilders.movingFunction()]
|
||||
| {ref}/search-aggregations-pipeline-cumulative-sum-aggregation.html[Cumulative Sum] | {agg-ref}/pipeline/CumulativeSumPipelineAggregationBuilder.html[CumulativeSumPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#cumulativeSum-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.cumulativeSum()]
|
||||
| {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Bucket Script] | {agg-ref}/pipeline/BucketScriptPipelineAggregationBuilder.html[BucketScriptPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#bucketScript-java.lang.String-java.util.Map-org.elasticsearch.script.Script-[PipelineAggregatorBuilders.bucketScript()]
|
||||
| {ref}/search-aggregations-pipeline-bucket-selector-aggregation.html[Bucket Selector] | {agg-ref}/pipeline/BucketSelectorPipelineAggregationBuilder.html[BucketSelectorPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#bucketSelector-java.lang.String-java.util.Map-org.elasticsearch.script.Script-[PipelineAggregatorBuilders.bucketSelector()]
|
||||
| {ref}/search-aggregations-pipeline-serialdiff-aggregation.html[Serial Differencing] | {agg-ref}/pipeline/SerialDiffPipelineAggregationBuilder.html[SerialDiffPipelineAggregationBuilder] | {agg-ref}/PipelineAggregatorBuilders.html#diff-java.lang.String-java.lang.String-[PipelineAggregatorBuilders.diff()]
|
||||
|======
|
||||
|
||||
==== Matrix Aggregations
|
||||
[options="header"]
|
||||
|======
|
||||
| Aggregation | AggregationBuilder Class | Method in MatrixStatsAggregationBuilders
|
||||
| {ref}/search-aggregations-matrix-stats-aggregation.html[Matrix Stats] | {matrixstats-ref}/matrix/stats/MatrixStatsAggregationBuilder.html[MatrixStatsAggregationBuilder] | {matrixstats-ref}/MatrixStatsAggregationBuilders.html#matrixStats-java.lang.String-[MatrixStatsAggregationBuilders.matrixStats()]
|
||||
|======
@ -1,68 +0,0 @@
|
|||
--
|
||||
:api: asyncsearch-delete
|
||||
:request: DeleteAsyncSearchRequest
|
||||
:response: AcknowledgedResponse
|
||||
--
|
||||
|
||||
[role="xpack"]
|
||||
[id="{upid}-{api}"]
|
||||
=== Delete Async Search API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Request
|
||||
|
||||
A +{request}+ allows you to delete a running asynchronous search task using
its id. The only required argument is the `id` of the running search:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
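
Since the tagged snippet above is pulled from a test file that is not shown here, the following minimal sketch illustrates the same call. The id is a placeholder, and the `client.asyncSearch().delete(...)` entry point is an assumption based on the usual layout of the high level client, not taken from the snippet:

["source","java"]
--------------------------------------------------
// "my-async-search-id" is a placeholder; a real id comes from a previous submit call.
DeleteAsyncSearchRequest request = new DeleteAsyncSearchRequest("my-async-search-id");
// client.asyncSearch() is the assumed HLRC entry point for the async search APIs.
AcknowledgedResponse response =
    client.asyncSearch().delete(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------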
|
||||
|
||||
[id="{upid}-{api}-sync"]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute]
|
||||
--------------------------------------------------
|
||||
<1> Execute the request and get back the response as an +{response}+ object.
|
||||
|
||||
[id="{upid}-{api}-async"]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a +{request}+ lets you provide an
`ActionListener` that is called back when the request returns:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The +{request}+ to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for +{response}+ looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ indicates whether the request was acknowledged:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> `isAcknowledged` indicates whether the deletion request was acknowledged.
|
|
@ -1,87 +0,0 @@
|
|||
--
|
||||
:api: asyncsearch-get
|
||||
:request: GetAsyncSearchRequest
|
||||
:response: AsyncSearchResponse
|
||||
--
|
||||
|
||||
[role="xpack"]
|
||||
[id="{upid}-{api}"]
|
||||
=== Get Async Search API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Request
|
||||
|
||||
A +{request}+ allows you to get a running asynchronous search task by
its id. The only required argument is the `id` of the running async search:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
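
As a rough sketch of the same request (the id is a placeholder, and the `client.asyncSearch().get(...)` entry point is assumed rather than taken from the tagged snippet):

["source","java"]
--------------------------------------------------
// Placeholder id; use the id returned by a previous submit request.
GetAsyncSearchRequest request = new GetAsyncSearchRequest("my-async-search-id");
// Assumed HLRC entry point; the response type matches the attribute above.
AsyncSearchResponse response =
    client.asyncSearch().get(request, RequestOptions.DEFAULT);
--------------------------------------------------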
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-arguments]
|
||||
--------------------------------------------------
|
||||
<1> The minimum time that the request should wait before
|
||||
returning a partial result (defaults to no wait).
|
||||
<2> The expiration time of the request (defaults to none).
|
||||
|
||||
|
||||
[id="{upid}-{api}-sync"]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute]
|
||||
--------------------------------------------------
|
||||
<1> Execute the request and get back the response as an +{response}+ object.
|
||||
|
||||
[id="{upid}-{api}-async"]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a +{request}+ lets you provide an
`ActionListener` that is called back when the request returns:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The +{request}+ to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for +{response}+ looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ allows you to retrieve information about the executed
operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> The `SearchResponse`, or `null` if not available yet
|
||||
<2> The id of the async search request, `null` if the response isn't stored
|
||||
<3> `true` when the response contains partial results
|
||||
<4> `true` when the search is still running
|
||||
<5> The time the response was created (millis since epoch)
|
||||
<6> The time the response expires (millis since epoch)
|
||||
<7> Get failure reasons or `null` for no failures
|
|
@ -1,94 +0,0 @@
|
|||
--
|
||||
:api: asyncsearch-submit
|
||||
:request: SubmitAsyncSearchRequest
|
||||
:response: AsyncSearchResponse
|
||||
--
|
||||
|
||||
[role="xpack"]
|
||||
[id="{upid}-{api}"]
|
||||
=== Submit Async Search API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Request
|
||||
|
||||
A +{request}+ allows you to submit an asynchronous search task to
|
||||
the cluster. Required arguments are the `SearchSourceBuilder` defining
|
||||
the search and the target indices:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> The definition of the search to run
|
||||
<2> The target indices for the search
|
||||
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-arguments]
|
||||
--------------------------------------------------
|
||||
<1> The minimum time that the request should wait before
|
||||
returning a partial result (defaults to 1 second).
|
||||
<2> The expiration time of the request (defaults to 5 days).
|
||||
<3> Controls whether the results should be stored if the request
|
||||
completed within the provided `wait_for_completion` time (default: false)
|
||||
|
||||
[id="{upid}-{api}-sync"]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute]
|
||||
--------------------------------------------------
|
||||
<1> Execute the request and get back the response as an +{response}+ object.
|
||||
|
||||
[id="{upid}-{api}-async"]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a +{request}+ lets you provide an
`ActionListener` that is called back when the submit request returns. Note
that this does not concern the execution of the submitted search request,
which always executes asynchronously. The listener, however, waits for the
submit request itself to come back:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The +{request}+ to execute and the `ActionListener` to use when
|
||||
the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for +{response}+ looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of failure. The raised exception is provided as an argument
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ allows you to retrieve information about the executed
operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> The `SearchResponse`, or `null` if not available yet
|
||||
<2> The id of the async search request, `null` if the response isn't stored
|
||||
<3> `true` when the response contains partial results
|
||||
<4> `true` when the search is still running
|
||||
<5> The time the response was created (millis since epoch)
|
||||
<6> The time the response expires (millis since epoch)
|
||||
<7> Get failure reasons or `null` for no failures
|
|
@ -1,32 +0,0 @@
|
|||
--
|
||||
:api: ccr-delete-auto-follow-pattern
|
||||
:request: DeleteAutoFollowPatternRequest
|
||||
:response: AcknowledgedResponse
|
||||
--
|
||||
[role="xpack"]
|
||||
[id="{upid}-{api}"]
|
||||
=== Delete Auto Follow Pattern API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Request
|
||||
|
||||
The Delete Auto Follow Pattern API allows you to delete an auto follow pattern.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> The name of the auto follow pattern to delete.
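
For readers without access to the tagged test snippet, a minimal sketch follows; the pattern name is a placeholder, and the `client.ccr().deleteAutoFollowPattern(...)` call is assumed from the naming conventions used elsewhere in these docs (an existing `RestHighLevelClient client` is assumed):

["source","java"]
--------------------------------------------------
// "my_pattern" is a placeholder auto follow pattern name.
DeleteAutoFollowPatternRequest request =
    new DeleteAutoFollowPatternRequest("my_pattern");
// client.ccr() is the assumed CCR entry point on the high level REST client.
AcknowledgedResponse response =
    client.ccr().deleteAutoFollowPattern(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------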
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ indicates if the delete auto follow pattern request was received.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Whether or not the delete auto follow pattern request was acknowledged.
|
||||
|
||||
include::../execution.asciidoc[]
|
|
@ -1,45 +0,0 @@
|
|||
--
|
||||
:api: ccr-forget-follower
|
||||
:request: ForgetFollowerRequest
|
||||
:response: BroadcastResponse
|
||||
--
|
||||
[role="xpack"]
|
||||
[id="{upid}-{api}"]
|
||||
=== Forget Follower API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Request
|
||||
|
||||
The Forget Follower API allows you to manually remove the follower retention
leases from the leader. Note that these retention leases are normally managed
automatically by the follower index. This API exists only for cases in which
invoking the unfollow API on the follower index is unable to remove the
follower retention leases.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> The name of the cluster containing the follower index.
|
||||
<2> The name of the follower index.
|
||||
<3> The UUID of the follower index (can be obtained from index stats).
|
||||
<4> The alias of the remote cluster containing the leader index.
|
||||
<5> The name of the leader index.
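
The five callouts above map onto the request constructor. A hedged sketch, with placeholder values and the argument order assumed from the callouts rather than taken from the removed test file:

["source","java"]
--------------------------------------------------
// All identifiers below are placeholders.
ForgetFollowerRequest request = new ForgetFollowerRequest(
    "follower-cluster",      // cluster containing the follower index
    "follower-index",        // follower index
    "follower-index-uuid",   // follower index UUID, obtainable from index stats
    "leader-remote-cluster", // remote cluster alias containing the leader index
    "leader-index");         // leader index
BroadcastResponse response =
    client.ccr().forgetFollower(request, RequestOptions.DEFAULT);
--------------------------------------------------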
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ indicates whether the request was successful.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> The high-level shards summary.
|
||||
<2> The total number of shards the request was executed on.
|
||||
<3> The total number of shards the request was successful on.
|
||||
<4> The total number of shards the request was skipped on (should always be zero).
|
||||
<5> The total number of shards the request failed on.
|
||||
<6> The shard-level failures.
|
||||
|
||||
include::../execution.asciidoc[]
|
|
@ -1,35 +0,0 @@
|
|||
--
|
||||
:api: ccr-get-auto-follow-pattern
|
||||
:request: GetAutoFollowPatternRequest
|
||||
:response: GetAutoFollowPatternResponse
|
||||
--
|
||||
[role="xpack"]
|
||||
[id="{upid}-{api}"]
|
||||
=== Get Auto Follow Pattern API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Request
|
||||
|
||||
The Get Auto Follow Pattern API allows you to get a specified auto follow pattern
|
||||
or all auto follow patterns.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> The name of the auto follow pattern to get.
|
||||
Use the default constructor to get all auto follow patterns.
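
A minimal sketch of both variants (the pattern name is a placeholder, and the `client.ccr().getAutoFollowPattern(...)` call is an assumption, not taken from the tagged snippet):

["source","java"]
--------------------------------------------------
// Fetch a single named pattern ("my_pattern" is a placeholder) ...
GetAutoFollowPatternRequest request = new GetAutoFollowPatternRequest("my_pattern");
// ... or use the default constructor to fetch every pattern.
GetAutoFollowPatternRequest allPatterns = new GetAutoFollowPatternRequest();
GetAutoFollowPatternResponse response =
    client.ccr().getAutoFollowPattern(request, RequestOptions.DEFAULT);
--------------------------------------------------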
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ includes the requested auto follow pattern, or
all auto follow patterns if the default constructor of the request class was used.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Get the requested pattern from the list of returned patterns
|
||||
|
||||
include::../execution.asciidoc[]
|
|
@ -1,35 +0,0 @@
|
|||
--
|
||||
:api: ccr-get-follow-info
|
||||
:request: FollowInfoRequest
|
||||
:response: FollowInfoResponse
|
||||
--
|
||||
[role="xpack"]
|
||||
[id="{upid}-{api}"]
|
||||
=== Get Follow Info API
|
||||
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Request
|
||||
|
||||
The Get Follow Info API allows you to get follow information (parameters and status) for specific follower indices.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> The follower index to get follow information for.
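
A minimal sketch, assuming a single-argument request constructor and a `client.ccr().getFollowInfo(...)` entry point (the index name is a placeholder):

["source","java"]
--------------------------------------------------
// "follower-index" is a placeholder follower index name.
FollowInfoRequest request = new FollowInfoRequest("follower-index");
FollowInfoResponse response =
    client.ccr().getFollowInfo(request, RequestOptions.DEFAULT);
--------------------------------------------------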
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ includes follow information for the specified follower indices.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> The follow information for the specified follower indices.
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
|
|
@ -1,35 +0,0 @@
|
|||
--
|
||||
:api: ccr-get-follow-stats
|
||||
:request: FollowStatsRequest
|
||||
:response: FollowStatsResponse
|
||||
--
|
||||
[role="xpack"]
|
||||
[id="{upid}-{api}"]
|
||||
=== Get Follow Stats API
|
||||
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Request
|
||||
|
||||
The Get Follow Stats API allows you to get follow statistics for specific follower indices.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> The follower index to get follow statistics for.
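
A minimal sketch along the same lines, again with a placeholder index name and an assumed `client.ccr().getFollowStats(...)` entry point:

["source","java"]
--------------------------------------------------
// "follower-index" is a placeholder follower index name.
FollowStatsRequest request = new FollowStatsRequest("follower-index");
FollowStatsResponse response =
    client.ccr().getFollowStats(request, RequestOptions.DEFAULT);
--------------------------------------------------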
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ includes follow statistics for the specified follower indices.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> The follow statistics for the specified follower indices.
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
--
|
||||
:api: ccr-get-stats
|
||||
:request: CcrStatsRequest
|
||||
:response: CcrStatsResponse
|
||||
--
|
||||
[role="xpack"]
|
||||
[id="{upid}-{api}"]
|
||||
=== Get CCR Stats API
|
||||
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Request
|
||||
|
||||
The Get CCR Stats API allows you to get statistics about index following and auto following.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> The request accepts no parameters.
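
Because the request takes no parameters, the sketch is short; the `getCcrStats` method name is an assumption based on the API name, not taken from the tagged snippet:

["source","java"]
--------------------------------------------------
CcrStatsRequest request = new CcrStatsRequest(); // the request takes no parameters
CcrStatsResponse response =
    client.ccr().getCcrStats(request, RequestOptions.DEFAULT);
--------------------------------------------------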
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Response
|
||||
|
||||
The returned +{response}+ always includes index follow statistics for all follower indices, as well as
auto follow statistics.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> The follow stats of active follower indices.
|
||||
<2> The auto follow stats of the cluster that has been queried.
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
|
|
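
A minimal, untested sketch of executing this request with the `RestHighLevelClient` follows; the `getCcrStats` method name and the response accessors mentioned in the comments are assumptions inferred from the callouts above.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
CcrStatsRequest request = new CcrStatsRequest(); // the request takes no parameters
CcrStatsResponse response =
    client.ccr().getCcrStats(request, RequestOptions.DEFAULT); // assumed method name
// Assumed accessors for the two stats sections described above:
// response.getIndicesFollowStats() and response.getAutoFollowStats()
--------------------------------------------------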
@ -1,32 +0,0 @@
--
:api: ccr-pause-auto-follow-pattern
:request: PauseAutoFollowPatternRequest
:response: AcknowledgedResponse
--

[role="xpack"]
[id="{upid}-{api}"]
=== Pause Auto Follow Pattern API

[id="{upid}-{api}-request"]
==== Request

The Pause Auto Follow Pattern API allows you to pause an existing auto follow pattern.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the auto follow pattern.

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the pause auto follow pattern request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the pause auto follow pattern request was acknowledged.

include::../execution.asciidoc[]
@ -1,35 +0,0 @@
--
:api: ccr-pause-follow
:request: PauseFollowRequest
:response: AcknowledgedResponse
--

[role="xpack"]
[id="{upid}-{api}"]
=== Pause Follow API

[id="{upid}-{api}-request"]
==== Request

The Pause Follow API allows you to pause following by follow index name.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the follow index.

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the pause follow request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the pause follow request was acknowledged.

include::../execution.asciidoc[]
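
For quick reference, a minimal sketch of pausing a follower through the `RestHighLevelClient`; the `pauseFollow` method name is an assumption, not code taken from the removed test sources.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
PauseFollowRequest request = new PauseFollowRequest("follower"); // follow index to pause
AcknowledgedResponse response =
    client.ccr().pauseFollow(request, RequestOptions.DEFAULT); // assumed method name
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------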
@ -1,39 +0,0 @@
--
:api: ccr-put-auto-follow-pattern
:request: PutAutoFollowPatternRequest
:response: AcknowledgedResponse
--

[role="xpack"]
[id="{upid}-{api}"]
=== Put Auto Follow Pattern API

[id="{upid}-{api}-request"]
==== Request

The Put Auto Follow Pattern API allows you to store auto follow patterns in order
to automatically follow leader indices in remote clusters matching certain
index name patterns.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the auto follow pattern.
<2> The name of the remote cluster.
<3> The leader index patterns.
<4> The leader index exclusion patterns.
<5> The pattern used to create the follower index.
<6> The settings overrides for the follower index.

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the put auto follow pattern request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the put auto follow pattern request was acknowledged.

include::../execution.asciidoc[]
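
A minimal, untested sketch of storing an auto follow pattern through the `RestHighLevelClient` follows. The constructor shape (pattern name, remote cluster, leader index patterns), the `setFollowIndexNamePattern` setter and the `putAutoFollowPattern` method name are assumptions inferred from the callouts above.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
PutAutoFollowPatternRequest request = new PutAutoFollowPatternRequest(
    "my_pattern", "remote_cluster", Arrays.asList("logs-*")); // assumed constructor
request.setFollowIndexNamePattern("copy-{{leader_index}}");   // assumed setter (callout <5>)
AcknowledgedResponse response =
    client.ccr().putAutoFollowPattern(request, RequestOptions.DEFAULT); // assumed method name
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------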
@ -1,39 +0,0 @@
--
:api: ccr-put-follow
:request: PutFollowRequest
:response: PutFollowResponse
--

[role="xpack"]
[id="{upid}-{api}"]
=== Create follower API

[id="{upid}-{api}-request"]
==== Request

Creates a follower index and makes that index follow a leader index.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the remote cluster alias.
<2> The name of the leader in the remote cluster.
<3> The name of the follower index to create.
<4> The number of shard copies that must be active before the call returns.
<5> The settings overrides for the follower index.

[id="{upid}-{api}-response"]
==== Response

The +{response}+ indicates if the request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether the follower index was created.
<2> Whether the follower shards are started.
<3> Whether the follower index started following the leader index.

include::../execution.asciidoc[]
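
For reference, a minimal sketch of creating a follower index with the `RestHighLevelClient`; the constructor argument order and the response getters follow the callouts above but are assumptions rather than code from the removed test sources.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
PutFollowRequest request = new PutFollowRequest(
    "remote_cluster", "leader", "follower", ActiveShardCount.ONE); // assumed constructor
PutFollowResponse response =
    client.ccr().putFollow(request, RequestOptions.DEFAULT);       // assumed method name
boolean created = response.isFollowIndexCreated();        // callout <1>
boolean shardsAcked = response.isFollowIndexShardsAcked(); // callout <2>
boolean started = response.isIndexFollowingStarted();     // callout <3>
--------------------------------------------------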
@ -1,33 +0,0 @@
--
:api: ccr-resume-auto-follow-pattern
:request: ResumeAutoFollowPatternRequest
:response: AcknowledgedResponse
--

[role="xpack"]
[id="{upid}-{api}"]
=== Resume Auto Follow Pattern API

[id="{upid}-{api}-request"]
==== Request

The Resume Auto Follow Pattern API allows you to resume the activity
of a paused auto follow pattern.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the auto follow pattern.

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the resume auto follow pattern request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the resume auto follow pattern request was acknowledged.

include::../execution.asciidoc[]
@ -1,35 +0,0 @@
--
:api: ccr-resume-follow
:request: ResumeFollowRequest
:response: AcknowledgedResponse
--

[role="xpack"]
[id="{upid}-{api}"]
=== Resume Follow API

[id="{upid}-{api}-request"]
==== Request

The Resume Follow API allows you to resume following a follower index that has been paused.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the follower index.

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the resume follow request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the resume follow was acknowledged.

include::../execution.asciidoc[]
@ -1,36 +0,0 @@
--
:api: ccr-unfollow
:request: UnfollowRequest
:response: AcknowledgedResponse
--

[role="xpack"]
[id="{upid}-{api}"]
=== Unfollow API

[id="{upid}-{api}-request"]
==== Request

The Unfollow API allows you to unfollow a follower index and make it a regular index.
Note that the follower index first needs to be paused and closed.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the follower index to unfollow.

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the unfollow request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the unfollow request was acknowledged.

include::../execution.asciidoc[]
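
A minimal, untested sketch of turning a follower back into a regular index through the `RestHighLevelClient`; the `unfollow` method name is an assumption inferred from the API name above.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
// The follower index must already be paused and closed, as noted above.
UnfollowRequest request = new UnfollowRequest("follower"); // follower index to convert
AcknowledgedResponse response =
    client.ccr().unfollow(request, RequestOptions.DEFAULT); // assumed method name
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------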
@ -1,41 +0,0 @@
--
:api: delete-component-template
:request: DeleteComponentTemplateRequest
:response: AcknowledgedResponse
--

[id="{upid}-{api}"]
=== Delete Component Template API

[id="{upid}-{api}-request"]
==== Request

The Delete Component Template API allows you to delete a component template.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the component template to delete.

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the delete component template request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the delete component template request was acknowledged.

include::../execution.asciidoc[]
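
For reference, a minimal sketch of deleting a component template with the `RestHighLevelClient`; the `deleteComponentTemplate` method name on the cluster client is an assumption, not code taken from the removed test sources.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
DeleteComponentTemplateRequest request = new DeleteComponentTemplateRequest("ct1");
AcknowledgedResponse response =
    client.cluster().deleteComponentTemplate(request, RequestOptions.DEFAULT); // assumed method name
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------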
@ -1,42 +0,0 @@
--
:api: get-component-templates
:request: GetComponentTemplatesRequest
:response: GetComponentTemplatesResponse
--

[id="{upid}-{api}"]
=== Get Component Templates API

The Get Component Templates API allows you to retrieve information about one or more component templates.

[id="{upid}-{api}-request"]
==== Get Component Templates Request

A +{request}+ specifies one component template name to retrieve.
To return all component templates, omit the name altogether.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> A single component template name

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Get Component Templates Response

The returned +{response}+ consists of a map of component template names and their corresponding definitions.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> A map of matching component template names and the corresponding definitions
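
A minimal, untested sketch of retrieving component templates with the `RestHighLevelClient` follows; the `getComponentTemplate` method name and the `getComponentTemplates()` accessor are assumptions inferred from the callouts above.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
GetComponentTemplatesRequest request = new GetComponentTemplatesRequest("ct1");
GetComponentTemplatesResponse response =
    client.cluster().getComponentTemplate(request, RequestOptions.DEFAULT); // assumed method name
// Map of matching component template names to their definitions (callout <1>):
response.getComponentTemplates().keySet().forEach(System.out::println);
--------------------------------------------------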
@ -1,63 +0,0 @@
--
:api: get-settings
:request: ClusterGetSettingsRequest
:response: ClusterGetSettingsResponse
--

[id="{upid}-{api}"]
=== Cluster Get Settings API

The Cluster Get Settings API allows you to get the cluster-wide settings.

[id="{upid}-{api}-request"]
==== Cluster Get Settings Request

A +{request}+:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-includeDefaults]
--------------------------------------------------
<1> By default only those settings that were explicitly set are returned. Setting this to `true` also returns
the default settings.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-local]
--------------------------------------------------
<1> By default the request goes to the master of the cluster to get the latest results. If `local` is specified, it gets
the results from whichever node the request goes to.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Cluster Get Settings Response

The returned +{response}+ allows you to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Get the persistent settings.
<2> Get the transient settings.
<3> Get the default settings (returns empty settings if `includeDefaults` was not set to `true`).
<4> Get the value as a `String` for a particular setting. The order of searching is first in `persistentSettings` then in
`transientSettings` and finally, if not found in either, in `defaultSettings`.
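
For quick reference, a minimal sketch of fetching the cluster settings with the `RestHighLevelClient`; the example setting key is an arbitrary choice and the accessors mirror the callouts above.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
ClusterGetSettingsRequest request = new ClusterGetSettingsRequest();
request.includeDefaults(true); // also return the default settings
ClusterGetSettingsResponse response =
    client.cluster().getSettings(request, RequestOptions.DEFAULT);
Settings persistentSettings = response.getPersistentSettings();   // callout <1>
Settings transientSettings = response.getTransientSettings();     // callout <2>
String value = response.getSetting("cluster.routing.allocation.enable"); // callout <4>, example key
--------------------------------------------------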
@ -1,177 +0,0 @@
--
:api: health
:request: ClusterHealthRequest
:response: ClusterHealthResponse
--

[id="{upid}-{api}"]
=== Cluster Health API

The Cluster Health API allows getting cluster health.

[id="{upid}-{api}-request"]
==== Cluster Health Request

A +{request}+:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
There are no required parameters. By default, the client will check all indices and will not wait
for any events.

==== Indices

Indices which should be checked can be passed in the constructor:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-indices-ctr]
--------------------------------------------------

Or using the corresponding setter method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-indices-setter]
--------------------------------------------------

==== Other parameters

Other parameters can be passed only through setter methods:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout for the request as a `TimeValue`. Defaults to 30 seconds
<2> As a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-master-timeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`. Defaults to the same as `timeout`
<2> As a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-wait-status]
--------------------------------------------------
<1> The status to wait for (e.g. `green`, `yellow`, or `red`). Accepts a `ClusterHealthStatus` value.
<2> Using a predefined method

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-wait-events]
--------------------------------------------------
<1> The priority of the events to wait for. Accepts a `Priority` value.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-level]
--------------------------------------------------
<1> The level of detail of the returned health information. Accepts a +{request}.Level+ value.
The default value is `cluster`.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-wait-relocation]
--------------------------------------------------
<1> Wait for 0 relocating shards. Defaults to `false`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-wait-initializing]
--------------------------------------------------
<1> Wait for 0 initializing shards. Defaults to `false`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-wait-nodes]
--------------------------------------------------
<1> Wait for `N` nodes in the cluster. Defaults to `0`
<2> Using `>=N`, `<=N`, `>N` and `<N` notation
<3> Using `ge(N)`, `le(N)`, `gt(N)`, `lt(N)` notation

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-wait-active]
--------------------------------------------------
<1> Wait for all shards to be active in the cluster
<2> Wait for `N` shards to be active in the cluster

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-local]
--------------------------------------------------
<1> A non-master node can be used for this request. Defaults to `false`

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Cluster Health Response

The returned +{response}+ contains the following information about the
cluster:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-general]
--------------------------------------------------
<1> Name of the cluster
<2> Cluster status (`green`, `yellow` or `red`)

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-request-status]
--------------------------------------------------
<1> Whether the request timed out while processing
<2> Status of the request (`OK` or `REQUEST_TIMEOUT`). Other errors will be thrown as exceptions

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-nodes]
--------------------------------------------------
<1> Number of nodes in the cluster
<2> Number of data nodes in the cluster

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-shards]
--------------------------------------------------
<1> Number of active shards
<2> Number of primary active shards
<3> Number of relocating shards
<4> Number of initializing shards
<5> Number of unassigned shards
<6> Number of unassigned shards that are currently being delayed
<7> Percent of active shards

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-task]
--------------------------------------------------
<1> Maximum wait time of all tasks in the queue
<2> Number of currently pending tasks
<3> Number of async fetches that are currently ongoing

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-indices]
--------------------------------------------------
<1> Detailed information about indices in the cluster

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-index]
--------------------------------------------------
<1> Detailed information about a specific index

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-shard-details]
--------------------------------------------------
<1> Detailed information about a specific shard
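
For reference, a minimal, untested sketch of a typical health check through the `RestHighLevelClient`; the index name is a placeholder.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
ClusterHealthRequest request = new ClusterHealthRequest("my-index"); // or no argument for all indices
request.timeout(TimeValue.timeValueSeconds(50));
request.waitForYellowStatus(); // wait until the cluster is at least yellow
ClusterHealthResponse response = client.cluster().health(request, RequestOptions.DEFAULT);
ClusterHealthStatus status = response.getStatus();
boolean timedOut = response.isTimedOut();
int numberOfNodes = response.getNumberOfNodes();
--------------------------------------------------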
@ -1,64 +0,0 @@
--
:api: put-component-template
:request: PutComponentTemplateRequest
:response: AcknowledgedResponse
--

[id="{upid}-{api}"]
=== Create or update component template API

Creates or updates a component template.

[id="{upid}-{api}-request"]
==== Request

A +{request}+ specifies the name of the component template and the template definition,
which can consist of the settings, mappings or aliases, together with a version (which
can be used to simplify component template management by external systems) and a metadata
map consisting of user-specific information.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the component template
<2> Template configuration containing the settings, mappings and aliases for this component template

===== Version
A component template can optionally specify a version number which can be used to simplify template
management by external systems.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-version]
--------------------------------------------------
<1> The version number of the template

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-create]
--------------------------------------------------
<1> To force the request to only create a new template and not overwrite an existing template

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ allows you to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request
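
A minimal, untested sketch of a settings-only component template follows. The `Template`/`ComponentTemplate` construction and the builder-style `name`/`componentTemplate` setters are assumptions inferred from the description above, not code taken from the removed test sources.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
Settings settings = Settings.builder().put("index.number_of_shards", 3).build();
Template template = new Template(settings, null, null);             // assumed: settings only
ComponentTemplate componentTemplate = new ComponentTemplate(template, null, null); // no version/metadata
PutComponentTemplateRequest request = new PutComponentTemplateRequest()
    .name("ct1")                        // assumed builder-style setter
    .componentTemplate(componentTemplate);
AcknowledgedResponse response =
    client.cluster().putComponentTemplate(request, RequestOptions.DEFAULT); // assumed method name
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------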
@ -1,93 +0,0 @@
--
:api: put-settings
:request: ClusterUpdateSettingsRequest
:response: ClusterUpdateSettingsResponse
--

[id="{upid}-{api}"]
=== Cluster Update Settings API

The Cluster Update Settings API allows you to update cluster-wide settings.

[id="{upid}-{api}-request"]
==== Cluster Update Settings Request

A +{request}+:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

==== Cluster Settings
At least one setting to be updated must be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-cluster-settings]
--------------------------------------------------
<1> Sets the transient settings to be applied
<2> Sets the persistent settings to be applied

==== Providing the Settings
The settings to be applied can be provided in different ways:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-create-settings]
--------------------------------------------------
<1> Creates a transient setting as `Settings`
<2> Creates a persistent setting as `Settings`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-settings-builder]
--------------------------------------------------
<1> Settings provided as `Settings.Builder`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-settings-source]
--------------------------------------------------
<1> Settings provided as `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-settings-map]
--------------------------------------------------
<1> Settings provided as a `Map`

==== Optional Arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the settings were applied
as a `TimeValue`
<2> Timeout to wait for all the nodes to acknowledge the settings were applied
as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Cluster Update Settings Response

The returned +{response}+ allows you to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request
<2> Indicates which transient settings have been applied
<3> Indicates which persistent settings have been applied
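
For quick reference, a minimal sketch of updating a persistent cluster setting through the `RestHighLevelClient`; the setting key is an arbitrary example.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
ClusterUpdateSettingsRequest request = new ClusterUpdateSettingsRequest();
request.persistentSettings(Settings.builder()
    .put("cluster.routing.allocation.enable", "all") // example setting
    .build());
ClusterUpdateSettingsResponse response =
    client.cluster().putSettings(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged();          // callout <1>
Settings appliedPersistent = response.getPersistentSettings(); // callout <3>
--------------------------------------------------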
@ -1,32 +0,0 @@
--
:api: remote-info
:request: RemoteInfoRequest
:response: RemoteInfoResponse
--

[id="{upid}-{api}"]
=== Remote Cluster Info API

The Remote Cluster Info API allows you to get all of the configured remote cluster information.

[id="{upid}-{api}-request"]
==== Remote Cluster Info Request

A +{request}+:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

There are no required parameters.

==== Remote Cluster Info Response

The returned +{response}+ allows you to retrieve remote cluster information.
It returns connection and endpoint information keyed by the configured remote cluster alias.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
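
A minimal, untested sketch of fetching remote cluster information with the `RestHighLevelClient`; the `getInfos()` and `getClusterAlias()` accessors are assumptions based on the description above.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
RemoteInfoRequest request = new RemoteInfoRequest(); // no parameters
RemoteInfoResponse response =
    client.cluster().remoteInfo(request, RequestOptions.DEFAULT);
// One entry per configured remote cluster alias (assumed accessors):
response.getInfos().forEach(info -> System.out.println(info.getClusterAlias()));
--------------------------------------------------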
@ -1,217 +0,0 @@
--
:api: bulk
:request: BulkRequest
:response: BulkResponse
--

[id="{upid}-{api}"]
=== Bulk API

NOTE: The Java High Level REST Client provides the
<<{upid}-{api}-processor>> to assist with bulk requests.

[id="{upid}-{api}-request"]
==== Bulk Request

A +{request}+ can be used to execute multiple index, update and/or delete
operations using a single request.

It requires at least one operation to be added to the Bulk request:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Creates the +{request}+
<2> Adds a first `IndexRequest` to the Bulk request. See <<{upid}-index>> for
more information on how to build `IndexRequest`.
<3> Adds a second `IndexRequest`
<4> Adds a third `IndexRequest`

WARNING: The Bulk API supports only documents encoded in JSON or SMILE.
Providing documents in any other format will result in an error.

And different operation types can be added to the same +{request}+:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-with-mixed-operations]
--------------------------------------------------
<1> Adds a `DeleteRequest` to the +{request}+. See <<{upid}-delete>>
for more information on how to build `DeleteRequest`.
<2> Adds an `UpdateRequest` to the +{request}+. See <<{upid}-update>>
for more information on how to build `UpdateRequest`.
<3> Adds an `IndexRequest` using the SMILE format

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout to wait for the bulk request to be performed as a `TimeValue`
<2> Timeout to wait for the bulk request to be performed as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-refresh]
--------------------------------------------------
<1> Refresh policy as a `WriteRequest.RefreshPolicy` instance
<2> Refresh policy as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-active-shards]
--------------------------------------------------
<1> Sets the number of shard copies that must be active before proceeding with
the index/update/delete operations.
<2> Number of shard copies provided as an `ActiveShardCount`: can be
`ActiveShardCount.ALL`, `ActiveShardCount.ONE` or
`ActiveShardCount.DEFAULT` (default)

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-pipeline]
--------------------------------------------------
<1> Global pipelineId used on all sub requests, unless overridden on a sub request

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-routing]
--------------------------------------------------
<1> Global routingId used on all sub requests, unless overridden on a sub request

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-index-type]
--------------------------------------------------
<1> A bulk request with a global index used on all sub requests, unless overridden on a sub request.
This parameter is @Nullable and can only be set during +{request}+ creation.

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Bulk Response

The returned +{response}+ contains information about the executed operations and
allows you to iterate over each result as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Iterate over the results of all operations
<2> Retrieve the response of the operation (successful or not), can be
`IndexResponse`, `UpdateResponse` or `DeleteResponse` which can all be seen as
`DocWriteResponse` instances
<3> Handle the response of an index operation
<4> Handle the response of an update operation
<5> Handle the response of a delete operation

The Bulk response provides a method to quickly check if one or more operations
have failed:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-has-failures]
--------------------------------------------------
<1> This method returns `true` if at least one operation failed

In such a situation it is necessary to iterate over all operation results in order
to check if the operation failed, and if so, retrieve the corresponding failure:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-errors]
--------------------------------------------------
<1> Indicate if a given operation failed
<2> Retrieve the failure of the failed operation

[id="{upid}-{api}-processor"]
==== Bulk Processor

The `BulkProcessor` simplifies the usage of the Bulk API by providing
a utility class that allows index/update/delete operations to be
transparently executed as they are added to the processor.

In order to execute the requests, the `BulkProcessor` requires the following
components:

`RestHighLevelClient`:: This client is used to execute the +{request}+
and to retrieve the `BulkResponse`
`BulkProcessor.Listener`:: This listener is called before and after
every +{request}+ execution or when a +{request}+ fails

Then the `BulkProcessor.builder` method can be used to build a new
`BulkProcessor`:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-processor-init]
--------------------------------------------------
<1> Create the `BulkProcessor.Listener`
<2> This method is called before each execution of a +{request}+
<3> This method is called after each execution of a +{request}+
<4> This method is called when a +{request}+ failed
<5> Create the `BulkProcessor` by calling the `build()` method from
the `BulkProcessor.Builder`. The `RestHighLevelClient.bulkAsync()`
method will be used to execute the +{request}+ under the hood.

The `BulkProcessor.Builder` provides methods to configure how the
`BulkProcessor` should handle request execution:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-processor-options]
--------------------------------------------------
<1> Set when to flush a new bulk request based on the number of
actions currently added (defaults to 1000, use -1 to disable it)
<2> Set when to flush a new bulk request based on the size of
actions currently added (defaults to 5MB, use -1 to disable it)
<3> Set the number of concurrent requests allowed to be executed
(defaults to 1, use 0 to only allow the execution of a single request)
<4> Set a flush interval flushing any +{request}+ pending if the
interval passes (defaults to not set)
<5> Set a constant back off policy that initially waits for 1 second
and retries up to 3 times. See `BackoffPolicy.noBackoff()`,
`BackoffPolicy.constantBackoff()` and `BackoffPolicy.exponentialBackoff()`
for more options.

Once the `BulkProcessor` is created, requests can be added to it:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-processor-add]
--------------------------------------------------

The requests will be executed by the `BulkProcessor`, which takes care of
calling the `BulkProcessor.Listener` for every bulk request.

The listener provides methods to access the +{request}+ and the +{response}+:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-processor-listener]
--------------------------------------------------
<1> Called before each execution of a +{request}+, this method allows you to know
the number of operations that are going to be executed within the +{request}+
<2> Called after each execution of a +{request}+, this method allows you to know if
the +{response}+ contains errors
<3> Called if the +{request}+ failed, this method allows you to know
the failure

Once all requests have been added to the `BulkProcessor`, its instance needs to
be closed using one of the two available closing methods.

The `awaitClose()` method can be used to wait until all requests have been
processed or the specified waiting time elapses:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-processor-await]
--------------------------------------------------
<1> The method returns `true` if all bulk requests completed and `false` if the
waiting time elapsed before all the bulk requests completed

The `close()` method can be used to immediately close the `BulkProcessor`:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-processor-close]
--------------------------------------------------

Both methods flush the requests added to the processor before closing the
processor and also forbid any new request to be added to it.
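
For quick reference, a minimal, untested sketch of building a mixed bulk request, executing it synchronously and checking for per-item failures; index and document names are placeholders.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
BulkRequest request = new BulkRequest();
request.add(new IndexRequest("posts").id("1")
    .source(XContentType.JSON, "field", "foo"));
request.add(new IndexRequest("posts").id("2")
    .source(XContentType.JSON, "field", "bar"));
request.add(new DeleteRequest("posts", "3"));
BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
if (bulkResponse.hasFailures()) {
    for (BulkItemResponse itemResponse : bulkResponse) {
        if (itemResponse.isFailed()) {
            // inspect or log the failure of this particular operation
            System.out.println(itemResponse.getFailureMessage());
        }
    }
}
--------------------------------------------------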
@ -1,131 +0,0 @@
--
:api: delete-by-query
:request: DeleteByQueryRequest
:response: DeleteByQueryResponse
--

[id="{upid}-{api}"]
=== Delete By Query API

[id="{upid}-{api}-request"]
==== Delete By Query Request

A +{request}+ can be used to delete documents from an index. It requires an
existing index (or a set of indices) on which deletion is to be performed.

The simplest form of a +{request}+ looks like this and deletes all documents
in an index:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Creates the +{request}+ on a set of indices.

By default, version conflicts abort the +{request}+ process, but you can just
count them instead:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-conflicts]
--------------------------------------------------
<1> Set `proceed` on version conflict

You can limit the documents by adding a query.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-query]
--------------------------------------------------
<1> Only delete documents which have the field `user` set to `kimchy`

It’s also possible to limit the number of processed documents by setting `maxDocs`.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-maxDocs]
--------------------------------------------------
<1> Only delete 10 documents

By default +{request}+ uses batches of 1000. You can change the batch size
with `setBatchSize`.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-scrollSize]
--------------------------------------------------
<1> Use batches of 100 documents

+{request}+ can also be parallelized using `sliced-scroll` with `setSlices`:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-slices]
--------------------------------------------------
<1> Set the number of slices to use

+{request}+ uses the `scroll` parameter to control how long it keeps the
"search context" alive.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-scroll]
--------------------------------------------------
<1> Set the scroll time

If you provide routing then the routing is copied to the scroll query, limiting the process to the shards that match
that routing value.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-routing]
--------------------------------------------------
<1> Set the routing value

==== Optional arguments
In addition to the options above, the following arguments can also optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout to wait for the delete by query request to be performed as a `TimeValue`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-refresh]
--------------------------------------------------
<1> Refresh index after calling delete by query

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-indicesOptions]
--------------------------------------------------
<1> Set indices options

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Delete By Query Response

The returned +{response}+ contains information about the executed operations and
allows you to retrieve it as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Get total time taken
<2> Check if the request timed out
<3> Get total number of docs processed
<4> Number of docs that were deleted
<5> Number of batches that were executed
<6> Number of skipped docs
<7> Number of version conflicts
<8> Number of times request had to retry bulk index operations
<9> Number of times request had to retry search operations
<10> The total time this request has throttled itself not including the current throttle time if it is currently sleeping
<11> Remaining delay of any current throttle sleep or 0 if not sleeping
<12> Failures during search phase
<13> Failures during bulk index operation
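
A minimal, untested sketch of how such a request is typically assembled (execution follows the generic execution section included above); the index name and query values are placeholders.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
DeleteByQueryRequest request = new DeleteByQueryRequest("my-index");
request.setQuery(QueryBuilders.termQuery("user", "kimchy")); // only delete matching documents
request.setConflicts("proceed");  // count version conflicts instead of aborting
request.setBatchSize(100);        // scroll in batches of 100 documents
request.setRefresh(true);         // refresh the index once the request completes
--------------------------------------------------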
@ -1,90 +0,0 @@
--
:api: delete
:request: DeleteRequest
:response: DeleteResponse
--

[id="{upid}-{api}"]
=== Delete API

[id="{upid}-{api}-request"]
==== Delete Request

A +{request}+ has two required arguments:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Index
<2> Document id

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-routing]
--------------------------------------------------
<1> Routing value

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout to wait for the primary shard to become available as a `TimeValue`
<2> Timeout to wait for the primary shard to become available as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-refresh]
--------------------------------------------------
<1> Refresh policy as a `WriteRequest.RefreshPolicy` instance
<2> Refresh policy as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-version]
--------------------------------------------------
<1> Version

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-version-type]
--------------------------------------------------
<1> Version type

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Delete Response

The returned +{response}+ allows you to retrieve information about the executed
operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Handle the situation where the number of successful shards is less than
the total number of shards
<2> Handle the potential failures

It is also possible to check whether the document was found or not:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-notfound]
--------------------------------------------------
<1> Do something if the document to be deleted was not found

If there is a version conflict, an `ElasticsearchException` will
be thrown:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-conflict]
--------------------------------------------------
<1> The raised exception indicates that a version conflict error was returned
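
For quick reference, a minimal sketch of a synchronous delete and the not-found check described above; index and document id are placeholders.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
DeleteRequest request = new DeleteRequest("posts", "1");
DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT);
if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) {
    // the document to be deleted was not found
}
--------------------------------------------------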
@ -1,37 +0,0 @@
--
:api: exists
:request: GetRequest
:response: boolean
--

[id="{upid}-{api}"]
=== Exists API

The exists API returns `true` if a document exists, and `false` otherwise.

[id="{upid}-{api}-request"]
==== Exists Request

It uses +{request}+ just like the <<java-rest-high-document-get>>.
All of its <<java-rest-high-document-get-request-optional-arguments, optional arguments>>
are supported. Since `exists()` only returns `true` or `false`, we recommend
turning off fetching `_source` and any stored fields so the request is
slightly lighter:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Index
<2> Document id
<3> Disable fetching `_source`.
<4> Disable fetching stored fields.

include::../execution.asciidoc[]

==== Source exists request
A variant of the exists request is the `existsSource` method, which additionally checks
that the document in question has a stored `_source`. If the mapping for the index has
disabled storing the JSON source in documents, this method will return `false`
for documents in that index.
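
A minimal, untested sketch of the lighter exists check described above; index and document id are placeholders.

["source","java"]
--------------------------------------------------
// Assumes an existing RestHighLevelClient named `client`.
GetRequest getRequest = new GetRequest("posts", "1");
getRequest.fetchSourceContext(new FetchSourceContext(false)); // skip fetching _source
getRequest.storedFields("_none_");                            // skip stored fields
boolean exists = client.exists(getRequest, RequestOptions.DEFAULT);
// client.existsSource(...) additionally requires the document to have a stored _source.
--------------------------------------------------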
@ -1,72 +0,0 @@
--
:api: get-source
:request: GetSourceRequest
:response: GetSourceResponse
--

[id="{upid}-{api}"]
=== Get Source API

This API returns only the `_source` field of a document.

[id="{upid}-{api}-request"]
==== Get Source Request

A +{request}+ requires the following arguments:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Index
<2> Document id

[id="{upid}-{api}-request-optional"]
==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-optional]
--------------------------------------------------
<1> The first argument of `FetchSourceContext`, `fetchSource`, must be `true`; otherwise
an `ElasticsearchException` is thrown
<2> The `excludes` and `includes` arguments of the context are optional
(see the examples in the Get API documentation)

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-routing]
--------------------------------------------------
<1> Routing value

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-preference]
--------------------------------------------------
<1> Preference value

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-realtime]
--------------------------------------------------
<1> Set realtime flag to `false` (`true` by default)

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-refresh]
--------------------------------------------------
<1> Perform a refresh before retrieving the document (`false` by default)

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Get Source Response

The returned +{response}+ contains the field `source` that represents the
source of the document as a map.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
@ -1,126 +0,0 @@
|
|||
--
|
||||
:api: get
|
||||
:request: GetRequest
|
||||
:response: GetResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Get API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Get Request
|
||||
|
||||
A +{request}+ requires the following arguments:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> Index
|
||||
<2> Document id
|
||||
|
||||
[id="{upid}-{api}-request-optional-arguments"]
|
||||
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-no-source]
|
||||
--------------------------------------------------
|
||||
<1> Disable source retrieval, enabled by default
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-source-include]
|
||||
--------------------------------------------------
|
||||
<1> Configure source inclusion for specific fields
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-source-exclude]
|
||||
--------------------------------------------------
|
||||
<1> Configure source exclusion for specific fields
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-stored]
|
||||
--------------------------------------------------
|
||||
<1> Configure retrieval for specific stored fields (requires fields to be
|
||||
stored separately in the mappings)
|
||||
<2> Retrieve the `message` stored field (requires the field to be stored
|
||||
separately in the mappings)
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-routing]
|
||||
--------------------------------------------------
|
||||
<1> Routing value
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-preference]
|
||||
--------------------------------------------------
|
||||
<1> Preference value
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-realtime]
|
||||
--------------------------------------------------
|
||||
<1> Set realtime flag to `false` (`true` by default)
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-refresh]
|
||||
--------------------------------------------------
|
||||
<1> Perform a refresh before retrieving the document (`false` by default)
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-version]
|
||||
--------------------------------------------------
|
||||
<1> Version
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-version-type]
|
||||
--------------------------------------------------
|
||||
<1> Version type
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Get Response
|
||||
|
||||
The returned +{response}+ allows you to retrieve the requested document along with
|
||||
its metadata and, if requested, its stored fields.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Retrieve the document as a `String`
|
||||
<2> Retrieve the document as a `Map<String, Object>`
|
||||
<3> Retrieve the document as a `byte[]`
|
||||
<4> Handle the scenario where the document was not found. Note that although
|
||||
the returned response has a `404` status code, a valid +{response}+ is
|
||||
returned rather than an exception thrown. Such a response does not hold any
|
||||
source document and its `isExists` method returns `false`.
|
||||
|
||||
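As a rough, hand-written illustration of the request construction and response
handling described above (assuming an initialized `RestHighLevelClient` named
`client`; index and id are made up):

["source","java"]
--------------------------------------------------
import java.util.Map;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;

GetRequest getRequest = new GetRequest("posts", "1");          // index and document id
GetResponse getResponse = client.get(getRequest, RequestOptions.DEFAULT);
if (getResponse.isExists()) {
    String sourceAsString = getResponse.getSourceAsString();          // source as a String
    Map<String, Object> sourceAsMap = getResponse.getSourceAsMap();   // source as a Map
    byte[] sourceAsBytes = getResponse.getSourceAsBytes();            // source as a byte[]
} else {
    // document not found: a valid response is returned, no exception is thrown
}
--------------------------------------------------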
When a get request is performed against an index that does not exist, the
|
||||
response has a `404` status code and an `ElasticsearchException` gets thrown,
|
||||
which needs to be handled as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-indexnotfound]
|
||||
--------------------------------------------------
|
||||
<1> Handle the exception thrown because the index does not exist
|
||||
|
||||
In case a specific document version has been requested, and the existing
|
||||
document has a different version number, a version conflict is raised:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-conflict]
|
||||
--------------------------------------------------
|
||||
<1> The raised exception indicates that a version conflict error was returned
|
|
@ -1,132 +0,0 @@
|
|||
--
|
||||
:api: index
|
||||
:request: IndexRequest
|
||||
:response: IndexResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Index API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Index Request
|
||||
|
||||
An +{request}+ requires the following arguments:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-string]
|
||||
--------------------------------------------------
|
||||
<1> Index
|
||||
<2> Document id for the request
|
||||
<3> Document source provided as a `String`
|
||||
|
||||
==== Providing the document source
|
||||
The document source can be provided in different ways in addition to the
|
||||
`String` example shown above:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-map]
|
||||
--------------------------------------------------
|
||||
<1> Document source provided as a `Map` which gets automatically converted
|
||||
to JSON format
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-xcontent]
|
||||
--------------------------------------------------
|
||||
<1> Document source provided as an `XContentBuilder` object, the Elasticsearch
|
||||
built-in helper used to generate JSON content
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-shortcut]
|
||||
--------------------------------------------------
|
||||
<1> Document source provided as `Object` key-pairs, which gets converted to
|
||||
JSON format
|
||||
|
||||
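A compact, hand-written sketch of those source variants (index name and field
values are made up; `client` is an initialized `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
import java.util.HashMap;
import java.util.Map;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

// Source as a JSON String
IndexRequest request = new IndexRequest("posts").id("1")
    .source("{\"user\":\"kimchy\",\"message\":\"trying out Elasticsearch\"}", XContentType.JSON);

// Source as a Map, automatically converted to JSON
Map<String, Object> jsonMap = new HashMap<>();
jsonMap.put("user", "kimchy");
IndexRequest mapRequest = new IndexRequest("posts").id("2").source(jsonMap);

// Source as Object key-pairs
IndexRequest shortcutRequest = new IndexRequest("posts").id("3")
    .source("user", "kimchy", "message", "trying out Elasticsearch");
--------------------------------------------------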
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-routing]
|
||||
--------------------------------------------------
|
||||
<1> Routing value
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for primary shard to become available as a `TimeValue`
|
||||
<2> Timeout to wait for primary shard to become available as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-refresh]
|
||||
--------------------------------------------------
|
||||
<1> Refresh policy as a `WriteRequest.RefreshPolicy` instance
|
||||
<2> Refresh policy as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-version]
|
||||
--------------------------------------------------
|
||||
<1> Version
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-version-type]
|
||||
--------------------------------------------------
|
||||
<1> Version type
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-op-type]
|
||||
--------------------------------------------------
|
||||
<1> Operation type provided as a `DocWriteRequest.OpType` value
|
||||
<2> Operation type provided as a `String`: can be `create` or `index` (default)
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-pipeline]
|
||||
--------------------------------------------------
|
||||
<1> The name of the ingest pipeline to be executed before indexing the document
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Index Response
|
||||
|
||||
The returned +{response}+ allows you to retrieve information about the executed
|
||||
operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Handle (if needed) the case where the document was created for the first
|
||||
time
|
||||
<2> Handle (if needed) the case where the document was rewritten as it was
|
||||
already existing
|
||||
<3> Handle the situation where number of successful shards is less than
|
||||
total shards
|
||||
<4> Handle the potential failures
|
||||
|
||||
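For instance, a hand-written sketch of inspecting such a response could look as
follows (assuming `request` is the +{request}+ built earlier and `client` an
initialized `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.replication.ReplicationResponse;

IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT);
if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
    // the document was indexed for the first time
} else if (indexResponse.getResult() == DocWriteResponse.Result.UPDATED) {
    // an existing document was overwritten
}
ReplicationResponse.ShardInfo shardInfo = indexResponse.getShardInfo();
if (shardInfo.getTotal() != shardInfo.getSuccessful()) {
    // fewer shard copies than expected acknowledged the write
}
for (ReplicationResponse.ShardInfo.Failure failure : shardInfo.getFailures()) {
    String reason = failure.reason();                 // inspect each shard failure
}
--------------------------------------------------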
If there is a version conflict, an `ElasticsearchException` will
|
||||
be thrown:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-conflict]
|
||||
--------------------------------------------------
|
||||
<1> The raised exception indicates that a version conflict error was returned
|
||||
|
||||
The same will happen if `opType` was set to `create` and a document with the
|
||||
same index and id already exists:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-optype]
|
||||
--------------------------------------------------
|
||||
<1> The raised exception indicates that a version conflict error was returned
|
|
@ -1,134 +0,0 @@
|
|||
--
|
||||
:api: multi-get
|
||||
:request: MultiGetRequest
|
||||
:response: MultiGetResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Multi-Get API
|
||||
|
||||
The `multiGet` API executes multiple <<java-rest-high-document-get,`get`>>
|
||||
requests in parallel within a single HTTP request.
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Multi-Get Request
|
||||
|
||||
A +{request}+ is built empty and you add `MultiGetRequest.Item`s to configure
|
||||
what to fetch:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> Index
|
||||
<2> Document id
|
||||
<3> Add another item to fetch
|
||||
|
||||
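A hand-written sketch of building and reading such a request (index and ids are
made up; `client` is an initialized `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetItemResponse;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.get.MultiGetResponse;

MultiGetRequest request = new MultiGetRequest();
request.add(new MultiGetRequest.Item("index", "example_id"));
request.add(new MultiGetRequest.Item("index", "another_id"));

MultiGetResponse response = client.mget(request, RequestOptions.DEFAULT);
for (MultiGetItemResponse itemResponse : response.getResponses()) {
    if (itemResponse.getFailure() == null) {
        GetResponse getResponse = itemResponse.getResponse(); // behaves like a normal GetResponse
    }
}
--------------------------------------------------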
==== Optional arguments
|
||||
|
||||
`multiGet` supports the same optional arguments that the
|
||||
<<java-rest-high-document-get-request-optional-arguments,`get` API>> supports.
|
||||
You can set most of these on the `Item`:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-no-source]
|
||||
--------------------------------------------------
|
||||
<1> Disable source retrieval, enabled by default
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-source-include]
|
||||
--------------------------------------------------
|
||||
<1> Configure source inclusion for specific fields
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-source-exclude]
|
||||
--------------------------------------------------
|
||||
<1> Configure source exclusion for specific fields
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-stored]
|
||||
--------------------------------------------------
|
||||
<1> Configure retrieval for specific stored fields (requires fields to be
|
||||
stored separately in the mappings)
|
||||
<2> Retrieve the `foo` stored field (requires the field to be stored
|
||||
separately in the mappings)
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-item-extras]
|
||||
--------------------------------------------------
|
||||
<1> Routing value
|
||||
<2> Version
|
||||
<3> Version type
|
||||
|
||||
{ref}/search-search.html#search-preference[`preference`],
|
||||
{ref}/docs-get.html#realtime[`realtime`]
|
||||
and
|
||||
{ref}/docs-get.html#get-refresh[`refresh`] can be set on the main request but
|
||||
not on any items:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-top-level-extras]
|
||||
--------------------------------------------------
|
||||
<1> Preference value
|
||||
<2> Set realtime flag to `false` (`true` by default)
|
||||
<3> Perform a refresh before retrieving the document (`false` by default)
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Multi Get Response
|
||||
|
||||
The returned +{response}+ contains a list of `MultiGetItemResponse`s in
|
||||
`getResponses` in the same order that they were requested.
|
||||
`MultiGetItemResponse` contains *either* a
|
||||
<<java-rest-high-document-get-response, `GetResponse`>> if the get succeeded
|
||||
or a `MultiGetResponse.Failure` if it failed. A success looks just like a
|
||||
normal `GetResponse`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> `getFailure` returns null because there isn't a failure.
|
||||
<2> `getResponse` returns the `GetResponse`.
|
||||
<3> Retrieve the document as a `String`
|
||||
<4> Retrieve the document as a `Map<String, Object>`
|
||||
<5> Retrieve the document as a `byte[]`
|
||||
<6> Handle the scenario where the document was not found. Note that although
|
||||
the returned response has a `404` status code, a valid `GetResponse` is
|
||||
returned rather than an exception thrown. Such a response does not hold any
|
||||
source document and its `isExists` method returns `false`.
|
||||
|
||||
When one of the subrequests was performed against an index that does not exist,
|
||||
`getFailure` will contain an exception:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-indexnotfound]
|
||||
--------------------------------------------------
|
||||
<1> `getResponse` is null.
|
||||
<2> `getFailure` isn't and contains an `Exception`.
|
||||
<3> That `Exception` is actually an `ElasticsearchException`
|
||||
<4> and it has a status of `NOT_FOUND`. It'd have been an HTTP 404 if this
|
||||
wasn't a multi get.
|
||||
<5> `getMessage` explains the actual cause, `no such index`.
|
||||
|
||||
In case a specific document version has been requested, and the existing
|
||||
document has a different version number, a version conflict is raised:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-conflict]
|
||||
--------------------------------------------------
|
||||
<1> `getResponse` is null.
|
||||
<2> `getFailure` isn't and contains an `Exception`.
|
||||
<3> That `Exception` is actually an `ElasticsearchException`
|
||||
<4> and it has a status of `CONFLICT`. It'd have been an HTTP 409 if this
|
||||
wasn't a multi get.
|
||||
<5> `getMessage` explains the actual cause of the version conflict.
|
|
@ -1,59 +0,0 @@
|
|||
--
|
||||
:api: multi-term-vectors
|
||||
:request: MultiTermVectorsRequest
|
||||
:response: MultiTermVectorsResponse
|
||||
:tvrequest: TermVectorsRequest
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Multi Term Vectors API
|
||||
|
||||
The Multi Term Vectors API allows you to get multiple term vectors at once.
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Multi Term Vectors Request
|
||||
There are two ways to create a +{request}+.
|
||||
|
||||
The first way is to create an empty +{request}+, and then add individual
|
||||
<<java-rest-high-document-term-vectors, term vectors requests>> to it.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> Create an empty +{request}+.
|
||||
<2> Add the first +{tvrequest}+ to the +{request}+.
|
||||
<3> Add the second +{tvrequest}+ for an artificial doc to the +{request}+.
|
||||
|
||||
|
||||
The second way can be used when all term vectors requests share the same
|
||||
arguments, such as index and other settings. In this case, a template
|
||||
+{tvrequest}+ can be created with all necessary settings set, and
|
||||
this template request can be passed to +{request}+ along with all
|
||||
documents' ids for which to execute these requests.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-template]
|
||||
--------------------------------------------------
|
||||
<1> Create a template +{tvrequest}+.
|
||||
<2> Pass documents' ids and the template to the +{request}+.
|
||||
|
||||
|
||||
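A hand-written sketch of the template-based variant (index, field and ids are
made up; `client` is an initialized `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
import java.util.List;
import org.elasticsearch.client.core.MultiTermVectorsRequest;
import org.elasticsearch.client.core.MultiTermVectorsResponse;
import org.elasticsearch.client.core.TermVectorsRequest;
import org.elasticsearch.client.core.TermVectorsResponse;

TermVectorsRequest template = new TermVectorsRequest("authors", "fake_id");
template.setFields("user");                              // settings shared by every document
MultiTermVectorsRequest request =
    new MultiTermVectorsRequest(new String[] { "1", "2" }, template);

MultiTermVectorsResponse response = client.mtermvectors(request, RequestOptions.DEFAULT);
List<TermVectorsResponse> vectors = response.getTermVectorsResponses();
--------------------------------------------------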
include::../execution.asciidoc[]
|
||||
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Multi Term Vectors Response
|
||||
|
||||
+{response}+ allows you to get the list of term vectors responses,
|
||||
each of which can be inspected as described in
|
||||
<<java-rest-high-document-term-vectors>>.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Get a list of `TermVectorsResponse`
|
||||
|
||||
|
|
@ -1,186 +0,0 @@
|
|||
--
|
||||
:api: reindex
|
||||
:request: ReindexRequest
|
||||
:response: BulkByScrollResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Reindex API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Reindex Request
|
||||
|
||||
A +{request}+ can be used to copy documents from one or more indexes into a
|
||||
destination index.
|
||||
|
||||
It requires an existing source index and a target index, which may or may not exist before the request. Reindex does not attempt
|
||||
to set up the destination index. It does not copy the settings of the source index. You should set up the destination
|
||||
index prior to running a `_reindex` action, including setting up mappings, shard counts, replicas, etc.
|
||||
|
||||
The simplest form of a +{request}+ looks like this:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> Creates the +{request}+
|
||||
<2> Adds a list of sources to copy from
|
||||
<3> Adds the destination index
|
||||
|
||||
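A minimal hand-written sketch of that simplest form (index names are
illustrative; `client` is an initialized `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.ReindexRequest;

ReindexRequest request = new ReindexRequest();
request.setSourceIndices("source1", "source2"); // one or more indices to copy from
request.setDestIndex("dest");                   // the destination index, set up beforehand
BulkByScrollResponse response = client.reindex(request, RequestOptions.DEFAULT);
--------------------------------------------------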
The `dest` element can be configured like the index API to control optimistic concurrency control. Just leaving out
|
||||
`versionType` (as above) or setting it to `internal` will cause Elasticsearch to blindly dump documents into the target.
|
||||
Setting `versionType` to `external` will cause Elasticsearch to preserve the version from the source, create any documents
|
||||
that are missing, and update any documents that have an older version in the destination index than they do in the
|
||||
source index.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-versionType]
|
||||
--------------------------------------------------
|
||||
<1> Set the versionType to `EXTERNAL`
|
||||
|
||||
Setting `opType` to `create` will cause `_reindex` to only create missing documents in the target index. All existing
|
||||
documents will cause a version conflict. The default `opType` is `index`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-opType]
|
||||
--------------------------------------------------
|
||||
<1> Set the opType to `create`
|
||||
|
||||
By default version conflicts abort the `_reindex` process but you can just count
|
||||
them instead with:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-conflicts]
|
||||
--------------------------------------------------
|
||||
<1> Set `proceed` on version conflict
|
||||
|
||||
You can limit the documents by adding a query.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-query]
|
||||
--------------------------------------------------
|
||||
<1> Only copy documents which have field `user` set to `kimchy`
|
||||
|
||||
It’s also possible to limit the number of processed documents by setting `maxDocs`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-maxDocs]
|
||||
--------------------------------------------------
|
||||
<1> Only copy 10 documents
|
||||
|
||||
By default `_reindex` uses batches of 1000. You can change the batch size with `sourceBatchSize`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-sourceSize]
|
||||
--------------------------------------------------
|
||||
<1> Use batches of 100 documents
|
||||
|
||||
Reindex can also use the ingest feature by specifying a `pipeline`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-pipeline]
|
||||
--------------------------------------------------
|
||||
<1> set pipeline to `my_pipeline`
|
||||
|
||||
+{request}+ also supports a `script` that modifies the document. It allows you to
|
||||
also change the document's metadata. The following example illustrates that.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-script]
|
||||
--------------------------------------------------
|
||||
<1> `setScript` to increment the `likes` field on all documents with user `kimchy`.
|
||||
|
||||
+{request}+ supports reindexing from a remote Elasticsearch cluster. When using a remote cluster the query should be
|
||||
specified inside the `RemoteInfo` object and not using `setSourceQuery`. If both the remote info and the source query are
|
||||
set it results in a validation error during the request. The reason for this is that the remote Elasticsearch may not
|
||||
understand queries built by the modern query builders. The remote cluster support works all the way back to Elasticsearch
|
||||
0.90 and the query language has changed since then. When reaching older versions, it is safer to write the query by hand
|
||||
in JSON.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-remote]
|
||||
--------------------------------------------------
|
||||
<1> set the remote Elasticsearch cluster
|
||||
|
||||
+{request}+ also helps in automatically parallelizing using `sliced-scroll` to
|
||||
slice on `_id`. Use `setSlices` to specify the number of slices to use.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-slices]
|
||||
--------------------------------------------------
|
||||
<1> set number of slices to use
|
||||
|
||||
+{request}+ uses the `scroll` parameter to control how long it keeps the
|
||||
"search context" alive.
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-scroll]
|
||||
--------------------------------------------------
|
||||
<1> set scroll time
|
||||
|
||||
|
||||
==== Optional arguments
|
||||
In addition to the options above the following arguments can optionally be also provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for the reindex request to be performed as a `TimeValue`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-refresh]
|
||||
--------------------------------------------------
|
||||
<1> Refresh index after calling reindex
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
[id="{upid}-{api}-task-submission"]
|
||||
==== Reindex task submission
|
||||
It is also possible to submit a +{request}+ and not wait for its completion by using the Task API. This is the equivalent of a REST request
|
||||
with the `wait_for_completion` flag set to `false`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{hlrc-tests}/ReindexIT.java[submit-reindex-task]
|
||||
--------------------------------------------------
|
||||
<1> A +{request}+ is constructed the same way as for the synchronous method
|
||||
<2> A submit method returns a `TaskSubmissionResponse` which contains a task identifier.
|
||||
<3> The task identifier can be used to get `response` from a completed task.
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Reindex Response
|
||||
|
||||
The returned +{response}+ contains information about the executed operations and
|
||||
allows you to iterate over each result as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Get total time taken
|
||||
<2> Check if the request timed out
|
||||
<3> Get total number of docs processed
|
||||
<4> Number of docs that were updated
|
||||
<5> Number of docs that were created
|
||||
<6> Number of docs that were deleted
|
||||
<7> Number of batches that were executed
|
||||
<8> Number of skipped docs
|
||||
<9> Number of version conflicts
|
||||
<10> Number of times request had to retry bulk index operations
|
||||
<11> Number of times request had to retry search operations
|
||||
<12> The total time this request has throttled itself not including the current throttle time if it is currently sleeping
|
||||
<13> Remaining delay of any current throttle sleep or 0 if not sleeping
|
||||
<14> Failures during search phase
|
||||
<15> Failures during bulk index operation
|
|
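A hand-written sketch that pulls a few of those values out of the response
(assuming the returned +{response}+ is named `bulkResponse`; only a subset of
the callouts above is shown):

["source","java"]
--------------------------------------------------
import java.util.List;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.core.TimeValue;

TimeValue took = bulkResponse.getTook();                      // total time taken
long total = bulkResponse.getTotal();                         // docs processed
long created = bulkResponse.getCreated();                     // docs created
long versionConflicts = bulkResponse.getVersionConflicts();   // version conflicts
List<BulkItemResponse.Failure> bulkFailures = bulkResponse.getBulkFailures();
--------------------------------------------------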
@ -1,79 +0,0 @@
|
|||
--
|
||||
:api: rethrottle
|
||||
:request: RethrottleRequest
|
||||
:response: ListTasksResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Rethrottle API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Rethrottle Request
|
||||
|
||||
A +{request}+ can be used to change the current throttling on a running
|
||||
reindex, update-by-query or delete-by-query task or to disable throttling of
|
||||
the task entirely. It requires the task Id of the task to change.
|
||||
|
||||
In its simplest form, you can use it to disable throttling of a running
|
||||
task using the following:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-disable-request]
|
||||
--------------------------------------------------
|
||||
<1> Create a +{request}+ that disables throttling for a specific task id
|
||||
|
||||
By providing a `requestsPerSecond` argument, the request will change the
|
||||
existing task throttling to the specified value:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> Request to change the throttling of a task to 100 requests per second
|
||||
|
||||
The rethrottling request can be executed by using one of the three appropriate
|
||||
methods depending on whether a reindex, update-by-query or delete-by-query task
|
||||
should be rethrottled:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-execution]
|
||||
--------------------------------------------------
|
||||
<1> Execute reindex rethrottling request
|
||||
<2> The same for update-by-query
|
||||
<3> The same for delete-by-query
|
||||
|
||||
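A hand-written sketch of both variants (the task id is a placeholder; `client`
is an initialized `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.RethrottleRequest;
import org.elasticsearch.tasks.TaskId;

TaskId taskId = new TaskId("oTUltX4IQMOUUVeiohTt8A:12345");        // placeholder "node:id"
RethrottleRequest disable = new RethrottleRequest(taskId);         // remove throttling entirely
RethrottleRequest hundredPerSecond = new RethrottleRequest(taskId, 100.0f);

ListTasksResponse response =
    client.reindexRethrottle(hundredPerSecond, RequestOptions.DEFAULT);
// client.updateByQueryRethrottle(...) and client.deleteByQueryRethrottle(...) work the same way
--------------------------------------------------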
[id="{upid}-{api}-async"]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a rethrottle request requires both the +{request}+
|
||||
instance and an `ActionListener` instance to be passed to the asynchronous
|
||||
method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> Execute reindex rethrottling asynchronously
|
||||
<2> The same for update-by-query
|
||||
<3> The same for delete-by-query
|
||||
|
||||
The asynchronous method does not block and returns immediately.
|
||||
Once it is completed, the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed. A typical listener looks like this:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-async-listener]
|
||||
--------------------------------------------------
|
||||
<1> Code executed when the request is successfully completed
|
||||
<2> Code executed when the request fails with an exception
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Rethrottle Response
|
||||
|
||||
Rethrottling returns the task that has been rethrottled in the form of a
|
||||
+{response}+. The structure of this response object is described in detail
|
||||
in <<java-rest-high-cluster-list-tasks-response,this section>>.
|
|
@ -1,100 +0,0 @@
|
|||
--
|
||||
:api: term-vectors
|
||||
:request: TermVectorsRequest
|
||||
:response: TermVectorsResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Term Vectors API
|
||||
|
||||
Term Vectors API returns information and statistics on terms in the fields
|
||||
of a particular document. The document could be stored in the index or
|
||||
artificially provided by the user.
|
||||
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Term Vectors Request
|
||||
|
||||
A +{request}+ expects an `index` and an `id` to specify
|
||||
a certain document, and fields for which the information is retrieved.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
|
||||
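For orientation, a hand-written sketch of such a request (index, id and field
are made up; `client` is an initialized `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.core.TermVectorsRequest;
import org.elasticsearch.client.core.TermVectorsResponse;

TermVectorsRequest request = new TermVectorsRequest("authors", "1");
request.setFields("user");                  // fields for which term vectors are returned
TermVectorsResponse response = client.termvectors(request, RequestOptions.DEFAULT);
--------------------------------------------------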
Term vectors can also be generated for artificial documents, that is for
|
||||
documents not present in the index:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-artificial]
|
||||
--------------------------------------------------
|
||||
<1> An artificial document is provided as an `XContentBuilder` object,
|
||||
the Elasticsearch built-in helper to generate JSON content.
|
||||
|
||||
===== Optional arguments
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-optional-arguments]
|
||||
--------------------------------------------------
|
||||
<1> Set `fieldStatistics` to `false` (default is `true`) to omit document count,
|
||||
sum of document frequencies, sum of total term frequencies.
|
||||
<2> Set `termStatistics` to `true` (default is `false`) to display
|
||||
total term frequency and document frequency.
|
||||
<3> Set `positions` to `false` (default is `true`) to omit the output of
|
||||
positions.
|
||||
<4> Set `offsets` to `false` (default is `true`) to omit the output of
|
||||
offsets.
|
||||
<5> Set `payloads` to `false` (default is `true`) to omit the output of
|
||||
payloads.
|
||||
<6> Set `filterSettings` to filter the terms that can be returned based
|
||||
on their tf-idf scores.
|
||||
<7> Set `perFieldAnalyzer` to specify a different analyzer than
|
||||
the one that the field has.
|
||||
<8> Set `realtime` to `false` (default is `true`) to retrieve term vectors
|
||||
near realtime rather than in realtime.
|
||||
<9> Set a routing parameter
|
||||
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Term Vectors Response
|
||||
|
||||
+{response}+ contains the following information:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> The index name of the document.
|
||||
<2> The id of the document.
|
||||
<3> Indicates whether or not the document was found.
|
||||
|
||||
|
||||
===== Inspecting Term Vectors
|
||||
If +{response}+ contains non-null list of term vectors,
|
||||
more information about each term vector can be obtained using the following:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-term-vectors]
|
||||
--------------------------------------------------
|
||||
<1> The name of the current field
|
||||
<2> Fields statistics for the current field - document count
|
||||
<3> Fields statistics for the current field - sum of total term frequencies
|
||||
<4> Fields statistics for the current field - sum of document frequencies
|
||||
<5> Terms for the current field
|
||||
<6> The name of the term
|
||||
<7> Term frequency of the term
|
||||
<8> Document frequency of the term
|
||||
<9> Total term frequency of the term
|
||||
<10> Score of the term
|
||||
<11> Tokens of the term
|
||||
<12> Position of the token
|
||||
<13> Start offset of the token
|
||||
<14> End offset of the token
|
||||
<15> Payload of the token
|
|
@ -1,148 +0,0 @@
|
|||
--
|
||||
:api: update-by-query
|
||||
:request: UpdateByQueryRequest
|
||||
:response: UpdateByQueryResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Update By Query API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Update By Query Request
|
||||
|
||||
A +{request}+ can be used to update documents in an index.
|
||||
|
||||
It requires an existing index (or a set of indices) on which the update is to
|
||||
be performed.
|
||||
|
||||
The simplest form of a +{request}+ looks like this:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> Creates the +{request}+ on a set of indices.
|
||||
|
||||
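A small hand-written sketch combining the simplest form with a few of the
options described below (index, field and values are illustrative; `client` is
an initialized `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.reindex.BulkByScrollResponse;
import org.elasticsearch.index.reindex.UpdateByQueryRequest;

UpdateByQueryRequest request = new UpdateByQueryRequest("source1", "source2");
request.setConflicts("proceed");                             // count version conflicts instead of aborting
request.setQuery(QueryBuilders.termQuery("user", "kimchy")); // limit the affected documents
BulkByScrollResponse response = client.updateByQuery(request, RequestOptions.DEFAULT);
--------------------------------------------------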
By default version conflicts abort the +{request}+ process but you can just
|
||||
count them instead with:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-conflicts]
|
||||
--------------------------------------------------
|
||||
<1> Set `proceed` on version conflict
|
||||
|
||||
You can limit the documents by adding a query.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-query]
|
||||
--------------------------------------------------
|
||||
<1> Only update documents which have the field `user` set to `kimchy`
|
||||
|
||||
It’s also possible to limit the number of processed documents by setting `maxDocs`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-maxDocs]
|
||||
--------------------------------------------------
|
||||
<1> Only update 10 documents
|
||||
|
||||
By default +{request}+ uses batches of 1000. You can change the batch size with
|
||||
`setBatchSize`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-scrollSize]
|
||||
--------------------------------------------------
|
||||
<1> Use batches of 100 documents
|
||||
|
||||
Update by query can also use the ingest feature by specifying a `pipeline`.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-pipeline]
|
||||
--------------------------------------------------
|
||||
<1> set pipeline to `my_pipeline`
|
||||
|
||||
+{request}+ also supports a `script` that modifies the document:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-script]
|
||||
--------------------------------------------------
|
||||
<1> `setScript` to increment the `likes` field on all documents with user `kimchy`.
|
||||
|
||||
+{request}+ can be parallelized using `sliced-scroll` with `setSlices`:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-slices]
|
||||
--------------------------------------------------
|
||||
<1> set number of slices to use
|
||||
|
||||
`UpdateByQueryRequest` uses the `scroll` parameter to control how long it keeps the "search context" alive.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-scroll]
|
||||
--------------------------------------------------
|
||||
<1> set scroll time
|
||||
|
||||
If you provide routing then the routing is copied to the scroll query, limiting the process to the shards that match
|
||||
that routing value.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-routing]
|
||||
--------------------------------------------------
|
||||
<1> set routing
|
||||
|
||||
|
||||
==== Optional arguments
|
||||
In addition to the options above the following arguments can optionally be also provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for the update by query request to be performed as a `TimeValue`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-refresh]
|
||||
--------------------------------------------------
|
||||
<1> Refresh index after calling update by query
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-indicesOptions]
|
||||
--------------------------------------------------
|
||||
<1> Set indices options
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Update By Query Response
|
||||
|
||||
The returned +{response}+ contains information about the executed operations and
|
||||
allows you to iterate over each result as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Get total time taken
|
||||
<2> Check if the request timed out
|
||||
<3> Get total number of docs processed
|
||||
<4> Number of docs that were updated
|
||||
<5> Number of docs that were deleted
|
||||
<6> Number of batches that were executed
|
||||
<7> Number of skipped docs
|
||||
<8> Number of version conflicts
|
||||
<9> Number of times request had to retry bulk index operations
|
||||
<10> Number of times request had to retry search operations
|
||||
<11> The total time this request has throttled itself not including the current throttle time if it is currently sleeping
|
||||
<12> Remaining delay of any current throttle sleep or 0 if not sleeping
|
||||
<13> Failures during search phase
|
||||
<14> Failures during bulk index operation
|
|
@ -1,237 +0,0 @@
|
|||
--
|
||||
:api: update
|
||||
:request: UpdateRequest
|
||||
:response: UpdateResponse
|
||||
--
|
||||
|
||||
[id="{upid}-{api}"]
|
||||
=== Update API
|
||||
|
||||
[id="{upid}-{api}-request"]
|
||||
==== Update Request
|
||||
|
||||
An +{request}+ requires the following arguments:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request]
|
||||
--------------------------------------------------
|
||||
<1> Index
|
||||
<2> Document id
|
||||
|
||||
The Update API allows you to update an existing document by using a script
|
||||
or by passing a partial document.
|
||||
|
||||
==== Updates with a script
|
||||
The script can be provided as an inline script:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-with-inline-script]
|
||||
--------------------------------------------------
|
||||
<1> Script parameters provided as a `Map` of objects
|
||||
<2> Create an inline script using the `painless` language and the previous parameters
|
||||
<3> Sets the script to the update request
|
||||
|
||||
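A hand-written sketch of the inline variant above (field name and parameters
are made up):

["source","java"]
--------------------------------------------------
import java.util.Collections;
import java.util.Map;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;

Map<String, Object> parameters = Collections.singletonMap("count", 4);
Script inline = new Script(ScriptType.INLINE, "painless",
        "ctx._source.field += params.count", parameters);   // inline painless script
UpdateRequest request = new UpdateRequest("posts", "1").script(inline);
--------------------------------------------------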
Or as a stored script:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-with-stored-script]
|
||||
--------------------------------------------------
|
||||
<1> Reference to a script stored under the name `increment-field` in the `painless` language
|
||||
<2> Sets the script in the update request
|
||||
|
||||
==== Updates with a partial document
|
||||
When using updates with a partial document, the partial document will be merged with the
|
||||
existing document.
|
||||
|
||||
The partial document can be provided in different ways:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-with-doc-as-string]
|
||||
--------------------------------------------------
|
||||
<1> Partial document source provided as a `String` in JSON format
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-with-doc-as-map]
|
||||
--------------------------------------------------
|
||||
<1> Partial document source provided as a `Map` which gets automatically converted
|
||||
to JSON format
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-with-doc-as-xcontent]
|
||||
--------------------------------------------------
|
||||
<1> Partial document source provided as an `XContentBuilder` object, the Elasticsearch
|
||||
built-in helper used to generate JSON content
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-shortcut]
|
||||
--------------------------------------------------
|
||||
<1> Partial document source provided as `Object` key-pairs, which gets converted to
|
||||
JSON format
|
||||
|
||||
==== Upserts
|
||||
If the document does not already exist, it is possible to define some content that
|
||||
will be inserted as a new document using the `upsert` method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-upsert]
|
||||
--------------------------------------------------
|
||||
<1> Upsert document source provided as a `String`
|
||||
|
||||
Similarly to the partial document updates, the content of the `upsert` document
|
||||
can be defined using methods that accept `String`, `Map`, `XContentBuilder` or
|
||||
`Object` key-pairs.
|
||||
|
||||
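A hand-written sketch of a partial-document update with an upsert fallback
(values are illustrative; `client` is an initialized `RestHighLevelClient`):

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;

UpdateRequest request = new UpdateRequest("posts", "1")
        .doc("updated", "true")      // partial document, provided as Object key-pairs
        .upsert("created", "true");  // used as the new document if "1" does not exist yet
UpdateResponse response = client.update(request, RequestOptions.DEFAULT);
--------------------------------------------------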
==== Optional arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-routing]
|
||||
--------------------------------------------------
|
||||
<1> Routing value
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for primary shard to become available as a `TimeValue`
|
||||
<2> Timeout to wait for primary shard to become available as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-refresh]
|
||||
--------------------------------------------------
|
||||
<1> Refresh policy as a `WriteRequest.RefreshPolicy` instance
|
||||
<2> Refresh policy as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-retry]
|
||||
--------------------------------------------------
|
||||
<1> How many times to retry the update operation if the document to update has
|
||||
been changed by another operation between the get and indexing phases of the
|
||||
update operation
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-no-source]
|
||||
--------------------------------------------------
|
||||
<1> Enable source retrieval, disabled by default
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-source-include]
|
||||
--------------------------------------------------
|
||||
<1> Configure source inclusion for specific fields
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-source-exclude]
|
||||
--------------------------------------------------
|
||||
<1> Configure source exclusion for specific fields
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-cas]
|
||||
--------------------------------------------------
|
||||
<1> ifSeqNo
|
||||
<2> ifPrimaryTerm
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-detect-noop]
|
||||
--------------------------------------------------
|
||||
<1> Disable the noop detection
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-scripted-upsert]
|
||||
--------------------------------------------------
|
||||
<1> Indicate that the script must run regardless of whether the document exists or not,
|
||||
i.e. the script takes care of creating the document if it does not already exist.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-doc-upsert]
|
||||
--------------------------------------------------
|
||||
<1> Indicate that the partial document must be used as the upsert document if it
|
||||
does not exist yet.
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-active-shards]
|
||||
--------------------------------------------------
|
||||
<1> Sets the number of shard copies that must be active before proceeding with
|
||||
the update operation.
|
||||
<2> Number of shard copies provided as an `ActiveShardCount`: can be `ActiveShardCount.ALL`,
|
||||
`ActiveShardCount.ONE` or `ActiveShardCount.DEFAULT` (default)
|
||||
|
||||
include::../execution.asciidoc[]
|
||||
|
||||
[id="{upid}-{api}-response"]
|
||||
==== Update Response
|
||||
|
||||
The returned +{response}+ allows you to retrieve information about the executed
|
||||
operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-response]
|
||||
--------------------------------------------------
|
||||
<1> Handle the case where the document was created for the first time (upsert)
|
||||
<2> Handle the case where the document was updated
|
||||
<3> Handle the case where the document was deleted
|
||||
<4> Handle the case where the document was not impacted by the update,
|
||||
i.e. no operation (noop) was executed on the document
|
||||
|
||||
When the source retrieval is enabled in the `UpdateRequest`
|
||||
through the `fetchSource` method, the response contains the
|
||||
source of the updated document:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-getresult]
|
||||
--------------------------------------------------
|
||||
<1> Retrieve the updated document as a `GetResult`
|
||||
<2> Retrieve the source of the updated document as a `String`
|
||||
<3> Retrieve the source of the updated document as a `Map<String, Object>`
|
||||
<4> Retrieve the source of the updated document as a `byte[]`
|
||||
<5> Handle the scenario where the source of the document is not present in
|
||||
the response (this is the case by default)
|
||||
|
||||
It is also possible to check for shard failures:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-failure]
|
||||
--------------------------------------------------
|
||||
<1> Handle the situation where number of successful shards is less than
|
||||
total shards
|
||||
<2> Handle the potential failures
|
||||
|
||||
When an `UpdateRequest` is performed against a document that does not exist,
|
||||
the response has a `404` status code and an `ElasticsearchException` gets thrown,
|
||||
which needs to be handled as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-docnotfound]
|
||||
--------------------------------------------------
|
||||
<1> Handle the exception thrown because the document does not exist
|
||||
|
||||
If there is a version conflict, an `ElasticsearchException` will
|
||||
be thrown:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-conflict]
|
||||
--------------------------------------------------
|
||||
<1> The raised exception indicates that a version conflict error was returned.
|
|
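A hedged sketch of both failure cases, assuming a `RestHighLevelClient` named `client`;
the index name `posts` and the document id are placeholders:

["source","java"]
--------------------------------------------------
import java.io.IOException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.rest.RestStatus;

try {
    UpdateRequest request = new UpdateRequest("posts", "does_not_exist")
        .doc("field", "value");
    client.update(request, RequestOptions.DEFAULT);
} catch (ElasticsearchException e) {
    if (e.status() == RestStatus.NOT_FOUND) {
        // the document to update was not found
    } else if (e.status() == RestStatus.CONFLICT) {
        // the version of the document did not match the expected one
    }
} catch (IOException e) {
    // no response could be parsed or the request could not be sent
}
--------------------------------------------------
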
@@ -1,31 +0,0 @@
--
:api: enrich-delete-policy
:request: DeletePolicyRequest
:response: AcknowledgedResponse
--

[id="{upid}-{api}"]
=== Delete Policy API

[id="{upid}-{api}-request"]
==== Request

The Delete Policy API deletes an enrich policy from Elasticsearch.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates whether the delete policy request was acknowledged.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether the delete policy request was acknowledged.

include::../execution.asciidoc[]

@@ -1,30 +0,0 @@
--
:api: enrich-execute-policy
:request: ExecutePolicyRequest
:response: ExecutePolicyResponse
--

[id="{upid}-{api}"]
=== Execute Policy API

[id="{upid}-{api}-request"]
==== Request

The Execute Policy API allows you to execute an enrich policy by name.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ includes either the status or the task id.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------

include::../execution.asciidoc[]

@@ -1,32 +0,0 @@
--
:api: enrich-get-policy
:request: GetPolicyRequest
:response: GetPolicyResponse
--

[id="{upid}-{api}"]
=== Get Policy API

[id="{upid}-{api}-request"]
==== Request

The Get Policy API allows you to retrieve enrich policies by name,
or all policies if no name is provided.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ includes the requested enrich policy.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The actual enrich policy.

include::../execution.asciidoc[]

@@ -1,31 +0,0 @@
--
:api: enrich-put-policy
:request: PutPolicyRequest
:response: AcknowledgedResponse
--

[id="{upid}-{api}"]
=== Create enrich policy API

[id="{upid}-{api}-request"]
==== Request

Creates an enrich policy.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
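A rough sketch of building such a request with the client's `PutPolicyRequest`;
the policy name, source index and field names are illustrative, and the exact
constructor shape may differ between client versions:

["source","java"]
--------------------------------------------------
import java.util.List;
import org.elasticsearch.client.enrich.PutPolicyRequest;

// A match-type policy that enriches documents with user details.
PutPolicyRequest request = new PutPolicyRequest(
    "users-policy",                        // policy name
    "match",                               // policy type
    List.of("users"),                      // source indices
    "email",                               // match field
    List.of("first_name", "last_name"));   // enrich fields
--------------------------------------------------
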

[id="{upid}-{api}-response"]
==== Response

The +{response}+ indicates if the request was acknowledged.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether the request was acknowledged.

include::../execution.asciidoc[]

@@ -1,33 +0,0 @@
--
:api: enrich-stats
:request: StatsRequest
:response: StatsResponse
--

[id="{upid}-{api}"]
=== Stats API

[id="{upid}-{api}-request"]
==== Request

The stats API returns enrich related stats.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ includes enrich related stats.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> List of policies that are currently executing, with
additional details.
<2> List of coordinator stats per ingest node.

include::../execution.asciidoc[]

@@ -1,55 +0,0 @@
////
This file is included by high level rest client API documentation pages
where the client method does not use a request object.
For methods with requests, see execution.asciidoc
////

[id="{upid}-{api}-sync"]
==== Synchronous execution

When executing the +{api}+ API in the following manner, the client waits
for the +{response}+ to be returned before continuing with code execution:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-execute]
--------------------------------------------------

Synchronous calls may throw an `IOException` when the high-level REST client
fails to parse the REST response, the request times out, or no response
comes back from the server.

In cases where the server returns a `4xx` or `5xx` error code, the high-level
client tries to parse the response body error details instead and then throws
a generic `ElasticsearchException`, adding the original `ResponseException` as a
suppressed exception to it.

[id="{upid}-{api}-async"]
==== Asynchronous execution

The +{api}+ API can also be called in an asynchronous fashion so that
the client can return directly. Users need to specify how the response or
potential failures will be handled by passing a listener to the
asynchronous {api} method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-execute-async]
--------------------------------------------------
<1> The `RequestOptions` and `ActionListener` to use when the execution
completes

The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed. Failure scenarios and expected exceptions are the same as in the
synchronous execution case.

A typical listener for +{api}+ looks like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed.
<2> Called when the +{api}+ call fails.

@@ -1,58 +0,0 @@
////
This file is included by every high level rest client API documentation page
so we don't have to copy and paste the same asciidoc over and over again. We
*do* have to copy and paste the same Java tests over and over again. For now
this is intentional because it forces us to *write* and execute the tests
which, while a bit ceremonial, does force us to cover these calls in *some*
test.
////

[id="{upid}-{api}-sync"]
==== Synchronous execution

When executing a +{request}+ in the following manner, the client waits
for the +{response}+ to be returned before continuing with code execution:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-execute]
--------------------------------------------------

Synchronous calls may throw an `IOException` when the high-level REST client
fails to parse the REST response, the request times out, or no response
comes back from the server.

In cases where the server returns a `4xx` or `5xx` error code, the high-level
client tries to parse the response body error details instead and then throws
a generic `ElasticsearchException`, adding the original `ResponseException` as a
suppressed exception to it.

[id="{upid}-{api}-async"]
==== Asynchronous execution

Executing a +{request}+ can also be done in an asynchronous fashion so that
the client can return directly. Users need to specify how the response or
potential failures will be handled by passing the request and a listener to the
asynchronous {api} method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-execute-async]
--------------------------------------------------
<1> The +{request}+ to execute and the `ActionListener` to use when
the execution completes

The asynchronous method does not block and returns immediately. Once it is
completed the `ActionListener` is called back using the `onResponse` method
if the execution successfully completed or using the `onFailure` method if
it failed. Failure scenarios and expected exceptions are the same as in the
synchronous execution case.

A typical listener for +{api}+ looks like:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-execute-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed.
<2> Called when the whole +{request}+ fails.
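
As an illustration, a listener for an API that returns an `AcknowledgedResponse`
could be written as follows; the response type is only an example, substitute the
+{response}+ of the API at hand:

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.core.AcknowledgedResponse;

ActionListener<AcknowledgedResponse> listener =
    new ActionListener<AcknowledgedResponse>() {
        @Override
        public void onResponse(AcknowledgedResponse response) {
            // <1> handle the response, e.g. check response.isAcknowledged()
        }

        @Override
        public void onFailure(Exception e) {
            // <2> handle the failure
        }
    };
--------------------------------------------------
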
@@ -1,197 +0,0 @@
[[java-rest-high-getting-started]]
== Getting started

This section describes how to get started with the high-level REST client, from
getting the artifact to using it in an application.

[[java-rest-high-compatibility]]
=== Compatibility
The Java High Level REST Client requires at least Java 1.8 and depends on the Elasticsearch
core project. The client version is the same as the Elasticsearch version that the
client was developed for. It accepts the same request arguments as the `TransportClient`
and returns the same response objects. See the <<java-rest-high-level-migration>>
if you need to migrate an application from `TransportClient` to the new REST client.

The High Level Client is guaranteed to be able to communicate with any Elasticsearch
node running on the same major version and greater or equal minor version. It
doesn't need to be in the same minor version as the Elasticsearch nodes it
communicates with, as it is forward compatible, meaning that it supports
communicating with later versions of Elasticsearch than the one it was developed for.

The 6.0 client is able to communicate with any 6.x Elasticsearch node, while the 6.1
client is guaranteed to be able to communicate with 6.1, 6.2 and any later 6.x version, but
there may be incompatibility issues when communicating with a previous Elasticsearch
node version, for instance between 6.1 and 6.0, in case the 6.1 client supports new
request body fields for some APIs that are not known by the 6.0 node(s).

It is recommended to upgrade the High Level Client when upgrading the Elasticsearch
cluster to a new major version, as REST API breaking changes may cause unexpected
results depending on the node that is hit by the request, and newly added APIs will
only be supported by the newer version of the client. The client should always be
updated last, once all of the nodes in the cluster have been upgraded to the new
major version.

[[java-rest-high-javadoc]]
=== Javadoc

The javadoc for the REST high level client can be found at {rest-high-level-client-javadoc}/index.html.

[[java-rest-high-getting-started-maven]]
=== Maven Repository

The high-level Java REST client is hosted on
https://search.maven.org/search?q=g:org.elasticsearch.client[Maven
Central]. The minimum Java version required is `1.8`.

The High Level REST Client is subject to the same release cycle as
Elasticsearch. Replace the version with the desired client version.

If you are looking for a SNAPSHOT version, you should add our snapshot repository to your Maven config:

["source","xml",subs="attributes"]
--------------------------------------------------
<repositories>
    <repository>
        <id>es-snapshots</id>
        <name>elasticsearch snapshot repo</name>
        <url>https://snapshots.elastic.co/maven/</url>
    </repository>
</repositories>
--------------------------------------------------

or in Gradle:

["source","groovy",subs="attributes"]
--------------------------------------------------
maven {
    url "https://snapshots.elastic.co/maven/"
}
--------------------------------------------------

[[java-rest-high-getting-started-maven-maven]]
==== Maven configuration

Here is how you can configure the dependency using maven as a dependency manager.
Add the following to your `pom.xml` file:

["source","xml",subs="attributes"]
--------------------------------------------------
<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>elasticsearch-rest-high-level-client</artifactId>
    <version>{version}</version>
</dependency>
--------------------------------------------------

[[java-rest-high-getting-started-maven-gradle]]
==== Gradle configuration

Here is how you can configure the dependency using gradle as a dependency manager.
Add the following to your `build.gradle` file:

["source","groovy",subs="attributes"]
--------------------------------------------------
dependencies {
    compile 'org.elasticsearch.client:elasticsearch-rest-high-level-client:{version}'
}
--------------------------------------------------

[[java-rest-high-getting-started-maven-lucene]]
==== Lucene Snapshot repository

The very first releases of any major version (like a beta) might have been built on top of a Lucene Snapshot version.
In such a case you will be unable to resolve the Lucene dependencies of the client.

For example, if you want to use the `7.0.0-beta1` version which depends on Lucene `8.0.0-snapshot-83f9835`, you must
define the following repository.

For Maven:

["source","xml",subs="attributes"]
--------------------------------------------------
<repository>
    <id>elastic-lucene-snapshots</id>
    <name>Elastic Lucene Snapshots</name>
    <url>https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835</url>
    <releases><enabled>true</enabled></releases>
    <snapshots><enabled>false</enabled></snapshots>
</repository>
--------------------------------------------------

For Gradle:

["source","groovy",subs="attributes"]
--------------------------------------------------
maven {
    name 'lucene-snapshots'
    url 'https://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/83f9835'
}
--------------------------------------------------

[[java-rest-high-getting-started-dependencies]]
=== Dependencies

The High Level Java REST Client depends on the following artifacts and their
transitive dependencies:

- org.elasticsearch.client:elasticsearch-rest-client
- org.elasticsearch:elasticsearch


[[java-rest-high-getting-started-initialization]]
=== Initialization

A `RestHighLevelClient` instance needs a <<java-rest-low-usage-initialization,REST low-level client builder>>
to be built as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[rest-high-level-client-init]
--------------------------------------------------

The high-level client will internally create the low-level client used to
perform requests based on the provided builder. That low-level client
maintains a pool of connections and starts some threads, so you should
close the high-level client when you are well and truly done with it;
it will in turn close the internal low-level client to free those
resources. This can be done through the `close` method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[rest-high-level-client-close]
--------------------------------------------------
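
Concretely, a typical build-and-close cycle looks like the following sketch;
the host and port are placeholders:

["source","java"]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;

RestHighLevelClient client = new RestHighLevelClient(
    RestClient.builder(new HttpHost("localhost", 9200, "http")));

// ... use the client ...

client.close(); // also closes the internal low-level client
--------------------------------------------------
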
In the rest of this documentation about the Java High Level Client, the `RestHighLevelClient` instance
will be referenced as `client`.

[[java-rest-high-getting-started-request-options]]
=== RequestOptions

All APIs in the `RestHighLevelClient` accept a `RequestOptions` which you can
use to customize the request in ways that won't change how Elasticsearch
executes the request. For example, this is the place where you'd specify a
`NodeSelector` to control which node receives the request. See the
<<java-rest-low-usage-request-options,low level client documentation>> for
more examples of customizing the options.
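
For example, a common options instance with an extra header can be built once
and reused for every request; the header name and token value below are illustrative:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;

String token = "access-token-value"; // placeholder
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
builder.addHeader("Authorization", "Bearer " + token);
RequestOptions commonOptions = builder.build();

// then pass it to any API call, e.g.:
// client.get(getRequest, commonOptions);
--------------------------------------------------
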

[[java-rest-high-getting-started-asynchronous-usage]]
=== Asynchronous usage

All of the methods across the different clients exist in a traditional synchronous and
asynchronous variant. The difference is that the asynchronous ones use asynchronous requests
in the REST Low Level Client. This is useful if you are doing multiple requests or are using
e.g. RxJava, Kotlin coroutines, or similar frameworks.

The asynchronous methods are recognizable by the fact that they have the word "Async" in their name
and return a `Cancellable` instance. The asynchronous methods accept the same request object
as the synchronous variant and accept a generic `ActionListener<T>` where `T` is the return
type of the synchronous method.

All asynchronous methods return a `Cancellable` object with a `cancel` method that you may call
in case you want to abort the request. Cancelling
no longer needed requests is a good way to avoid putting unnecessary
load on Elasticsearch.

Using the `Cancellable` instance is optional and can safely be ignored if you have
no need for it. A use case for it is, for example, Kotlin's `suspendCancellableCoroutine`.
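
A small sketch of the asynchronous pattern, assuming a `RestHighLevelClient` named
`client` and an `ActionListener<SearchResponse>` named `listener` defined elsewhere;
the index name is a placeholder:

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.RequestOptions;

// Fire an asynchronous search and keep the handle so it can be aborted later.
Cancellable cancellable = client.searchAsync(
    new SearchRequest("my-index"), RequestOptions.DEFAULT, listener);

// ... later, if the result is no longer needed:
cancellable.cancel();
--------------------------------------------------
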
@@ -1,54 +0,0 @@
[role="xpack"]
[[java-rest-high-x-pack-graph-explore]]
=== Graph explore API

[[java-rest-high-x-pack-graph-explore-execution]]
==== Initial request

Graph queries are executed using the `explore()` method:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-request]
--------------------------------------------------
<1> In this example we seed the exploration with a query to find messages mentioning the mysterious `projectx`
<2> What we want to discover in these messages are the ids of `participants` in the communications and the md5 hashes
of any attached files. In each case, we want to find people or files that have had at least one document connecting them
to projectx.
<3> The next "hop" in the graph exploration is to find the people who have shared several messages with the people or files
discovered in the previous hop (the projectx conspirators). The `minDocCount` control is used here to ensure the people
discovered have had at least 5 communications with projectx entities. Note we could also supply a "guiding query" here, e.g. a
date range to consider only recent communications, but we pass null to consider all connections.
<4> Finally we call the graph explore API with the GraphExploreRequest object.


==== Response

Graph responses consist of Vertex and Connection objects (aka "nodes" and "edges" respectively):

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-response]
--------------------------------------------------
<1> Each Vertex is a unique term (a combination of fieldname and term value). The "hopDepth" property tells us at which point in the
requested exploration this term was first discovered.
<2> Each Connection is a pair of Vertex objects and includes a docCount property telling us how many times these two
Vertex terms have been sighted together


[[java-rest-high-x-pack-graph-expand-execution]]
==== Expanding a client-side Graph

Typically, once an application has rendered an initial GraphExploreResponse as a collection of vertices and connecting lines (graph visualization toolkits such as D3, sigma.js or Keylines help here), the next step a user may want to do is "expand". This involves finding new vertices that might be connected to the existing ones currently shown.

To do this we use the same `explore` method, but our request contains details about which vertices to expand from and which vertices to avoid re-discovering.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/GraphDocumentationIT.java[x-pack-graph-explore-expand]
--------------------------------------------------
<1> Unlike the initial request we do not need to pass a starting query
<2> In the first hop, which represents our "from" vertices, we explicitly list the terms that we already have on-screen and want to expand by using the `addInclude` filter.
We can supply a boost for those terms that are considered more important to follow than others, but here we select a common value of 1 for all.
<3> When defining the second hop, which represents the "to" vertices we hope to discover, we explicitly list the terms that we already know about using the `addExclude` filter

@@ -1,36 +0,0 @@
--
:api: ilm-delete-lifecycle-policy
:request: DeleteLifecyclePolicyRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Delete Lifecycle Policy API


[id="{upid}-{api}-request"]
==== Request

The Delete Lifecycle Policy API allows you to delete an Index Lifecycle
Management Policy from the cluster.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The policy named `my_policy` will be deleted.

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the delete lifecycle policy request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the delete lifecycle policy request was acknowledged.

include::../execution.asciidoc[]

@@ -1,36 +0,0 @@
--
:api: slm-delete-snapshot-lifecycle-policy
:request: DeleteSnapshotLifecyclePolicyRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Delete Snapshot Lifecycle Policy API


[id="{upid}-{api}-request"]
==== Request

The Delete Snapshot Lifecycle Policy API allows you to delete a Snapshot Lifecycle Management Policy
from the cluster.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The policy with the id `policy_id` will be deleted.

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the delete snapshot lifecycle policy request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the delete snapshot lifecycle policy request was acknowledged.

include::../execution.asciidoc[]

@@ -1,36 +0,0 @@
--
:api: slm-execute-snapshot-lifecycle-policy
:request: ExecuteSnapshotLifecyclePolicyRequest
:response: ExecuteSnapshotLifecyclePolicyResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Execute Snapshot Lifecycle Policy API


[id="{upid}-{api}-request"]
==== Request

The Execute Snapshot Lifecycle Policy API allows you to execute a Snapshot Lifecycle Management
Policy, taking a snapshot immediately.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The policy id to execute

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ contains the name of the snapshot that was created.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The created snapshot name
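
A rough sketch of the call, assuming a `RestHighLevelClient` named `client`;
the policy id is a placeholder and the client accessor may vary between versions:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyRequest;
import org.elasticsearch.client.slm.ExecuteSnapshotLifecyclePolicyResponse;

// Trigger the policy right away and read back the generated snapshot name.
ExecuteSnapshotLifecyclePolicyRequest request =
    new ExecuteSnapshotLifecyclePolicyRequest("policy_id");
ExecuteSnapshotLifecyclePolicyResponse response =
    client.indexLifecycle().executeSnapshotLifecyclePolicy(request, RequestOptions.DEFAULT);
String snapshotName = response.getSnapshotName();
--------------------------------------------------
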

include::../execution.asciidoc[]

@@ -1,35 +0,0 @@
--
:api: slm-execute-snapshot-lifecycle-retention
:request: ExecuteSnapshotLifecycleRetentionRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Execute Snapshot Lifecycle Retention API


[id="{upid}-{api}-request"]
==== Request

The Execute Snapshot Lifecycle Retention API allows you to execute Snapshot Lifecycle Management
Retention immediately, rather than waiting for its regularly scheduled execution.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ contains a boolean for whether the request was
acknowledged by the master node.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------

include::../execution.asciidoc[]

@@ -1,50 +0,0 @@
--
:api: ilm-explain-lifecycle
:request: ExplainLifecycleRequest
:response: ExplainLifecycleResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Explain Lifecycle API


[id="{upid}-{api}-request"]
==== Request

The Explain Lifecycle API allows you to retrieve information about the execution
of a Lifecycle Policy with respect to one or more indices.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Requests an explanation of policy execution for `my_index` and `other_index`


[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ contains a map of `LifecyclePolicyMetadata`,
accessible by the name of the policy, which contains data about each policy,
as well as the policy definition.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The name of the policy in use for this index, if any. Will be `null` if the
index does not have an associated policy.
<2> Indicates whether this index is being managed by Index Lifecycle Management.
<3> The Phase (`hot`, `warm`, etc.) this index is currently in. Will be `null` if
the index is not managed by Index Lifecycle Management.
<4> The time this index entered this Phase of execution.
<5> The Action (`rollover`, `shrink`, etc.) this index is currently in. Will be `null` if
the index is not managed by Index Lifecycle Management.
<6> The Step this index is currently in. Will be `null` if
the index is not managed by Index Lifecycle Management.
<7> If this index is in the `ERROR` Step, this will indicate which Step failed.
Otherwise, it will be `null`.

include::../execution.asciidoc[]

@@ -1,40 +0,0 @@
--
:api: ilm-get-lifecycle-policy
:request: GetLifecyclePolicyRequest
:response: GetLifecyclePolicyResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Get Lifecycle Policy API


[id="{upid}-{api}-request"]
==== Request

The Get Lifecycle Policy API allows you to retrieve the definition of an Index
Lifecycle Management Policy from the cluster.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Gets all policies.
<2> Gets `my_policy` and `other_policy`

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ contains a map of `LifecyclePolicyMetadata`,
accessible by the name of the policy, which contains data about each policy,
as well as the policy definition.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The policies are retrieved by name.
<2> The policy definition itself.

include::../execution.asciidoc[]

@@ -1,39 +0,0 @@
--
:api: slm-get-snapshot-lifecycle-policy
:request: GetSnapshotLifecyclePolicyRequest
:response: GetSnapshotLifecyclePolicyResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Get Snapshot Lifecycle Policy API


[id="{upid}-{api}-request"]
==== Request

The Get Snapshot Lifecycle Policy API allows you to retrieve the definition of a Snapshot Lifecycle
Management Policy from the cluster.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Gets all policies.
<2> Gets `policy_id`

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ contains a map of `SnapshotLifecyclePolicyMetadata`, accessible by the id
of the policy, which contains data about each policy, as well as the policy definition.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The policies are retrieved by id.
<2> The policy definition itself.

include::../execution.asciidoc[]

@@ -1,35 +0,0 @@
--
:api: slm-get-snapshot-lifecycle-stats
:request: GetSnapshotLifecycleStatsRequest
:response: GetSnapshotLifecycleStatsResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Get Snapshot Lifecycle Stats API


[id="{upid}-{api}-request"]
==== Request

The Get Snapshot Lifecycle Stats API allows you to retrieve statistics about snapshots taken or
deleted, as well as retention runs by the snapshot lifecycle service.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ contains global statistics as well as a map of `SnapshotPolicyStats`,
accessible by the id of the policy, which contains statistics about each policy.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------

include::../execution.asciidoc[]

@@ -1,36 +0,0 @@
--
:api: ilm-status
:request: LifecycleManagementStatusRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Index Lifecycle Management Status API


[id="{upid}-{api}-request"]
==== Request

The Index Lifecycle Management Status API allows you to retrieve the status
of Index Lifecycle Management.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------


[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates the status of Index Lifecycle Management.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The returned status can be `RUNNING`, `STOPPING`, or `STOPPED`.

include::../execution.asciidoc[]

@@ -1,36 +0,0 @@
--
:api: ilm-put-lifecycle-policy
:request: PutLifecyclePolicyRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Create or update lifecycle policy API


[id="{upid}-{api}-request"]
==== Request

Creates or updates an index lifecycle management policy.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Adds a hot phase with a rollover action
<2> Adds a delete phase that will delete the index 90 days after rollover
<3> Creates the policy with the defined phases and the name `my_policy`
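
A rough sketch of building a minimal policy with the client's `indexlifecycle`
classes; this example only defines a delete phase, and the constructor shapes
may differ slightly between client versions:

["source","java"]
--------------------------------------------------
import java.util.Collections;
import java.util.Map;
import org.elasticsearch.client.indexlifecycle.DeleteAction;
import org.elasticsearch.client.indexlifecycle.LifecycleAction;
import org.elasticsearch.client.indexlifecycle.LifecyclePolicy;
import org.elasticsearch.client.indexlifecycle.Phase;
import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest;
import org.elasticsearch.core.TimeValue;

// A single delete phase that removes the index 90 days after creation;
// a real policy would normally also define a hot phase with a rollover action.
Map<String, LifecycleAction> deleteActions =
    Collections.singletonMap(DeleteAction.NAME, new DeleteAction());
Phase deletePhase = new Phase("delete", TimeValue.timeValueDays(90), deleteActions);
LifecyclePolicy policy = new LifecyclePolicy(
    "my_policy", Collections.singletonMap("delete", deletePhase));
PutLifecyclePolicyRequest request = new PutLifecyclePolicyRequest(policy);
--------------------------------------------------
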

[id="{upid}-{api}-response"]
==== Response

The +{response}+ indicates if the request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request was acknowledged.

include::../execution.asciidoc[]

@@ -1,33 +0,0 @@
--
:api: slm-put-snapshot-lifecycle-policy
:request: PutSnapshotLifecyclePolicyRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Create or update snapshot lifecycle policy API


[id="{upid}-{api}-request"]
==== Request

Creates or updates a snapshot lifecycle management policy.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

[id="{upid}-{api}-response"]
==== Response

The +{response}+ indicates if the request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request was acknowledged.

include::../execution.asciidoc[]

@@ -1,38 +0,0 @@
--
:api: ilm-remove-lifecycle-policy-from-index
:request: RemoveIndexLifecyclePolicyRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Remove Policy from Index API


[id="{upid}-{api}-request"]
==== Request

Removes the assigned lifecycle policy from an index.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Removes the `my_policy` policy from `my_index`


[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the request to remove
the lifecycle policy from the index was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not any policies failed to be removed
from any indices in the request
<2> A list of index names which are still managed
by their policies.

include::../execution.asciidoc[]

@@ -1,36 +0,0 @@
--
:api: ilm-retry-lifecycle-policy
:request: RetryLifecyclePolicyRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Retry Lifecycle Policy API


[id="{upid}-{api}-request"]
==== Request

The Retry Lifecycle Policy API allows you to invoke execution of policies
that encountered errors in certain indices.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Retries execution of `my_index`'s policy

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the retry lifecycle policy request was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the lifecycle policy retry was acknowledged.

include::../execution.asciidoc[]

@@ -1,36 +0,0 @@
--
:api: slm-status
:request: SnapshotLifecycleManagementStatusRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Snapshot Lifecycle Management Status API


[id="{upid}-{api}-request"]
==== Request

The Snapshot Lifecycle Management Status API allows you to retrieve the status
of Snapshot Lifecycle Management.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------


[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates the status of Snapshot Lifecycle Management.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> The returned status can be `RUNNING`, `STOPPING`, or `STOPPED`.

include::../execution.asciidoc[]

@@ -1,36 +0,0 @@
--
:api: ilm-start-ilm
:request: StartILMRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Start Index Lifecycle Management API


[id="{upid}-{api}-request"]
==== Request

The Start Lifecycle Management API allows you to start Index Lifecycle
Management if it has previously been stopped.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------


[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the request to start Index Lifecycle
Management was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request to start Index Lifecycle Management was
acknowledged.

include::../execution.asciidoc[]

@@ -1,36 +0,0 @@
--
:api: slm-start-slm
:request: StartSLMRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Start Snapshot Lifecycle Management API


[id="{upid}-{api}-request"]
==== Request

The Start Snapshot Lifecycle Management API allows you to start Snapshot
Lifecycle Management if it has previously been stopped.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------


[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the request to start Snapshot Lifecycle
Management was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request to start Snapshot Lifecycle Management was
acknowledged.

include::../execution.asciidoc[]

@@ -1,38 +0,0 @@
--
:api: ilm-stop-ilm
:request: StopILMRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Stop Index Lifecycle Management API


[id="{upid}-{api}-request"]
==== Request

The Stop Lifecycle Management API allows you to stop Index Lifecycle
Management temporarily.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------


[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the request to stop Index Lifecycle
Management was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request to stop Index Lifecycle Management was
acknowledged.

include::../execution.asciidoc[]

@@ -1,38 +0,0 @@
--
:api: slm-stop-slm
:request: StopSLMRequest
:response: AcknowledgedResponse
--
[role="xpack"]
[id="{upid}-{api}"]
=== Stop Snapshot Lifecycle Management API


[id="{upid}-{api}-request"]
==== Request

The Stop Snapshot Lifecycle Management API allows you to stop Snapshot Lifecycle
Management temporarily.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------


[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates if the request to stop Snapshot
Lifecycle Management was received.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request to stop Snapshot Lifecycle Management was
acknowledged.

include::../execution.asciidoc[]
@@ -1,38 +0,0 @@
:mainid: java-rest-high

[id="{mainid}"]
= Java High Level REST Client

[partintro]
--

deprecated[7.15.0, The High Level REST Client is deprecated in favour of the {java-api-client}/index.html[Java API Client].]

The Java High Level REST Client works on top of the Java Low Level REST client.
Its main goal is to expose API specific methods that accept request objects as
an argument and return response objects, so that request marshalling and
response un-marshalling is handled by the client itself.

Each API can be called synchronously or asynchronously. The synchronous
methods return a response object, while the asynchronous methods, whose names
end with the `async` suffix, require a listener argument that is notified
(on the thread pool managed by the low level client) once a response or an
error is received.

The Java High Level REST Client depends on the Elasticsearch core project.
It accepts the same request arguments as the `TransportClient` and returns
the same response objects.

--

:doc-tests: {elasticsearch-root}/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation
:hlrc-tests: {elasticsearch-root}/client/rest-high-level/src/test/java/org/elasticsearch/client

include::getting-started.asciidoc[]
include::supported-apis.asciidoc[]
include::java-builders.asciidoc[]
include::migration.asciidoc[]
include::../license.asciidoc[]

:doc-tests!:
:mainid!:

@@ -1,97 +0,0 @@
--
:api: analyze
:request: AnalyzeRequest
:response: AnalyzeResponse
--

[id="{upid}-{api}"]
=== Analyze API

[id="{upid}-{api}-request"]
==== Analyze Request

An +{request}+ contains the text to analyze, and one of several options to
specify how the analysis should be performed.

The simplest version uses a built-in analyzer:

["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-builtin-request]
---------------------------------------------------
<1> A built-in analyzer
<2> The text to include. Multiple strings are treated as a multi-valued field
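
A minimal sketch of such a request; the analyzer name and sample strings are
illustrative, and the static factory shown is assumed to be available on the
client's `AnalyzeRequest`:

["source","java"]
--------------------------------------------------
import org.elasticsearch.client.indices.AnalyzeRequest;

// Analyze two strings with the built-in "english" analyzer;
// multiple strings are treated as a multi-valued field.
AnalyzeRequest request = AnalyzeRequest.withGlobalAnalyzer(
    "english", "Some text to analyze", "Some more text to analyze");
--------------------------------------------------
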

You can configure a custom analyzer:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-custom-request]
---------------------------------------------------
<1> Configuration for a custom tokenfilter
<2> Configure the tokenizer
<3> Configure char filters
<4> Add a built-in tokenfilter
<5> Add the custom tokenfilter

You can also build a custom normalizer by including only charfilters and
tokenfilters:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-custom-normalizer-request]
---------------------------------------------------

You can analyze text using an analyzer defined in an existing index:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-index-request]
---------------------------------------------------
<1> The index containing the mappings
<2> The analyzer defined on this index to use

Or you can use a normalizer:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-index-normalizer-request]
---------------------------------------------------
<1> The index containing the mappings
<2> The normalizer defined on this index to use

You can analyze text using the mappings for a particular field in an index:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-field-request]
---------------------------------------------------

==== Optional arguments
The following arguments can also optionally be provided:

["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-explain]
---------------------------------------------------
<1> Setting `explain` to true will add further details to the response
<2> Setting `attributes` allows you to return only the token attributes that you are
interested in

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Analyze Response

The returned +{response}+ allows you to retrieve details of the analysis as
follows:
["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-tokens]
---------------------------------------------------
<1> `AnalyzeToken` holds information about the individual tokens produced by analysis

If `explain` was set to `true`, then information is instead returned from the `detail()`
method:

["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response-detail]
---------------------------------------------------
<1> `DetailAnalyzeResponse` holds more detailed information about tokens produced by
the various substeps in the analysis chain.

@@ -1,80 +0,0 @@
--
:api: clear-cache
:request: ClearIndicesCacheRequest
:response: ClearIndicesCacheResponse
--

[id="{upid}-{api}"]
=== Clear Cache API

[id="{upid}-{api}-request"]
==== Clear Cache Request

A +{request}+ can be applied to one or more indices, or even to
`_all` the indices:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Clears the cache of one index
<2> Clears the cache of multiple indices
<3> Clears the cache of all the indices
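
A minimal sketch of the three variants; the index names are placeholders:

["source","java"]
--------------------------------------------------
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;

ClearIndicesCacheRequest oneIndex = new ClearIndicesCacheRequest("posts");                   // <1>
ClearIndicesCacheRequest severalIndices = new ClearIndicesCacheRequest("posts", "authors"); // <2>
ClearIndicesCacheRequest allIndices = new ClearIndicesCacheRequest();                        // <3>
--------------------------------------------------
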
|
||||
|
||||
==== Optional arguments
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-indicesOptions]
|
||||
--------------------------------------------------
|
||||
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
|
||||
how wildcard expressions are expanded
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-query]
|
||||
--------------------------------------------------
|
||||
<1> Set the `query` flag to `true`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-fielddata]
|
||||
--------------------------------------------------
|
||||
<1> Set the `fielddata` flag to `true`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-request]
|
||||
--------------------------------------------------
|
||||
<1> Set the `request` flag to `true`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-request-fields]
|
||||
--------------------------------------------------
|
||||
<1> Set the `fields` parameter
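
To make those flags concrete, here is a minimal sketch, assuming an existing
`RestHighLevelClient` named `client` and the `ClearIndicesCacheRequest` from the
`org.elasticsearch.action.admin.indices.cache.clear` package; the setters and the
`clearCache` call are assumptions based on the callouts above.

["source","java"]
--------------------------------------------------
// Sketch only: assumes `client` is an existing RestHighLevelClient.
ClearIndicesCacheRequest request = new ClearIndicesCacheRequest("my-index");
request.queryCache(true);           // clear the query cache
request.fieldDataCache(true);       // clear the field data cache
request.requestCache(true);         // clear the request cache
request.fields("field1", "field2"); // restrict field data clearing to these fields

ClearIndicesCacheResponse response =
        client.indices().clearCache(request, RequestOptions.DEFAULT);
--------------------------------------------------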

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Clear Cache Response

The returned +{response}+ allows you to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Total number of shards hit by the clear cache request
<2> Number of shards where the clear cache has succeeded
<3> Number of shards where the clear cache has failed
<4> A list of failures if the operation failed on one or more shards

By default, if the indices were not found, an `ElasticsearchException` will be thrown:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-notfound]
--------------------------------------------------
<1> Do something if the indices to be cleared were not found

@@ -1,80 +0,0 @@
--
:api: clone-index
:request: ResizeRequest
:response: ResizeResponse
--

[id="{upid}-{api}"]
=== Clone Index API

[id="{upid}-{api}-request"]
==== Resize Request

The Clone Index API requires a +{request}+ instance.
A +{request}+ requires two string arguments:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The target index (first argument) to clone the source index (second argument) into
<2> The resize type needs to be set to `CLONE`

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the clone operation
as a `TimeValue`
<2> Timeout to wait for all the nodes to acknowledge the clone operation
as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-waitForActiveShards]
--------------------------------------------------
<1> The number of active shard copies to wait for before the clone index API
returns a response, as an `int`
<2> The number of active shard copies to wait for before the clone index API
returns a response, as an `ActiveShardCount`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-settings]
--------------------------------------------------
<1> The settings to apply to the target index, which optionally include the
number of shards to create for it

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-aliases]
--------------------------------------------------
<1> The aliases to associate the target index with
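
Putting the request together end to end might look like the sketch below. It assumes
an existing `RestHighLevelClient` named `client` and the `ResizeRequest`/`ResizeType`
classes from the `org.elasticsearch.action.admin.indices.shrink` package; the
`clone` call and setter names are assumptions and may differ between client versions.

["source","java"]
--------------------------------------------------
// Sketch only: assumes `client` is an existing RestHighLevelClient.
ResizeRequest request = new ResizeRequest("target-index", "source-index"); // target first, then source
request.setResizeType(ResizeType.CLONE); // clone rather than shrink or split

ResizeResponse response = client.indices().clone(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged(); // all nodes acknowledged the request
--------------------------------------------------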

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Clone Index Response

The returned +{response}+ allows you to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request
<2> Indicates whether the requisite number of shard copies were started for
each shard in the index before timing out

@@ -1,56 +0,0 @@
--
:api: close-index
:request: CloseIndexRequest
:response: CloseIndexResponse
--

[id="{upid}-{api}"]
=== Close Index API

[id="{upid}-{api}-request"]
==== Close Index Request

A +{request}+ requires an `index` argument:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The index to close

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the index is closed
as a `TimeValue`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Close Index Response

The returned +{response}+ allows you to retrieve information about the
executed operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request
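
For orientation, a minimal sketch of closing an index, assuming an existing
`RestHighLevelClient` named `client` and the `CloseIndexRequest`/`CloseIndexResponse`
classes from the `org.elasticsearch.client.indices` package; the `setTimeout` and
`close` calls are assumptions and may differ between client versions.

["source","java"]
--------------------------------------------------
// Sketch only: assumes `client` is an existing RestHighLevelClient.
CloseIndexRequest request = new CloseIndexRequest("my-index"); // the index to close
request.setTimeout(TimeValue.timeValueMinutes(2)); // assumed setter for the acknowledgement timeout

CloseIndexResponse response = client.indices().close(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged(); // all nodes acknowledged the request
--------------------------------------------------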

@@ -1,116 +0,0 @@
--
:api: create-index
:request: CreateIndexRequest
:response: CreateIndexResponse
--

[id="{upid}-{api}"]
=== Create Index API

[id="{upid}-{api}-request"]
==== Create Index Request

A +{request}+ requires an `index` argument:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The index to create

==== Index settings
Each index created can have specific settings associated with it.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-settings]
--------------------------------------------------
<1> Settings for this index

[[java-rest-high-create-index-request-mappings]]
==== Index mappings
An index may be created with mappings for its document types:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-mappings]
--------------------------------------------------
<1> The type to define
<2> The mapping for this type, provided as a JSON string

The mapping source can be provided in different ways in addition to the
`String` example shown above:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-mappings-map]
--------------------------------------------------
<1> Mapping source provided as a `Map` which gets automatically converted
to JSON format

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-mappings-xcontent]
--------------------------------------------------
<1> Mapping source provided as an `XContentBuilder` object, using the Elasticsearch
built-in helpers to generate JSON content

==== Index aliases
Aliases can be set at index creation time:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-aliases]
--------------------------------------------------
<1> The alias to define

==== Providing the whole source

The whole source, including all of its sections (mappings, settings and aliases),
can also be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-whole-source]
--------------------------------------------------
<1> The source provided as a JSON string. It can also be provided as a `Map`
or an `XContentBuilder`.
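
Pulled together, a request combining settings, a mapping and an alias could look like
the sketch below, assuming an existing `RestHighLevelClient` named `client` and the
typeless `CreateIndexRequest` from the `org.elasticsearch.client.indices` package,
with `Settings`, `XContentType` and `Alias` imported from their usual packages.

["source","java"]
--------------------------------------------------
// Sketch only: assumes `client` is an existing RestHighLevelClient.
CreateIndexRequest request = new CreateIndexRequest("my-index"); // the index to create

request.settings(Settings.builder()
        .put("index.number_of_shards", 1)
        .put("index.number_of_replicas", 0));

// Mapping provided as a JSON string; a Map or an XContentBuilder works as well
request.mapping("{ \"properties\": { \"message\": { \"type\": \"text\" } } }",
        XContentType.JSON);

request.alias(new Alias("my-alias")); // alias set at index creation time

CreateIndexResponse createIndexResponse =
        client.indices().create(request, RequestOptions.DEFAULT);
--------------------------------------------------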

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the index creation as a `TimeValue`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-waitForActiveShards]
--------------------------------------------------
<1> The number of active shard copies to wait for before the create index API returns a
response, as an `int`
<2> The number of active shard copies to wait for before the create index API returns a
response, as an `ActiveShardCount`

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Create Index Response

The returned +{response}+ allows you to retrieve information about the executed
operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request
<2> Indicates whether the requisite number of shard copies were started for each shard in the index before timing out
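
As a short illustration of reading those two flags, assuming a `CreateIndexResponse`
named `createIndexResponse` obtained as above:

["source","java"]
--------------------------------------------------
// All nodes acknowledged the create index request
boolean acknowledged = createIndexResponse.isAcknowledged();
// The required number of shard copies were started before the timeout
boolean shardsAcknowledged = createIndexResponse.isShardsAcknowledged();
--------------------------------------------------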

@@ -1,49 +0,0 @@
--
:api: delete-alias
:request: DeleteAliasRequest
:response: AcknowledgedResponse
--

[id="{upid}-{api}"]
=== Delete Alias API

[id="{upid}-{api}-request"]
==== Delete Alias Request

A +{request}+ requires an `index` and an `alias` argument:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the alias deletion
as a `TimeValue`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`

[id="{upid}-{api}-response"]
==== Delete Alias Response

The returned +{response}+ indicates whether the request to delete the alias
was acknowledged.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the request to delete the alias was
acknowledged.
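
A minimal end-to-end sketch, assuming an existing `RestHighLevelClient` named `client`
and the `DeleteAliasRequest` from the `org.elasticsearch.client.indices` package; the
`deleteAlias` call is an assumption and may differ between client versions.

["source","java"]
--------------------------------------------------
// Sketch only: assumes `client` is an existing RestHighLevelClient and that the
// response is the org.elasticsearch.client.core.AcknowledgedResponse.
DeleteAliasRequest request = new DeleteAliasRequest("my-index", "my-alias"); // index, then alias

AcknowledgedResponse response =
        client.indices().deleteAlias(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged(); // whether the alias deletion was acknowledged
--------------------------------------------------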

include::../execution.asciidoc[]

@@ -1,65 +0,0 @@
--
:api: delete-index
:request: DeleteIndexRequest
:response: DeleteIndexResponse
--

[id="{upid}-{api}"]
=== Delete Index API

[id="{upid}-{api}-request"]
==== Delete Index Request

A +{request}+ requires an `index` argument:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The index to delete

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-timeout]
--------------------------------------------------
<1> Timeout to wait for all the nodes to acknowledge the index deletion as a `TimeValue`
<2> Timeout to wait for all the nodes to acknowledge the index deletion as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`
<2> Timeout to connect to the master node as a `String`

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-indicesOptions]
--------------------------------------------------
<1> Setting `IndicesOptions` controls how unavailable indices are resolved and
how wildcard expressions are expanded

include::../execution.asciidoc[]

[id="{upid}-{api}-response"]
==== Delete Index Response

The returned +{response}+ allows you to retrieve information about the executed
operation as follows:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Indicates whether all of the nodes have acknowledged the request

If the index was not found, an `ElasticsearchException` will be thrown:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-notfound]
--------------------------------------------------
<1> Do something if the index to be deleted was not found
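
For orientation, a sketch that deletes an index and handles the missing-index case,
assuming an existing `RestHighLevelClient` named `client`, the `DeleteIndexRequest`
from the `org.elasticsearch.action.admin.indices.delete` package, and the usual
`AcknowledgedResponse` and `RestStatus` imports; the method names are assumptions.

["source","java"]
--------------------------------------------------
// Sketch only: assumes `client` is an existing RestHighLevelClient.
DeleteIndexRequest request = new DeleteIndexRequest("my-index"); // the index to delete
try {
    AcknowledgedResponse response = client.indices().delete(request, RequestOptions.DEFAULT);
    boolean acknowledged = response.isAcknowledged(); // all nodes acknowledged the request
} catch (ElasticsearchException e) {
    if (e.status() == RestStatus.NOT_FOUND) {
        // the index to be deleted was not found
    }
}
--------------------------------------------------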

@@ -1,41 +0,0 @@
--
:api: delete-index-template-v2
:request: DeleteIndexTemplateV2Request
:response: AcknowledgedResponse
--

[id="{upid}-{api}"]
=== Delete Composable Index Template API

[id="{upid}-{api}-request"]
==== Request

The Delete Composable Index Template API allows you to delete a composable index template.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of the composable index template to delete.

==== Optional arguments
The following arguments can optionally be provided:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-masterTimeout]
--------------------------------------------------
<1> Timeout to connect to the master node as a `TimeValue`

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates whether the delete template request was acknowledged.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the delete template request was acknowledged.
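
As a rough sketch only: the request class below follows the request type named at the
top of this page, while the `deleteIndexTemplate` call and the `setMasterTimeout`
setter are assumptions that may not match your client version. An existing
`RestHighLevelClient` named `client` is assumed to be in scope.

["source","java"]
--------------------------------------------------
// Sketch only: method names are assumptions and may differ between client versions.
DeleteIndexTemplateV2Request request =
        new DeleteIndexTemplateV2Request("my-composable-template"); // template to delete
request.setMasterTimeout(TimeValue.timeValueMinutes(1)); // assumed master node timeout setter

AcknowledgedResponse response =
        client.indices().deleteIndexTemplate(request, RequestOptions.DEFAULT); // assumed method name
boolean acknowledged = response.isAcknowledged();
--------------------------------------------------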

include::../execution.asciidoc[]

@@ -1,32 +0,0 @@
--
:api: delete-template
:request: DeleteIndexTemplateRequest
:response: AcknowledgedResponse
--

[id="{upid}-{api}"]
=== Delete Template API

[id="{upid}-{api}-request"]
==== Request

The Delete Template API allows you to delete an index template.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> The name of an index template to delete.

[id="{upid}-{api}-response"]
==== Response

The returned +{response}+ indicates whether the delete template request was acknowledged.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-response]
--------------------------------------------------
<1> Whether or not the delete template request was acknowledged.
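
A minimal sketch, assuming an existing `RestHighLevelClient` named `client` and the
`DeleteIndexTemplateRequest` from the `org.elasticsearch.action.admin.indices.template.delete`
package; the `deleteTemplate` call is an assumption and may differ between client versions.

["source","java"]
--------------------------------------------------
// Sketch only: assumes `client` is an existing RestHighLevelClient.
DeleteIndexTemplateRequest request =
        new DeleteIndexTemplateRequest("my-template"); // the template to delete

AcknowledgedResponse response =
        client.indices().deleteTemplate(request, RequestOptions.DEFAULT);
boolean acknowledged = response.isAcknowledged(); // whether the delete was acknowledged
--------------------------------------------------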

include::../execution.asciidoc[]
Some files were not shown because too many files have changed in this diff.