Mirror of https://github.com/elastic/elasticsearch.git (synced 2025-04-24 23:27:25 -04:00)

Commit f981d1f9e2: Merge remote-tracking branch 'upstream-main/main' into update-main-10-10-24

486 changed files with 9344 additions and 2819 deletions
@@ -44,7 +44,7 @@ class GitInfoPlugin implements Plugin<Project> {
         gitInfo.disallowChanges();
         gitInfo.finalizeValueOnRead();

-        revision = gitInfo.map(info -> info.getRevision() == null ? info.getRevision() : "master");
+        revision = gitInfo.map(info -> info.getRevision() == null ? info.getRevision() : "main");
     }

     public Property<GitInfo> getGitInfo() {
@@ -21,6 +21,7 @@ import java.util.Map;
 public class LicensingPlugin implements Plugin<Project> {
     static final String ELASTIC_LICENSE_URL_PREFIX = "https://raw.githubusercontent.com/elastic/elasticsearch/";
     static final String ELASTIC_LICENSE_URL_POSTFIX = "/licenses/ELASTIC-LICENSE-2.0.txt";
+    static final String AGPL_ELASTIC_LICENSE_URL_POSTFIX = "/licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt";

     private ProviderFactory providerFactory;

@@ -36,15 +37,18 @@ public class LicensingPlugin implements Plugin<Project> {
             isSnapshotVersion(project) ? revision.get() : "v" + project.getVersion()
         );

-        Provider<String> projectLicenseURL = licenseCommitProvider.map(licenseCommit -> ELASTIC_LICENSE_URL_PREFIX +
+        Provider<String> elasticLicenseURL = licenseCommitProvider.map(licenseCommit -> ELASTIC_LICENSE_URL_PREFIX +
             licenseCommit + ELASTIC_LICENSE_URL_POSTFIX);
+        Provider<String> agplLicenseURL = licenseCommitProvider.map(licenseCommit -> ELASTIC_LICENSE_URL_PREFIX +
+            licenseCommit + AGPL_ELASTIC_LICENSE_URL_POSTFIX);
         // But stick the Elastic license url in project.ext so we can get it if we need to switch to it
-        project.getExtensions().getExtraProperties().set("elasticLicenseUrl", projectLicenseURL);
+        project.getExtensions().getExtraProperties().set("elasticLicenseUrl", elasticLicenseURL);

         MapProperty<String, String> licensesProperty = project.getObjects().mapProperty(String.class, String.class).convention(
             providerFactory.provider(() -> Map.of(
                 "Server Side Public License, v 1", "https://www.mongodb.com/licensing/server-side-public-license",
-                "Elastic License 2.0", projectLicenseURL.get())
+                "Elastic License 2.0", elasticLicenseURL.get(),
+                "GNU Affero General Public License Version 3", agplLicenseURL.get())
             )
         );

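For illustration only (not part of the commit): given the constants above, a non-snapshot build of, say, version 8.16.0 would resolve the new AGPL entry to https://raw.githubusercontent.com/elastic/elasticsearch/v8.16.0/licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt, where the hypothetical `v8.16.0` license commit comes from the `"v" + project.getVersion()` branch of the expression shown in the hunk.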
@@ -74,6 +74,11 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest {
       <url>https://www.mongodb.com/licensing/server-side-public-license</url>
       <distribution>repo</distribution>
     </license>
+    <license>
+      <name>The OSI-approved Open Source license Version 3.0</name>
+      <url>https://raw.githubusercontent.com/elastic/elasticsearch/v1.0/licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt</url>
+      <distribution>repo</distribution>
+    </license>
   </licenses>
   <developers>
     <developer>

@@ -149,6 +154,11 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest {
       <url>https://www.mongodb.com/licensing/server-side-public-license</url>
       <distribution>repo</distribution>
     </license>
+    <license>
+      <name>The OSI-approved Open Source license Version 3.0</name>
+      <url>https://raw.githubusercontent.com/elastic/elasticsearch/v1.0/licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt</url>
+      <distribution>repo</distribution>
+    </license>
   </licenses>
   <developers>
     <developer>

@@ -233,6 +243,11 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest {
       <url>https://www.mongodb.com/licensing/server-side-public-license</url>
       <distribution>repo</distribution>
     </license>
+    <license>
+      <name>The OSI-approved Open Source license Version 3.0</name>
+      <url>https://raw.githubusercontent.com/elastic/elasticsearch/v1.0/licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt</url>
+      <distribution>repo</distribution>
+    </license>
   </licenses>
   <developers>
     <developer>

@@ -326,6 +341,11 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest {
       <url>https://www.mongodb.com/licensing/server-side-public-license</url>
       <distribution>repo</distribution>
     </license>
+    <license>
+      <name>The OSI-approved Open Source license Version 3.0</name>
+      <url>https://raw.githubusercontent.com/elastic/elasticsearch/v1.0/licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt</url>
+      <distribution>repo</distribution>
+    </license>
   </licenses>
   <developers>
     <developer>

@@ -399,6 +419,11 @@ class PublishPluginFuncTest extends AbstractGradleFuncTest {
       <url>https://www.mongodb.com/licensing/server-side-public-license</url>
       <distribution>repo</distribution>
     </license>
+    <license>
+      <name>The OSI-approved Open Source license Version 3.0</name>
+      <url>https://raw.githubusercontent.com/elastic/elasticsearch/v2.0/licenses/AGPL-3.0+SSPL-1.0+ELASTIC-LICENSE-2.0.txt</url>
+      <distribution>repo</distribution>
+    </license>
   </licenses>
   <developers>
     <developer>
@@ -31,7 +31,7 @@ public enum DockerBase {
     // Chainguard based wolfi image with latest jdk
     // This is usually updated via renovatebot
     // spotless:off
-    WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:c16d3ad6cebf387e8dd2ad769f54320c4819fbbaa21e729fad087c7ae223b4d0",
+    WOLFI("docker.elastic.co/wolfi/chainguard-base:latest@sha256:90888b190da54062f67f3fef1372eb0ae7d81ea55f5a1f56d748b13e4853d984",
         "-wolfi",
         "apk"
     ),
@@ -163,7 +163,13 @@ public abstract class ElasticsearchBuildCompletePlugin implements Plugin<Project
                 // So, if you change this such that the artifact will have a slash/directory in it, you'll need to update the logic
                 // below as well
                 pb.directory(uploadFileDir);
-                pb.start().waitFor();
+                try {
+                    // we are very generious here, as the upload can take
+                    // a long time depending on its size
+                    pb.start().waitFor(30, java.util.concurrent.TimeUnit.MINUTES);
+                } catch (InterruptedException e) {
+                    System.out.println("Failed to upload buildkite artifact " + e.getMessage());
+                }

                 System.out.println("Generating buildscan link for artifact...");
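For context, the change above moves from the blocking `Process.waitFor()` to the timed `waitFor(long, TimeUnit)` overload of `java.lang.Process`. A minimal standalone sketch of the difference (the command name is a placeholder, not from the commit):

    Process process = new ProcessBuilder("some-upload-command").start();
    // returns false (instead of blocking forever) if the process is still running after the timeout
    boolean finished = process.waitFor(30, java.util.concurrent.TimeUnit.MINUTES);
    if (finished == false) {
        process.destroyForcibly(); // give up on a hung upload
    }

Both overloads throw `InterruptedException`, which is why the new code wraps the call in a try/catch.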
@@ -185,8 +185,8 @@ public abstract class ElasticsearchTestBasePlugin implements Plugin<Project> {
             });

             if (OS.current().equals(OS.WINDOWS) && System.getProperty("tests.timeoutSuite") == null) {
-                // override the suite timeout to 30 mins for windows, because it has the most inefficient filesystem known to man
-                test.systemProperty("tests.timeoutSuite", "2400000!");
+                // override the suite timeout to 60 mins for windows, because it has the most inefficient filesystem known to man
+                test.systemProperty("tests.timeoutSuite", "3600000!");
             }

             /*
@@ -25,7 +25,7 @@ USER root

 COPY plugins/*.zip /opt/plugins/archive/

-RUN chown root.root /opt/plugins/archive/*
+RUN chown 1000:1000 /opt/plugins/archive/*
 RUN chmod 0444 /opt/plugins/archive/*

 FROM ${base_image}
@@ -123,7 +123,6 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {

   requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0")
   requiresFeature 'es.failure_store_feature_flag_enabled', Version.fromString("8.12.0")
-  requiresFeature 'es.ccs_telemetry_feature_flag_enabled', Version.fromString("8.16.0")

   // TODO Rene: clean up this kind of cross project file references
   extraConfigFile 'op-jwks.json', project(':x-pack:test:idp-fixture').file("src/main/resources/oidc/op-jwks.json")
docs/changelog/113825.yaml (new file)
@@ -0,0 +1,12 @@
+pr: 113825
+summary: Cross-cluster search telemetry
+area: Search
+type: feature
+issues: []
+highlight:
+  title: Cross-cluster search telemetry
+  body: |-
+    The cross-cluster search telemetry is collected when cross-cluster searches
+    are performed, and is returned as "ccs" field in `_cluster/stats` output.
+    It also add a new parameter `include_remotes=true` to the `_cluster/stats` API
+    which will collect data from connected remote clusters.
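Judging from the changelog text above, the new flag would be exercised with a request along these lines (a usage sketch; the response shape beyond the "ccs" field is not shown in this diff):

    GET _cluster/stats?include_remotes=true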
docs/changelog/113981.yaml (new file)
@@ -0,0 +1,6 @@
+pr: 113981
+summary: "Adding chunking settings to `GoogleVertexAiService,` `AzureAiStudioService,`\
+  \ and `AlibabaCloudSearchService`"
+area: Machine Learning
+type: enhancement
+issues: []

docs/changelog/114109.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 114109
+summary: Update cluster stats for retrievers
+area: Search
+type: enhancement
+issues: []

docs/changelog/114234.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 114234
+summary: Prevent flattening of ordered and unordered interval sources
+area: Search
+type: bug
+issues: []

docs/changelog/114321.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 114321
+summary: Stream Anthropic Completion
+area: Machine Learning
+type: enhancement
+issues: []

docs/changelog/114358.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 114358
+summary: "ESQL: Use less memory in listener"
+area: ES|QL
+type: enhancement
+issues: []

docs/changelog/114363.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 114363
+summary: Give the kibana system user permission to read security entities
+area: Infra/Core
+type: enhancement
+issues: []

docs/changelog/114368.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 114368
+summary: "ESQL: Delay construction of warnings"
+area: EQL
+type: enhancement
+issues: []

docs/changelog/114375.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 114375
+summary: Handle `InternalSendException` inline for non-forking handlers
+area: Distributed
+type: bug
+issues: []

docs/changelog/114389.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 114389
+summary: Filter empty task settings objects from the API response
+area: Machine Learning
+type: enhancement
+issues: []
@@ -12,7 +12,7 @@ consumption, use the <<aliases,aliases API>>.
 ====

 Retrieves the cluster's <<aliases,index aliases>>, including filter and routing
-information. The API does not return data stream aliases.
+information. The API does not return <<data-streams,data stream>> aliases.

 [[cat-alias-api-request]]
 ==== {api-request-title}
@@ -7,10 +7,11 @@
 [IMPORTANT]
 ====
 cat APIs are only intended for human consumption using the command line or {kib}
-console. They are _not_ intended for use by applications.
+console. They are _not_ intended for use by applications. For application
+consumption, use the <<cluster-nodes-stats,node stats API>>.
 ====

-Provides a snapshot of the number of shards allocated to each data node
+Provides a snapshot of the number of shards <<shard-allocation-relocation-recovery,allocated>> to each data node
 and their disk space.

@@ -13,7 +13,7 @@ consumption, use the
 <<ml-get-job-stats,get anomaly detection job statistics API>>.
 ====

-Returns configuration and usage information about {anomaly-jobs}.
+Returns configuration and usage information about {ml-docs}/ml-ad-overview.html[{anomaly-jobs}].

 [[cat-anomaly-detectors-request]]
 ==== {api-request-title}
@@ -12,7 +12,7 @@ console. They are _not_ intended for use by applications. For application
 consumption, use the <<ml-get-datafeed-stats,get datafeed statistics API>>.
 ====

-Returns configuration and usage information about {dfeeds}.
+Returns configuration and usage information about {ml-docs}/ml-ad-run-jobs.html#ml-ad-datafeeds[{dfeeds}].

 [[cat-datafeeds-request]]
 ==== {api-request-title}
@@ -13,7 +13,7 @@ consumption, use the
 <<get-dfanalytics-stats,get data frame analytics jobs statistics API>>.
 ====

-Returns configuration and usage information about {dfanalytics-jobs}.
+Returns configuration and usage information about {ml-docs}/ml-dfanalytics.html[{dfanalytics-jobs}].


 [[cat-dfanalytics-request]]
@@ -11,8 +11,8 @@ console. They are _not_ intended for use by applications. For application
 consumption, use the <<indices-get-index,get index API>>.
 ====

-Returns high-level information about indices in a cluster, including backing
-indices for data streams.
+Returns high-level information about <<documents-indices,indices>> in a cluster, including backing
+indices for <<data-streams,data streams>>.


 [[cat-indices-api-request]]
@@ -11,7 +11,7 @@ console. They are _not_ intended for use by applications. For application
 consumption, use the <<cluster-nodes-info,nodes info API>>.
 ====

-Returns information about the master node, including the ID, bound IP address,
+Returns information about the <<cluster-state-publishing,master node>>, including the ID, bound IP address,
 and name.

@@ -11,7 +11,7 @@ console. They are _not_ intended for use by applications. For application
 consumption, use the <<cluster-nodes-info,nodes info API>>.
 ====

-Returns information about custom node attributes.
+Returns information about <<shard-allocation-filtering,custom node attributes>>.

 [[cat-nodeattrs-api-request]]
 ==== {api-request-title}
@@ -13,7 +13,7 @@ They are _not_ intended for use by applications.
 For application consumption, use the <<cluster-nodes-info,nodes info API>>.
 ====

-Returns information about a cluster's nodes.
+Returns information about a <<modules-node,cluster's nodes>>.

 [[cat-nodes-api-request]]
 ==== {api-request-title}
@@ -11,8 +11,7 @@ console. They are _not_ intended for use by applications. For application
 consumption, use the <<cluster-pending,pending cluster tasks API>>.
 ====

-Returns cluster-level changes that have not yet been executed, similar to the
-<<cluster-pending, pending cluster tasks>> API.
+Returns <<cluster-state-publishing,cluster-level changes>> that have not yet been executed.

 [[cat-pending-tasks-api-request]]
 ==== {api-request-title}
@@ -12,7 +12,7 @@ console. They are _not_ intended for use by applications. For application
 consumption, use the <<cluster-nodes-info,nodes info API>>.
 ====

-Returns a list of plugins running on each node of a cluster.
+Returns a list of <<modules-plugins,plugins>> running on each node of a cluster.


 [[cat-plugins-api-request]]
@@ -11,10 +11,9 @@ console. They are _not_ intended for use by applications. For application
 consumption, use the <<indices-recovery,index recovery API>>.
 ====

-Returns information about ongoing and completed shard recoveries,
-similar to the <<indices-recovery, index recovery>> API.
+Returns information about ongoing and completed <<shard-allocation-relocation-recovery,shard recoveries>>.

-For data streams, the API returns information about the stream's backing
+For <<data-streams,data streams>>, the API returns information about the stream's backing
 indices.

 [[cat-recovery-api-request]]
@@ -12,10 +12,9 @@ consumption, use the <<indices-segments,index segments API>>.
 ====

 Returns low-level information about the https://lucene.apache.org/core/[Lucene]
-segments in index shards, similar to the <<indices-segments, indices segments>>
-API.
+segments in index shards.

-For data streams, the API returns information about the stream's backing
+For <<data-streams,data streams>>, the API returns information about the stream's backing
 indices.

 [[cat-segments-api-request]]
@@ -9,13 +9,16 @@
 ====
 cat APIs are only intended for human consumption using the command line or {kib}
 console.
-They are _not_ intended for use by applications.
+They are _not_ intended for use by applications. For application
+consumption, use the <<cluster-state,cluster state API>>.
 ====

-The `shards` command is the detailed view of what nodes contain which shards.
-It will tell you if it's a primary or replica, the number of docs, the bytes it takes on disk, and the node where it's located.
+The `shards` command is the detailed view of all nodes' shard <<shard-allocation-relocation-recovery,allocation>>.
+It will tell you if the shard is a primary or replica, the number of docs, the
+bytes it takes on disk, the node where it's located, and if the shard is
+currently <<shard-allocation-relocation-recovery,recovering>>.

-For data streams, the API returns information about the stream's backing indices.
+For <<data-streams,data streams>>, the API returns information about the stream's backing indices.

 [[cat-shards-api-request]]
 ==== {api-request-title}
@@ -12,7 +12,7 @@ console. They are _not_ intended for use by applications. For application
 consumption, use the <<get-trained-models,get trained models API>>.
 ====

-Returns configuration and usage information about {infer} trained models.
+Returns configuration and usage information about {ml-docs}/ml-nlp-deploy-models.html[{infer} trained models].


 [[cat-trained-model-request]]
@@ -12,7 +12,7 @@ console. They are _not_ intended for use by applications. For application
 consumption, use the <<get-transform,get transforms API>>.
 ====

-Returns configuration and usage information about {transforms}.
+Returns configuration and usage information about <<transforms,{transforms}>>.

 [[cat-transforms-api-request]]
 ==== {api-request-title}
@@ -762,6 +762,10 @@ Queries are counted once per search request, meaning that if the same query type
 (object) Search sections used in selected nodes.
 For each section, name and number of times it's been used is reported.

+`retrievers`::
+(object) Retriever types that were used in selected nodes.
+For each retriever, name and number of times it's been used is reported.
+
 =====

 `dense_vector`::
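For orientation, the added `retrievers` section would surface in the `_cluster/stats` response as a name-to-count map, roughly like the following (hypothetical names and values; the shape is inferred from the sibling search-section counters described above):

    "retrievers": {
      "standard": 12,
      "knn": 3
    }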
@@ -185,6 +185,9 @@ Executes another pipeline.
 <<reroute-processor, `reroute` processor>>::
 Reroutes documents to another target index or data stream.

+<<terminate-processor, `terminate` processor>>::
+Terminates the current ingest pipeline, causing no further processors to be run.
+
 [discrete]
 [[ingest-process-category-array-json-handling]]
 === Array/JSON handling processors

@@ -258,6 +261,7 @@ include::processors/set.asciidoc[]
 include::processors/set-security-user.asciidoc[]
 include::processors/sort.asciidoc[]
 include::processors/split.asciidoc[]
+include::processors/terminate.asciidoc[]
 include::processors/trim.asciidoc[]
 include::processors/uppercase.asciidoc[]
 include::processors/url-decode.asciidoc[]
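A minimal (hypothetical) pipeline snippet using the `terminate` processor documented above; the standard per-processor `if` condition is assumed for the conditional early exit, and the field names are placeholders:

    {
      "processors": [
        { "terminate": { "if": "ctx.skip_enrichment == true" } },
        { "uppercase": { "field": "message" } }
      ]
    }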
@@ -7,7 +7,7 @@
 <titleabbrev>Get {dfeed} statistics</titleabbrev>
 ++++

-Retrieves usage information for {dfeeds}.
+Retrieves usage information for {ml-docs}/ml-ad-run-jobs.html#ml-ad-datafeeds[{dfeeds}].

 [[ml-get-datafeed-stats-request]]
 == {api-request-title}
@@ -5,7 +5,7 @@
 <titleabbrev>Get job statistics</titleabbrev>
 ++++

-Retrieves usage information for {anomaly-jobs}.
+Retrieves usage information for {ml-docs}/ml-ad-overview.html[{anomaly-jobs}].

 [[ml-get-job-stats-request]]
 == {api-request-title}
@@ -6,7 +6,7 @@
 <titleabbrev>Get {dfanalytics-jobs} stats</titleabbrev>
 ++++

-Retrieves usage information for {dfanalytics-jobs}.
+Retrieves usage information for {ml-docs}/ml-dfanalytics.html[{dfanalytics-jobs}].


 [[ml-get-dfanalytics-stats-request]]
@@ -6,7 +6,7 @@
 <titleabbrev>Get trained models</titleabbrev>
 ++++

-Retrieves configuration information for a trained model.
+Retrieves configuration information about {ml-docs}/ml-nlp-deploy-models.html[{infer} trained models].


 [[ml-get-trained-models-request]]
@@ -13,7 +13,6 @@ import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.ComponentTemplate;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
-import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.cluster.metadata.MetadataCreateIndexService;

@@ -217,10 +216,7 @@ public class MetadataIndexTemplateServiceTests extends ESSingleNodeTestCase {
             xContentRegistry(),
             EmptySystemIndices.INSTANCE,
             indexSettingProviders,
-            DataStreamGlobalRetentionSettings.create(
-                ClusterSettings.createBuiltInClusterSettings(),
-                DataStreamFactoryRetention.emptyFactoryRetention()
-            )
+            DataStreamGlobalRetentionSettings.create(ClusterSettings.createBuiltInClusterSettings())
         );
     }

@@ -12,7 +12,6 @@ import org.elasticsearch.action.datastreams.GetDataStreamAction;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.DataStream;
-import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetention;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings;
 import org.elasticsearch.cluster.metadata.DataStreamTestHelper;

@@ -47,8 +46,7 @@ public class TransportGetDataStreamsActionTests extends ESTestCase {
     private final IndexNameExpressionResolver resolver = TestIndexNameExpressionResolver.newInstance();
     private final SystemIndices systemIndices = new SystemIndices(List.of());
     private final DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create(
-        ClusterSettings.createBuiltInClusterSettings(),
-        DataStreamFactoryRetention.emptyFactoryRetention()
+        ClusterSettings.createBuiltInClusterSettings()
     );

     public void testGetDataStream() {

@@ -356,8 +354,7 @@ public class TransportGetDataStreamsActionTests extends ESTestCase {
             )
                 .put(DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING.getKey(), globalRetention.maxRetention())
                 .build()
-            ),
-            DataStreamFactoryRetention.emptyFactoryRetention()
+            )
         );
         response = TransportGetDataStreamsAction.innerOperation(
             state,
@@ -37,7 +37,6 @@ import org.elasticsearch.cluster.TestShardRoutingRoleStrategies;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.DataStream;
-import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling;

@@ -142,8 +141,7 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
     private DoExecuteDelegate clientDelegate;
     private ClusterService clusterService;
     private final DataStreamGlobalRetentionSettings globalRetentionSettings = DataStreamGlobalRetentionSettings.create(
-        ClusterSettings.createBuiltInClusterSettings(),
-        DataStreamFactoryRetention.emptyFactoryRetention()
+        ClusterSettings.createBuiltInClusterSettings()
     );

     @Before
@@ -181,7 +181,11 @@ enum Database {
             Property.POSTAL_CODE
         ),
         Set.of(Property.COUNTRY_ISO_CODE, Property.REGION_NAME, Property.CITY_NAME, Property.LOCATION)
-    ),;
+    ),
+    PrivacyDetection(
+        Set.of(Property.IP, Property.HOSTING, Property.PROXY, Property.RELAY, Property.TOR, Property.VPN, Property.SERVICE),
+        Set.of(Property.HOSTING, Property.PROXY, Property.RELAY, Property.TOR, Property.VPN, Property.SERVICE)
+    );

     private final Set<Property> properties;
     private final Set<Property> defaultProperties;

@@ -262,7 +266,13 @@ enum Database {
         TYPE,
         POSTAL_CODE,
         POSTAL_CONFIDENCE,
-        ACCURACY_RADIUS;
+        ACCURACY_RADIUS,
+        HOSTING,
+        TOR,
+        PROXY,
+        RELAY,
+        VPN,
+        SERVICE;

         /**
          * Parses a string representation of a property into an actual Property instance. Not all properties that exist are
@@ -58,6 +58,31 @@ final class IpinfoIpDataLookups {
         }
     }

+    /**
+     * Lax-ly parses a string that contains a boolean into a Boolean (or null, if such parsing isn't possible).
+     * @param bool a potentially empty (or null) string that is expected to contain a parsable boolean
+     * @return the parsed boolean
+     */
+    static Boolean parseBoolean(final String bool) {
+        if (bool == null) {
+            return null;
+        } else {
+            String trimmed = bool.toLowerCase(Locale.ROOT).trim();
+            if ("true".equals(trimmed)) {
+                return true;
+            } else if ("false".equals(trimmed)) {
+                // "false" can represent false -- this an expected future enhancement in how the database represents booleans
+                return false;
+            } else if (trimmed.isEmpty()) {
+                // empty string can represent false -- this is how the database currently represents 'false' values
+                return false;
+            } else {
+                logger.trace("Unable to parse non-compliant boolean string [{}]", bool);
+                return null;
+            }
+        }
+    }
+
     /**
      * Lax-ly parses a string that contains a double into a Double (or null, if such parsing isn't possible).
      * @param latlon a potentially empty (or null) string that is expected to contain a parsable double

@@ -132,6 +157,22 @@ final class IpinfoIpDataLookups {
         }
     }

+    public record PrivacyDetectionResult(Boolean hosting, Boolean proxy, Boolean relay, String service, Boolean tor, Boolean vpn) {
+        @SuppressWarnings("checkstyle:RedundantModifier")
+        @MaxMindDbConstructor
+        public PrivacyDetectionResult(
+            @MaxMindDbParameter(name = "hosting") String hosting,
+            // @MaxMindDbParameter(name = "network") String network, // for now we're not exposing this
+            @MaxMindDbParameter(name = "proxy") String proxy,
+            @MaxMindDbParameter(name = "relay") String relay,
+            @MaxMindDbParameter(name = "service") String service, // n.b. this remains a string, the rest are parsed as booleans
+            @MaxMindDbParameter(name = "tor") String tor,
+            @MaxMindDbParameter(name = "vpn") String vpn
+        ) {
+            this(parseBoolean(hosting), parseBoolean(proxy), parseBoolean(relay), service, parseBoolean(tor), parseBoolean(vpn));
+        }
+    }
+
     static class Asn extends AbstractBase<AsnResult> {
         Asn(Set<Database.Property> properties) {
             super(properties, AsnResult.class);

@@ -286,6 +327,55 @@ final class IpinfoIpDataLookups {
         }
     }

+    static class PrivacyDetection extends AbstractBase<PrivacyDetectionResult> {
+        PrivacyDetection(Set<Database.Property> properties) {
+            super(properties, PrivacyDetectionResult.class);
+        }
+
+        @Override
+        protected Map<String, Object> transform(final Result<PrivacyDetectionResult> result) {
+            PrivacyDetectionResult response = result.result;
+
+            Map<String, Object> data = new HashMap<>();
+            for (Database.Property property : this.properties) {
+                switch (property) {
+                    case IP -> data.put("ip", result.ip);
+                    case HOSTING -> {
+                        if (response.hosting != null) {
+                            data.put("hosting", response.hosting);
+                        }
+                    }
+                    case TOR -> {
+                        if (response.tor != null) {
+                            data.put("tor", response.tor);
+                        }
+                    }
+                    case PROXY -> {
+                        if (response.proxy != null) {
+                            data.put("proxy", response.proxy);
+                        }
+                    }
+                    case RELAY -> {
+                        if (response.relay != null) {
+                            data.put("relay", response.relay);
+                        }
+                    }
+                    case VPN -> {
+                        if (response.vpn != null) {
+                            data.put("vpn", response.vpn);
+                        }
+                    }
+                    case SERVICE -> {
+                        if (Strings.hasText(response.service)) {
+                            data.put("service", response.service);
+                        }
+                    }
+                }
+            }
+            return data;
+        }
+    }
+
     /**
      * Just a little record holder -- there's the data that we receive via the binding to our record objects from the Reader via the
      * getRecord call, but then we also need to capture the passed-in ip address that came from the caller as well as the network for
@@ -41,7 +41,7 @@ public class GetDatabaseConfigurationAction extends ActionType<Response> {
         super(NAME);
     }

-    public static class Request extends BaseNodesRequest<Request> {
+    public static class Request extends BaseNodesRequest {
         private final String[] databaseIds;

         public Request(String... databaseIds) {
@@ -37,7 +37,7 @@ public class GeoIpStatsAction {

     private GeoIpStatsAction() {/* no instances */}

-    public static class Request extends BaseNodesRequest<Request> implements ToXContentObject {
+    public static class Request extends BaseNodesRequest implements ToXContentObject {

         public Request() {
             super((String[]) null);
@@ -38,7 +38,9 @@ import java.util.function.BiConsumer;
 import static java.util.Map.entry;
 import static org.elasticsearch.ingest.geoip.GeoIpTestUtils.copyDatabase;
 import static org.elasticsearch.ingest.geoip.IpinfoIpDataLookups.parseAsn;
+import static org.elasticsearch.ingest.geoip.IpinfoIpDataLookups.parseBoolean;
 import static org.elasticsearch.ingest.geoip.IpinfoIpDataLookups.parseLocationDouble;
+import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;

@@ -93,6 +95,21 @@ public class IpinfoIpDataLookupsTests extends ESTestCase {
         assertThat(parseAsn("anythingelse"), nullValue());
     }

+    public void testParseBoolean() {
+        // expected cases: "true" is true and "" is false
+        assertThat(parseBoolean("true"), equalTo(true));
+        assertThat(parseBoolean(""), equalTo(false));
+        assertThat(parseBoolean("false"), equalTo(false)); // future proofing
+        // defensive case: null becomes null, this is not expected fwiw
+        assertThat(parseBoolean(null), nullValue());
+        // defensive cases: we strip whitespace and ignore case
+        assertThat(parseBoolean(" "), equalTo(false));
+        assertThat(parseBoolean(" TrUe "), equalTo(true));
+        assertThat(parseBoolean(" FaLSE "), equalTo(false));
+        // bottom case: a non-parsable string is null
+        assertThat(parseBoolean(randomAlphaOfLength(8)), nullValue());
+    }
+
     public void testParseLocationDouble() {
         // expected case: "123.45" is 123.45
         assertThat(parseLocationDouble("123.45"), equalTo(123.45));

@@ -287,6 +304,76 @@ public class IpinfoIpDataLookupsTests extends ESTestCase {
         }
     }

+    public void testPrivacyDetection() throws IOException {
+        assumeFalse("https://github.com/elastic/elasticsearch/issues/114266", Constants.WINDOWS);
+        Path configDir = tmpDir;
+        copyDatabase("ipinfo/privacy_detection_sample.mmdb", configDir.resolve("privacy_detection_sample.mmdb"));
+
+        GeoIpCache cache = new GeoIpCache(1000); // real cache to test purging of entries upon a reload
+        ConfigDatabases configDatabases = new ConfigDatabases(configDir, cache);
+        configDatabases.initialize(resourceWatcherService);
+
+        // testing the first row in the sample database
+        try (DatabaseReaderLazyLoader loader = configDatabases.getDatabase("privacy_detection_sample.mmdb")) {
+            IpDataLookup lookup = new IpinfoIpDataLookups.PrivacyDetection(Database.PrivacyDetection.properties());
+            Map<String, Object> data = lookup.getData(loader, "1.53.59.33");
+            assertThat(
+                data,
+                equalTo(
+                    Map.ofEntries(
+                        entry("ip", "1.53.59.33"),
+                        entry("hosting", false),
+                        entry("proxy", false),
+                        entry("relay", false),
+                        entry("tor", false),
+                        entry("vpn", true)
+                    )
+                )
+            );
+        }
+
+        // testing a row with a non-empty service in the sample database
+        try (DatabaseReaderLazyLoader loader = configDatabases.getDatabase("privacy_detection_sample.mmdb")) {
+            IpDataLookup lookup = new IpinfoIpDataLookups.PrivacyDetection(Database.PrivacyDetection.properties());
+            Map<String, Object> data = lookup.getData(loader, "216.131.74.65");
+            assertThat(
+                data,
+                equalTo(
+                    Map.ofEntries(
+                        entry("ip", "216.131.74.65"),
+                        entry("hosting", true),
+                        entry("proxy", false),
+                        entry("service", "FastVPN"),
+                        entry("relay", false),
+                        entry("tor", false),
+                        entry("vpn", true)
+                    )
+                )
+            );
+        }
+    }
+
+    public void testPrivacyDetectionInvariants() {
+        assumeFalse("https://github.com/elastic/elasticsearch/issues/114266", Constants.WINDOWS);
+        Path configDir = tmpDir;
+        copyDatabase("ipinfo/privacy_detection_sample.mmdb", configDir.resolve("privacy_detection_sample.mmdb"));
+
+        {
+            final Set<String> expectedColumns = Set.of("network", "service", "hosting", "proxy", "relay", "tor", "vpn");
+
+            Path databasePath = configDir.resolve("privacy_detection_sample.mmdb");
+            assertDatabaseInvariants(databasePath, (ip, row) -> {
+                assertThat(row.keySet(), equalTo(expectedColumns));
+
+                for (String booleanColumn : Set.of("hosting", "proxy", "relay", "tor", "vpn")) {
+                    String bool = (String) row.get(booleanColumn);
+                    assertThat(bool, anyOf(equalTo("true"), equalTo(""), equalTo("false")));
+                    assertThat(parseBoolean(bool), notNullValue());
+                }
+            });
+        }
+    }
+
     private static void assertDatabaseInvariants(final Path databasePath, final BiConsumer<InetAddress, Map<String, Object>> rowConsumer) {
         try (Reader reader = new Reader(pathToFile(databasePath))) {
             Networks<?> networks = reader.networks(Map.class);
@@ -361,7 +361,11 @@ public class MaxMindSupportTests extends ESTestCase {

     private static final Set<Class<? extends AbstractResponse>> KNOWN_UNSUPPORTED_RESPONSE_CLASSES = Set.of(IpRiskResponse.class);

-    private static final Set<Database> KNOWN_UNSUPPORTED_DATABASE_VARIANTS = Set.of(Database.AsnV2, Database.CityV2);
+    private static final Set<Database> KNOWN_UNSUPPORTED_DATABASE_VARIANTS = Set.of(
+        Database.AsnV2,
+        Database.CityV2,
+        Database.PrivacyDetection
+    );

     public void testMaxMindSupport() {
         for (Database databaseType : Database.values()) {

Binary file not shown.
@@ -278,6 +278,19 @@ public class AzureBlobStore implements BlobStore {
         throw exception;
     }

+    /**
+     * {@inheritDoc}
+     * <p>
+     * Note that in this Azure implementation we issue a series of individual
+     * <a href="https://learn.microsoft.com/en-us/rest/api/storageservices/delete-blob">delete blob</a> calls rather than aggregating
+     * deletions into <a href="https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch">blob batch</a> calls.
+     * The reason for this is that the blob batch endpoint has limited support for SAS token authentication.
+     *
+     * @see <a href="https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch?tabs=shared-access-signatures#authorization">
+     *     API docs around SAS auth limitations</a>
+     * @see <a href="https://github.com/Azure/azure-storage-java/issues/538">Java SDK issue</a>
+     * @see <a href="https://github.com/elastic/elasticsearch/pull/65140#discussion_r528752070">Discussion on implementing PR</a>
+     */
     @Override
     public void deleteBlobsIgnoringIfNotExists(OperationPurpose purpose, Iterator<String> blobs) {
         if (blobs.hasNext() == false) {
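Conceptually, the strategy described in the new Javadoc reduces to one delete call per blob (a hedged sketch with assumed Azure SDK client names, not the commit's actual implementation):

    // one delete-blob REST call per blob, instead of a single blob-batch call for many,
    // because the batch endpoint has limited SAS token support (see the Javadoc links)
    while (blobs.hasNext()) {
        containerClient.getBlobClient(blobs.next()).deleteIfExists();
    }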
@@ -17,9 +17,6 @@ tests:
 - class: "org.elasticsearch.xpack.deprecation.DeprecationHttpIT"
   issue: "https://github.com/elastic/elasticsearch/issues/108628"
   method: "testDeprecatedSettingsReturnWarnings"
-- class: "org.elasticsearch.xpack.test.rest.XPackRestIT"
-  issue: "https://github.com/elastic/elasticsearch/issues/109687"
-  method: "test {p0=sql/translate/Translate SQL}"
 - class: org.elasticsearch.index.store.FsDirectoryFactoryTests
   method: testStoreDirectory
   issue: https://github.com/elastic/elasticsearch/issues/110210

@@ -95,9 +92,6 @@ tests:
 - class: org.elasticsearch.xpack.ml.integration.MlJobIT
   method: testDeleteJobAfterMissingIndex
   issue: https://github.com/elastic/elasticsearch/issues/112088
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-  method: test {p0=transform/preview_transforms/Test preview transform latest}
-  issue: https://github.com/elastic/elasticsearch/issues/112144
 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT
   issue: https://github.com/elastic/elasticsearch/issues/112147
 - class: org.elasticsearch.smoketest.WatcherYamlRestIT

@@ -127,9 +121,6 @@ tests:
 - class: org.elasticsearch.xpack.esql.action.ManyShardsIT
   method: testConcurrentQueries
   issue: https://github.com/elastic/elasticsearch/issues/112424
-- class: org.elasticsearch.xpack.inference.external.http.RequestBasedTaskRunnerTests
-  method: testLoopOneAtATime
-  issue: https://github.com/elastic/elasticsearch/issues/112471
 - class: org.elasticsearch.ingest.geoip.IngestGeoIpClientYamlTestSuiteIT
   issue: https://github.com/elastic/elasticsearch/issues/111497
 - class: org.elasticsearch.smoketest.SmokeTestIngestWithAllDepsClientYamlTestSuiteIT

@@ -141,9 +132,6 @@ tests:
 - class: org.elasticsearch.search.basic.SearchWhileRelocatingIT
   method: testSearchAndRelocateConcurrentlyRandomReplicas
   issue: https://github.com/elastic/elasticsearch/issues/112515
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-  method: test {p0=terms_enum/10_basic/Test search after on unconfigured constant keyword field}
-  issue: https://github.com/elastic/elasticsearch/issues/112624
 - class: org.elasticsearch.xpack.esql.EsqlAsyncSecurityIT
   method: testIndexPatternErrorMessageComparison_ESQL_SearchDSL
   issue: https://github.com/elastic/elasticsearch/issues/112630

@@ -230,21 +218,9 @@ tests:
 - class: org.elasticsearch.packaging.test.WindowsServiceTests
   method: test81JavaOptsInJvmOptions
   issue: https://github.com/elastic/elasticsearch/issues/113313
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-  method: test {p0=esql/50_index_patterns/disjoint_mappings}
-  issue: https://github.com/elastic/elasticsearch/issues/113315
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-  method: test {p0=wildcard/10_wildcard_basic/Query_string wildcard query}
-  issue: https://github.com/elastic/elasticsearch/issues/113316
 - class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
   method: test {p0=mtermvectors/10_basic/Tests catching other exceptions per item}
   issue: https://github.com/elastic/elasticsearch/issues/113325
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-  method: test {p0=transform/transforms_force_delete/Test force deleting a running transform}
-  issue: https://github.com/elastic/elasticsearch/issues/113327
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-  method: test {p0=analytics/top_metrics/sort by scaled float field}
-  issue: https://github.com/elastic/elasticsearch/issues/113340
 - class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT
   method: test {yaml=reference/ccr/apis/follow/post-resume-follow/line_84}
   issue: https://github.com/elastic/elasticsearch/issues/113343

@@ -307,8 +283,6 @@ tests:
 - class: org.elasticsearch.xpack.restart.MLModelDeploymentFullClusterRestartIT
   method: testDeploymentSurvivesRestart {cluster=UPGRADED}
   issue: https://github.com/elastic/elasticsearch/issues/112980
-- class: org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT
-  issue: https://github.com/elastic/elasticsearch/issues/113753
 - class: org.elasticsearch.ingest.geoip.DatabaseNodeServiceIT
   method: testNonGzippedDatabase
   issue: https://github.com/elastic/elasticsearch/issues/113821

@@ -377,6 +351,25 @@ tests:
 - class: org.elasticsearch.xpack.inference.services.cohere.CohereServiceTests
   method: testInfer_StreamRequest
   issue: https://github.com/elastic/elasticsearch/issues/114385
+- class: org.elasticsearch.xpack.inference.InferenceRestIT
+  method: test {p0=inference/30_semantic_text_inference/Calculates embeddings using the default ELSER 2 endpoint}
+  issue: https://github.com/elastic/elasticsearch/issues/114412
+- class: org.elasticsearch.xpack.inference.InferenceRestIT
+  method: test {p0=inference/40_semantic_text_query/Query a field that uses the default ELSER 2 endpoint}
+  issue: https://github.com/elastic/elasticsearch/issues/114376
+- class: org.elasticsearch.search.retriever.RankDocsRetrieverBuilderTests
+  method: testRewrite
+  issue: https://github.com/elastic/elasticsearch/issues/114467
+- class: org.elasticsearch.xpack.logsdb.LogsdbTestSuiteIT
+  issue: https://github.com/elastic/elasticsearch/issues/114471
+- class: org.elasticsearch.packaging.test.DockerTests
+  method: test022InstallPluginsFromLocalArchive
+  issue: https://github.com/elastic/elasticsearch/issues/111063
+- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT
+  method: test {yaml=reference/esql/esql-across-clusters/line_196}
+  issue: https://github.com/elastic/elasticsearch/issues/114488
+- class: org.elasticsearch.gradle.internal.PublishPluginFuncTest
+  issue: https://github.com/elastic/elasticsearch/issues/114492

 # Examples:
 #
@@ -15,7 +15,8 @@ setup:
         id: "test-id-3"
   - do:
       cluster.health:
-        index: .synonyms-2
+        index: .synonyms
+        timeout: 1m
         wait_for_status: green
 
 ---
@@ -14,7 +14,6 @@ import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.FeatureFlag;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.query.MatchAllQueryBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -23,7 +22,6 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.ESIntegTestCase.Scope;
 import org.elasticsearch.test.InternalTestCluster;
 import org.junit.Assert;
-import org.junit.BeforeClass;
 
 import java.util.Collection;
 import java.util.List;
@@ -46,7 +44,6 @@ public class ClusterStatsRemoteIT extends AbstractMultiClustersTestCase {
     private static final String REMOTE2 = "cluster-b";
 
     private static final String INDEX_NAME = "demo";
-    private static final FeatureFlag CCS_TELEMETRY_FEATURE_FLAG = new FeatureFlag("ccs_telemetry");
 
     @Override
     protected boolean reuseClusters() {
@@ -63,11 +60,6 @@ public class ClusterStatsRemoteIT extends AbstractMultiClustersTestCase {
         return Map.of(REMOTE1, false, REMOTE2, true);
     }
 
-    @BeforeClass
-    protected static void skipIfTelemetryDisabled() {
-        assumeTrue("Skipping test as CCS_TELEMETRY_FEATURE_FLAG is disabled", CCS_TELEMETRY_FEATURE_FLAG.isEnabled());
-    }
-
     public void testRemoteClusterStats() throws ExecutionException, InterruptedException {
         setupClusters();
         final Client client = client(LOCAL_CLUSTER);
@@ -25,7 +25,6 @@ import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.CollectionUtils;
-import org.elasticsearch.common.util.FeatureFlag;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.query.MatchAllQueryBuilder;
 import org.elasticsearch.plugins.Plugin;
@@ -40,7 +39,6 @@ import org.elasticsearch.test.AbstractMultiClustersTestCase;
 import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.usage.UsageService;
 import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.rules.TestRule;
 import org.junit.runner.Description;
@@ -73,7 +71,6 @@ public class CCSUsageTelemetryIT extends AbstractMultiClustersTestCase {
     private static final Logger LOGGER = LogManager.getLogger(CCSUsageTelemetryIT.class);
     private static final String REMOTE1 = "cluster-a";
     private static final String REMOTE2 = "cluster-b";
-    private static final FeatureFlag CCS_TELEMETRY_FEATURE_FLAG = new FeatureFlag("ccs_telemetry");
 
     @Override
     protected boolean reuseClusters() {
@@ -88,11 +85,6 @@ public class CCSUsageTelemetryIT extends AbstractMultiClustersTestCase {
     @Rule
     public SkipUnavailableRule skipOverride = new SkipUnavailableRule(REMOTE1, REMOTE2);
 
-    @BeforeClass
-    protected static void skipIfTelemetryDisabled() {
-        assumeTrue("Skipping test as CCS_TELEMETRY_FEATURE_FLAG is disabled", CCS_TELEMETRY_FEATURE_FLAG.isEnabled());
-    }
-
     @Override
     protected Map<String, Boolean> skipUnavailableForRemoteClusters() {
         var map = skipOverride.getMap();
@@ -31,6 +31,7 @@ import java.util.Map;
 import static java.util.Collections.singletonMap;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
 
 public class IntervalQueriesIT extends ESIntegTestCase {
 
@@ -64,6 +65,58 @@ public class IntervalQueriesIT extends ESIntegTestCase {
         );
     }
 
+    public void testPreserveInnerGap() {
+        assertAcked(prepareCreate("index").setMapping("""
+            {
+              "_doc" : {
+                "properties" : {
+                  "text" : { "type" : "text" }
+                }
+              }
+            }
+            """));
+
+        indexRandom(true, prepareIndex("index").setId("1").setSource("text", "w1 w2 w3 w4 w5"));
+
+        // ordered
+        {
+            var res = prepareSearch("index").setQuery(
+                new IntervalQueryBuilder(
+                    "text",
+                    new IntervalsSourceProvider.Combine(
+                        Arrays.asList(
+                            new IntervalsSourceProvider.Match("w1 w4", -1, true, null, null, null),
+                            new IntervalsSourceProvider.Match("w5", -1, true, null, null, null)
+                        ),
+                        true,
+                        1,
+                        null
+                    )
+                )
+            );
+            assertSearchHits(res, "1");
+        }
+
+        // unordered
+        {
+            var res = prepareSearch("index").setQuery(
+                new IntervalQueryBuilder(
+                    "text",
+                    new IntervalsSourceProvider.Combine(
+                        Arrays.asList(
+                            new IntervalsSourceProvider.Match("w3", 0, false, null, null, null),
+                            new IntervalsSourceProvider.Match("w4 w1", -1, false, null, null, null)
+                        ),
+                        false,
+                        0,
+                        null
+                    )
+                )
+            );
+            assertSearchHits(res, "1");
+        }
+    }
+
     private static class EmptyAnalyzer extends Analyzer {
 
         @Override
@@ -0,0 +1,151 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.search.retriever;
+
+import org.elasticsearch.action.admin.cluster.node.capabilities.NodesCapabilitiesRequest;
+import org.elasticsearch.action.admin.cluster.node.capabilities.NodesCapabilitiesResponse;
+import org.elasticsearch.action.admin.cluster.stats.SearchUsageStats;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.vectors.KnnSearchBuilder;
+import org.elasticsearch.search.vectors.KnnVectorQueryBuilder;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentFactory;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.List;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
+public class RetrieverTelemetryIT extends ESIntegTestCase {
+
+    private static final String INDEX_NAME = "test_index";
+
+    @Override
+    protected boolean addMockHttpTransport() {
+        return false; // enable http
+    }
+
+    @Before
+    public void setup() throws IOException {
+        XContentBuilder builder = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("properties")
+            .startObject("vector")
+            .field("type", "dense_vector")
+            .field("dims", 1)
+            .field("index", true)
+            .field("similarity", "l2_norm")
+            .startObject("index_options")
+            .field("type", "hnsw")
+            .endObject()
+            .endObject()
+            .startObject("text")
+            .field("type", "text")
+            .endObject()
+            .startObject("integer")
+            .field("type", "integer")
+            .endObject()
+            .startObject("topic")
+            .field("type", "keyword")
+            .endObject()
+            .endObject()
+            .endObject();
+
+        assertAcked(prepareCreate(INDEX_NAME).setMapping(builder));
+        ensureGreen(INDEX_NAME);
+    }
+
+    private void performSearch(SearchSourceBuilder source) throws IOException {
+        Request request = new Request("GET", INDEX_NAME + "/_search");
+        request.setJsonEntity(Strings.toString(source));
+        getRestClient().performRequest(request);
+    }
+
+    public void testTelemetryForRetrievers() throws IOException {
+
+        if (false == isRetrieverTelemetryEnabled()) {
+            return;
+        }
+
+        // search#1 - this will record 1 entry for "retriever" in `sections`, and 1 for "knn" under `retrievers`
+        {
+            performSearch(new SearchSourceBuilder().retriever(new KnnRetrieverBuilder("vector", new float[] { 1.0f }, null, 10, 15, null)));
+        }
+
+        // search#2 - this will record 1 entry for "retriever" in `sections`, 1 for "standard" under `retrievers`, and 1 for "range" under
+        // `queries`
+        {
+            performSearch(new SearchSourceBuilder().retriever(new StandardRetrieverBuilder(QueryBuilders.rangeQuery("integer").gte(2))));
+        }
+
+        // search#3 - this will record 1 entry for "retriever" in `sections`, and 1 for "standard" under `retrievers`, and 1 for "knn" under
+        // `queries`
+        {
+            performSearch(
+                new SearchSourceBuilder().retriever(
+                    new StandardRetrieverBuilder(new KnnVectorQueryBuilder("vector", new float[] { 1.0f }, 10, 15, null))
+                )
+            );
+        }
+
+        // search#4 - this will record 1 entry for "retriever" in `sections`, and 1 for "standard" under `retrievers`, and 1 for "term"
+        // under `queries`
+        {
+            performSearch(new SearchSourceBuilder().retriever(new StandardRetrieverBuilder(QueryBuilders.termQuery("topic", "foo"))));
+        }
+
+        // search#5 - this will record 1 entry for "knn" in `sections`
+        {
+            performSearch(new SearchSourceBuilder().knnSearch(List.of(new KnnSearchBuilder("vector", new float[] { 1.0f }, 10, 15, null))));
+        }
+
+        // search#6 - this will record 1 entry for "query" in `sections`, and 1 for "match_all" under `queries`
+        {
+            performSearch(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()));
+        }
+
+        // cluster stats
+        {
+            SearchUsageStats stats = clusterAdmin().prepareClusterStats().get().getIndicesStats().getSearchUsageStats();
+            assertEquals(6, stats.getTotalSearchCount());
+
+            assertThat(stats.getSectionsUsage().size(), equalTo(3));
+            assertThat(stats.getSectionsUsage().get("retriever"), equalTo(4L));
+            assertThat(stats.getSectionsUsage().get("query"), equalTo(1L));
+            assertThat(stats.getSectionsUsage().get("knn"), equalTo(1L));
+
+            assertThat(stats.getRetrieversUsage().size(), equalTo(2));
+            assertThat(stats.getRetrieversUsage().get("standard"), equalTo(3L));
+            assertThat(stats.getRetrieversUsage().get("knn"), equalTo(1L));
+
+            assertThat(stats.getQueryUsage().size(), equalTo(4));
+            assertThat(stats.getQueryUsage().get("range"), equalTo(1L));
+            assertThat(stats.getQueryUsage().get("term"), equalTo(1L));
+            assertThat(stats.getQueryUsage().get("match_all"), equalTo(1L));
+            assertThat(stats.getQueryUsage().get("knn"), equalTo(1L));
+        }
+    }
+
+    private boolean isRetrieverTelemetryEnabled() throws IOException {
+        NodesCapabilitiesResponse res = clusterAdmin().nodesCapabilities(
+            new NodesCapabilitiesRequest().method(RestRequest.Method.GET).path("_cluster/stats").capabilities("retrievers-usage-stats")
+        ).actionGet();
+        return res != null && res.isSupported().orElse(false);
+    }
+}
@@ -417,7 +417,6 @@ module org.elasticsearch.server {
     uses org.elasticsearch.internal.BuildExtension;
     uses org.elasticsearch.features.FeatureSpecification;
     uses org.elasticsearch.plugins.internal.LoggingDataProvider;
-    uses org.elasticsearch.cluster.metadata.DataStreamFactoryRetention;
 
     provides org.elasticsearch.features.FeatureSpecification
         with
@@ -238,6 +238,7 @@ public class TransportVersions {
     public static final TransportVersion FAST_REFRESH_RCO = def(8_762_00_0);
     public static final TransportVersion TEXT_SIMILARITY_RERANKER_QUERY_REWRITE = def(8_763_00_0);
     public static final TransportVersion SIMULATE_INDEX_TEMPLATES_SUBSTITUTIONS = def(8_764_00_0);
+    public static final TransportVersion RETRIEVERS_TELEMETRY_ADDED = def(8_765_00_0);
 
     /*
      * WARNING: DO NOT MERGE INTO MAIN!
@@ -16,7 +16,7 @@ import org.elasticsearch.rest.RestRequest;
 
 import java.util.Set;
 
-public class NodesCapabilitiesRequest extends BaseNodesRequest<NodesCapabilitiesRequest> {
+public class NodesCapabilitiesRequest extends BaseNodesRequest {
 
     private RestRequest.Method method = RestRequest.Method.GET;
     private String path = "/";
@@ -11,7 +11,7 @@ package org.elasticsearch.action.admin.cluster.node.features;
 
 import org.elasticsearch.action.support.nodes.BaseNodesRequest;
 
-public class NodesFeaturesRequest extends BaseNodesRequest<NodesFeaturesRequest> {
+public class NodesFeaturesRequest extends BaseNodesRequest {
     public NodesFeaturesRequest(String... nodes) {
         super(nodes);
     }
@@ -14,7 +14,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.monitor.jvm.HotThreads;
 
-public class NodesHotThreadsRequest extends BaseNodesRequest<NodesHotThreadsRequest> {
+public class NodesHotThreadsRequest extends BaseNodesRequest {
 
     final HotThreads.RequestOptions requestOptions;
 
@@ -18,7 +18,7 @@ import java.util.TreeSet;
 /**
  * A request to get node (cluster) level information.
  */
-public final class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
+public final class NodesInfoRequest extends BaseNodesRequest {
 
     private final NodesInfoMetrics nodesInfoMetrics;
 
@@ -33,7 +33,7 @@ import java.util.Arrays;
 /**
  * Request for a reload secure settings action
  */
-public class NodesReloadSecureSettingsRequest extends BaseNodesRequest<NodesReloadSecureSettingsRequest> {
+public class NodesReloadSecureSettingsRequest extends BaseNodesRequest {
 
     /**
      * The password is used to re-read and decrypt the contents
@@ -16,7 +16,7 @@ import java.util.Arrays;
 import java.util.Objects;
 import java.util.Set;
 
-public class PrevalidateShardPathRequest extends BaseNodesRequest<PrevalidateShardPathRequest> {
+public class PrevalidateShardPathRequest extends BaseNodesRequest {
 
     private final Set<ShardId> shardIds;
 
@@ -229,7 +229,8 @@ public class TransportPrevalidateNodeRemovalAction extends TransportMasterNodeRe
             ) // Convert to ShardId
             .collect(Collectors.toSet());
         var nodeIds = requestNodes.stream().map(DiscoveryNode::getId).toList().toArray(new String[0]);
-        var checkShardsRequest = new PrevalidateShardPathRequest(redShards, nodeIds).timeout(request.timeout());
+        var checkShardsRequest = new PrevalidateShardPathRequest(redShards, nodeIds);
+        checkShardsRequest.setTimeout(request.timeout());
         client.execute(TransportPrevalidateShardPathAction.TYPE, checkShardsRequest, new ActionListener<>() {
             @Override
             public void onResponse(PrevalidateShardPathResponse response) {
@@ -25,7 +25,7 @@ import java.util.Set;
 /**
  * A request to get node (cluster) level stats.
  */
-public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
+public class NodesStatsRequest extends BaseNodesRequest {
 
     private final NodesStatsRequestParameters nodesStatsRequestParameters;
 
@@ -11,7 +11,7 @@ package org.elasticsearch.action.admin.cluster.node.usage;
 
 import org.elasticsearch.action.support.nodes.BaseNodesRequest;
 
-public class NodesUsageRequest extends BaseNodesRequest<NodesUsageRequest> {
+public class NodesUsageRequest extends BaseNodesRequest {
 
     private boolean restActions;
     private boolean aggregations;
@@ -121,17 +121,13 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<
         }
     }
 
-    public static class Request extends BaseNodesRequest<Request> {
+    public static class Request extends BaseNodesRequest {
 
-        private Snapshot[] snapshots;
+        private final Snapshot[] snapshots;
 
-        public Request(String[] nodesIds) {
+        public Request(String[] nodesIds, Snapshot[] snapshots) {
             super(nodesIds);
-        }
-
-        public Request snapshots(Snapshot[] snapshots) {
             this.snapshots = snapshots;
-            return this;
         }
     }
 
@@ -141,10 +141,11 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
         for (int i = 0; i < currentSnapshots.size(); i++) {
             snapshots[i] = currentSnapshots.get(i).snapshot();
         }
+        final var snapshotStatusRequest = new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(Strings.EMPTY_ARRAY), snapshots);
+        snapshotStatusRequest.setTimeout(request.masterNodeTimeout().millis() < 0 ? null : request.masterNodeTimeout());
         client.executeLocally(
             TransportNodesSnapshotsStatus.TYPE,
-            new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(Strings.EMPTY_ARRAY)).snapshots(snapshots)
-                .timeout(request.masterNodeTimeout().millis() < 0 ? null : request.masterNodeTimeout()),
+            snapshotStatusRequest,
             // fork to snapshot meta since building the response is expensive for large snapshots
             new RefCountAwareThreadedActionListener<>(
                 threadPool.executor(ThreadPool.Names.SNAPSHOT_META),
@@ -19,7 +19,7 @@ import java.util.Map;
 /**
  * A request to get cluster level stats.
  */
-public class ClusterStatsRequest extends BaseNodesRequest<ClusterStatsRequest> {
+public class ClusterStatsRequest extends BaseNodesRequest {
     /**
      * Should the remote cluster stats be included in the response.
     */
@@ -28,8 +28,6 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 
-import static org.elasticsearch.action.search.TransportSearchAction.CCS_TELEMETRY_FEATURE_FLAG;
-
 public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResponse> implements ToXContentFragment {
 
     final ClusterStatsNodes nodesStats;
@@ -145,14 +143,12 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
         builder.field("repositories");
         repositoryUsageStats.toXContent(builder, params);
 
-        if (CCS_TELEMETRY_FEATURE_FLAG.isEnabled()) {
-            builder.startObject("ccs");
-            if (remoteClustersStats != null) {
-                builder.field("clusters", remoteClustersStats);
-            }
-            ccsMetrics.toXContent(builder, params);
-            builder.endObject();
+        builder.startObject("ccs");
+        if (remoteClustersStats != null) {
+            builder.field("clusters", remoteClustersStats);
         }
+        ccsMetrics.toXContent(builder, params);
+        builder.endObject();
 
         return builder;
     }
@@ -22,6 +22,7 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
 
+import static org.elasticsearch.TransportVersions.RETRIEVERS_TELEMETRY_ADDED;
 import static org.elasticsearch.TransportVersions.V_8_12_0;
 
 /**
@@ -34,6 +35,7 @@ public final class SearchUsageStats implements Writeable, ToXContentFragment {
     private final Map<String, Long> queries;
    private final Map<String, Long> rescorers;
     private final Map<String, Long> sections;
+    private final Map<String, Long> retrievers;
 
     /**
      * Creates a new empty stats instance, that will get additional stats added through {@link #add(SearchUsageStats)}
@@ -43,17 +45,25 @@ public final class SearchUsageStats implements Writeable, ToXContentFragment {
         this.queries = new HashMap<>();
         this.sections = new HashMap<>();
         this.rescorers = new HashMap<>();
+        this.retrievers = new HashMap<>();
     }
 
     /**
      * Creates a new stats instance with the provided info. The expectation is that when a new instance is created using
      * this constructor, the provided stats are final and won't be modified further.
      */
-    public SearchUsageStats(Map<String, Long> queries, Map<String, Long> rescorers, Map<String, Long> sections, long totalSearchCount) {
+    public SearchUsageStats(
+        Map<String, Long> queries,
+        Map<String, Long> rescorers,
+        Map<String, Long> sections,
+        Map<String, Long> retrievers,
+        long totalSearchCount
+    ) {
         this.totalSearchCount = totalSearchCount;
         this.queries = queries;
         this.sections = sections;
         this.rescorers = rescorers;
+        this.retrievers = retrievers;
     }
 
     public SearchUsageStats(StreamInput in) throws IOException {
@@ -61,6 +71,7 @@ public final class SearchUsageStats implements Writeable, ToXContentFragment {
         this.sections = in.readMap(StreamInput::readLong);
         this.totalSearchCount = in.readVLong();
         this.rescorers = in.getTransportVersion().onOrAfter(V_8_12_0) ? in.readMap(StreamInput::readLong) : Map.of();
+        this.retrievers = in.getTransportVersion().onOrAfter(RETRIEVERS_TELEMETRY_ADDED) ? in.readMap(StreamInput::readLong) : Map.of();
     }
 
     @Override
@@ -72,6 +83,9 @@ public final class SearchUsageStats implements Writeable, ToXContentFragment {
         if (out.getTransportVersion().onOrAfter(V_8_12_0)) {
             out.writeMap(rescorers, StreamOutput::writeLong);
         }
+        if (out.getTransportVersion().onOrAfter(RETRIEVERS_TELEMETRY_ADDED)) {
+            out.writeMap(retrievers, StreamOutput::writeLong);
+        }
     }
 
     /**
@@ -81,6 +95,7 @@ public final class SearchUsageStats implements Writeable, ToXContentFragment {
         stats.queries.forEach((query, count) -> queries.merge(query, count, Long::sum));
         stats.rescorers.forEach((rescorer, count) -> rescorers.merge(rescorer, count, Long::sum));
         stats.sections.forEach((query, count) -> sections.merge(query, count, Long::sum));
+        stats.retrievers.forEach((query, count) -> retrievers.merge(query, count, Long::sum));
        this.totalSearchCount += stats.totalSearchCount;
     }
 
@@ -95,6 +110,8 @@ public final class SearchUsageStats implements Writeable, ToXContentFragment {
             builder.map(rescorers);
             builder.field("sections");
             builder.map(sections);
+            builder.field("retrievers");
+            builder.map(retrievers);
         }
         builder.endObject();
         return builder;
@@ -112,6 +129,10 @@ public final class SearchUsageStats implements Writeable, ToXContentFragment {
         return Collections.unmodifiableMap(sections);
     }
 
+    public Map<String, Long> getRetrieversUsage() {
+        return Collections.unmodifiableMap(retrievers);
+    }
+
     public long getTotalSearchCount() {
         return totalSearchCount;
     }
@@ -128,12 +149,13 @@ public final class SearchUsageStats implements Writeable, ToXContentFragment {
         return totalSearchCount == that.totalSearchCount
             && queries.equals(that.queries)
             && rescorers.equals(that.rescorers)
-            && sections.equals(that.sections);
+            && sections.equals(that.sections)
+            && retrievers.equals(that.retrievers);
     }
 
     @Override
     public int hashCode() {
-        return Objects.hash(totalSearchCount, queries, rescorers, sections);
+        return Objects.hash(totalSearchCount, queries, rescorers, sections, retrievers);
     }
 
     @Override
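Note on the hunk above: a field added to a `Writeable` has to be version-gated on both the read and the write path, otherwise mixed-version clusters fail to decode the stream mid-upgrade. A minimal sketch of the idiom, where `MyStats` and `MY_FIELD_ADDED` are hypothetical stand-ins for a stats class and its `TransportVersion` constant (not part of this change):

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;
    import java.util.Map;

    public final class MyStats implements Writeable {
        // MY_FIELD_ADDED: hypothetical TransportVersion constant, declared as in
        // the TransportVersions hunk earlier in this commit.
        private final Map<String, Long> newField;

        public MyStats(StreamInput in) throws IOException {
            // Streams written by older nodes never contain the field, so default to empty.
            this.newField = in.getTransportVersion().onOrAfter(MY_FIELD_ADDED)
                ? in.readMap(StreamInput::readLong)
                : Map.of();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            // Only write the field when the receiving node is new enough to read it.
            if (out.getTransportVersion().onOrAfter(MY_FIELD_ADDED)) {
                out.writeMap(newField, StreamOutput::writeLong);
            }
        }
    }
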
@@ -49,6 +49,7 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.injection.guice.Inject;
 import org.elasticsearch.node.NodeService;
 import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.search.SearchService;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.tasks.TaskId;
@@ -431,8 +432,8 @@ public class TransportClusterStatsAction extends TransportNodesAction<
         }
     }
 
-    private static boolean doRemotes(ClusterStatsRequest request) {
-        return request.doRemotes();
+    private boolean doRemotes(ClusterStatsRequest request) {
+        return SearchService.CCS_COLLECT_TELEMETRY.get(settings) && request.doRemotes();
     }
 
     private class RemoteStatsFanout extends CancellableFanOut<String, RemoteClusterStatsResponse, Map<String, RemoteClusterStats>> {
@@ -12,7 +12,7 @@ package org.elasticsearch.action.admin.indices.dangling.find;
 import org.elasticsearch.action.support.nodes.BaseNodesRequest;
 import org.elasticsearch.common.Strings;
 
-public class FindDanglingIndexRequest extends BaseNodesRequest<FindDanglingIndexRequest> {
+public class FindDanglingIndexRequest extends BaseNodesRequest {
     private final String indexUUID;
 
     public FindDanglingIndexRequest(String indexUUID) {
@@ -12,7 +12,7 @@ package org.elasticsearch.action.admin.indices.dangling.list;
 import org.elasticsearch.action.support.nodes.BaseNodesRequest;
 import org.elasticsearch.common.Strings;
 
-public class ListDanglingIndicesRequest extends BaseNodesRequest<ListDanglingIndicesRequest> {
+public class ListDanglingIndicesRequest extends BaseNodesRequest {
     /**
      * Filter the response by index UUID. Leave as null to find all indices.
     */
@@ -52,9 +52,9 @@ import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.ArrayUtils;
 import org.elasticsearch.common.util.CollectionUtils;
-import org.elasticsearch.common.util.FeatureFlag;
 import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.common.util.concurrent.CountDown;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
@@ -128,8 +128,6 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
     public static final String FROZEN_INDICES_DEPRECATION_MESSAGE = "Searching frozen indices [{}] is deprecated."
         + " Consider cold or frozen tiers in place of frozen indices. The frozen feature will be removed in a feature release.";
 
-    public static final FeatureFlag CCS_TELEMETRY_FEATURE_FLAG = new FeatureFlag("ccs_telemetry");
-
     /** The maximum number of shards for a single search request. */
     public static final Setting<Long> SHARD_COUNT_LIMIT_SETTING = Setting.longSetting(
         "action.search.shard_count.limit",
@@ -162,6 +160,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
     private final SearchResponseMetrics searchResponseMetrics;
     private final Client client;
     private final UsageService usageService;
+    private final Settings settings;
 
     @Inject
     public TransportSearchAction(
@@ -194,8 +193,9 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
         this.indexNameExpressionResolver = indexNameExpressionResolver;
         this.namedWriteableRegistry = namedWriteableRegistry;
         this.executorSelector = executorSelector;
-        this.defaultPreFilterShardSize = DEFAULT_PRE_FILTER_SHARD_SIZE.get(clusterService.getSettings());
-        this.ccsCheckCompatibility = SearchService.CCS_VERSION_CHECK_SETTING.get(clusterService.getSettings());
+        this.settings = clusterService.getSettings();
+        this.defaultPreFilterShardSize = DEFAULT_PRE_FILTER_SHARD_SIZE.get(settings);
+        this.ccsCheckCompatibility = SearchService.CCS_VERSION_CHECK_SETTING.get(settings);
         this.searchResponseMetrics = searchResponseMetrics;
         this.client = client;
         this.usageService = usageService;
@@ -372,7 +372,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
                 searchPhaseProvider.apply(delegate)
             );
         } else {
-            if ((listener instanceof TelemetryListener tl) && CCS_TELEMETRY_FEATURE_FLAG.isEnabled()) {
+            if (listener instanceof TelemetryListener tl) {
                 tl.setRemotes(resolvedIndices.getRemoteClusterIndices().size());
                 if (task.isAsync()) {
                     tl.setFeature(CCSUsageTelemetry.ASYNC_FEATURE);
@@ -398,7 +398,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
             }
             final TaskId parentTaskId = task.taskInfo(clusterService.localNode().getId(), false).taskId();
             if (shouldMinimizeRoundtrips(rewritten)) {
-                if ((listener instanceof TelemetryListener tl) && CCS_TELEMETRY_FEATURE_FLAG.isEnabled()) {
+                if (listener instanceof TelemetryListener tl) {
                     tl.setFeature(CCSUsageTelemetry.MRT_FEATURE);
                 }
                 final AggregationReduceContext.Builder aggregationReduceContextBuilder = rewritten.source() != null
@@ -1868,7 +1868,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
          * Should we collect telemetry for this search?
          */
         private boolean collectTelemetry() {
-            return CCS_TELEMETRY_FEATURE_FLAG.isEnabled() && usageBuilder.getRemotesCount() > 0;
+            return SearchService.CCS_COLLECT_TELEMETRY.get(settings) && usageBuilder.getRemotesCount() > 0;
         }
 
         public void setRemotes(int count) {
@@ -23,7 +23,7 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Objects;
 
-public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>> extends ActionRequest {
+public abstract class BaseNodesRequest extends ActionRequest {
 
     /**
      * Sequence of node specifications that describe the nodes that this request should target. See {@link DiscoveryNodes#resolveNodes} for
@@ -53,14 +53,13 @@ public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>
         return nodesIds;
     }
 
+    @Nullable
     public TimeValue timeout() {
         return this.timeout;
     }
 
-    @SuppressWarnings("unchecked")
-    public final Request timeout(TimeValue timeout) {
+    public final void setTimeout(@Nullable TimeValue timeout) {
         this.timeout = timeout;
-        return (Request) this;
     }
 
     @Override
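Because `BaseNodesRequest` is no longer self-referentially generic, the fluent `timeout(...)` mutator that returned `Request` is replaced by a plain void setter; the `InternalClusterInfoService` and `TransportPrevalidateNodeRemovalAction` hunks in this commit show the caller-side migration. A minimal sketch, assuming `nodeIds` is a `String[]` in scope:

    // Before: chaining relied on the self-type parameter BaseNodesRequest<Request>.
    //   NodesStatsRequest request = new NodesStatsRequest(nodeIds).timeout(TimeValue.timeValueSeconds(30));

    // After: construct first, then call the void setter.
    NodesStatsRequest request = new NodesStatsRequest(nodeIds);
    request.setTimeout(TimeValue.timeValueSeconds(30));
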
@@ -15,7 +15,7 @@ import org.elasticsearch.client.internal.ElasticsearchClient;
 import org.elasticsearch.core.TimeValue;
 
 public abstract class NodesOperationRequestBuilder<
-    Request extends BaseNodesRequest<Request>,
+    Request extends BaseNodesRequest,
     Response extends BaseNodesResponse<?>,
     RequestBuilder extends NodesOperationRequestBuilder<Request, Response, RequestBuilder>> extends ActionRequestBuilder<
         Request,
@@ -27,7 +27,7 @@ public abstract class NodesOperationRequestBuilder<
 
     @SuppressWarnings("unchecked")
     public RequestBuilder setTimeout(TimeValue timeout) {
-        request.timeout(timeout);
+        request.setTimeout(timeout);
         return (RequestBuilder) this;
     }
 
@@ -46,7 +46,7 @@ import java.util.concurrent.Executor;
 import static org.elasticsearch.core.Strings.format;
 
 public abstract class TransportNodesAction<
-    NodesRequest extends BaseNodesRequest<NodesRequest>,
+    NodesRequest extends BaseNodesRequest,
     NodesResponse extends BaseNodesResponse<?>,
     NodeRequest extends TransportRequest,
    NodeResponse extends BaseNodeResponse,
@@ -285,7 +285,7 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt
         nodesStatsRequest.setIncludeShardsStats(false);
         nodesStatsRequest.clear();
         nodesStatsRequest.addMetric(NodesStatsRequestParameters.Metric.FS);
-        nodesStatsRequest.timeout(fetchTimeout);
+        nodesStatsRequest.setTimeout(fetchTimeout);
         client.admin().cluster().nodesStats(nodesStatsRequest, ActionListener.releaseAfter(new ActionListener<>() {
             @Override
             public void onResponse(NodesStatsResponse nodesStatsResponse) {
@@ -1,79 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the "Elastic License
- * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
- * Public License v 1"; you may not use this file except in compliance with, at
- * your election, the "Elastic License 2.0", the "GNU Affero General Public
- * License v3.0 only", or the "Server Side Public License, v 1".
- */
-
-package org.elasticsearch.cluster.metadata;
-
-import org.elasticsearch.common.settings.ClusterSettings;
-import org.elasticsearch.core.Nullable;
-import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.plugins.PluginsService;
-
-/**
- * Holds the factory retention configuration. Factory retention is the global retention configuration meant to be
- * used if a user hasn't provided other retention configuration via {@link DataStreamGlobalRetention} metadata in the
- * cluster state.
- * @deprecated This interface is deprecated, please use {@link DataStreamGlobalRetentionSettings}.
- */
-@Deprecated
-public interface DataStreamFactoryRetention {
-
-    @Nullable
-    TimeValue getMaxRetention();
-
-    @Nullable
-    TimeValue getDefaultRetention();
-
-    /**
-     * @return true, if at least one of the two settings is not null, false otherwise.
-     */
-    default boolean isDefined() {
-        return getMaxRetention() != null || getDefaultRetention() != null;
-    }
-
-    /**
-     * Applies any post constructor initialisation, for example, listening to cluster setting changes.
-     */
-    void init(ClusterSettings clusterSettings);
-
-    /**
-     * Loads a single instance of a DataStreamFactoryRetention from the {@link PluginsService} and finalises the
-     * initialisation by calling {@link DataStreamFactoryRetention#init(ClusterSettings)}
-     */
-    static DataStreamFactoryRetention load(PluginsService pluginsService, ClusterSettings clusterSettings) {
-        DataStreamFactoryRetention factoryRetention = pluginsService.loadSingletonServiceProvider(
-            DataStreamFactoryRetention.class,
-            DataStreamFactoryRetention::emptyFactoryRetention
-        );
-        factoryRetention.init(clusterSettings);
-        return factoryRetention;
-    }
-
-    /**
-     * Returns empty factory global retention settings.
-     */
-    static DataStreamFactoryRetention emptyFactoryRetention() {
-        return new DataStreamFactoryRetention() {
-
-            @Override
-            public TimeValue getMaxRetention() {
-                return null;
-            }
-
-            @Override
-            public TimeValue getDefaultRetention() {
-                return null;
-            }
-
-            @Override
-            public void init(ClusterSettings clusterSettings) {
-
-            }
-        };
-    }
-}
@ -26,8 +26,6 @@ import java.util.Map;
|
  * The global retention settings apply to non-system data streams that are managed by the data stream lifecycle. They consist of:
  * - The default retention which applies to data streams that do not have a retention defined.
  * - The max retention which applies to all data streams that do not have retention or their retention has exceeded this value.
- * <p>
- * Temporarily, we fall back to {@link DataStreamFactoryRetention} to facilitate a smooth transition to these settings.
  */
 public class DataStreamGlobalRetentionSettings {
 
@@ -84,42 +82,35 @@ public class DataStreamGlobalRetentionSettings {
         Setting.Property.Dynamic
     );
 
-    private final DataStreamFactoryRetention factoryRetention;
-
     @Nullable
     private volatile TimeValue defaultRetention;
     @Nullable
     private volatile TimeValue maxRetention;
 
-    private DataStreamGlobalRetentionSettings(DataStreamFactoryRetention factoryRetention) {
-        this.factoryRetention = factoryRetention;
+    private DataStreamGlobalRetentionSettings() {
     }
 
     @Nullable
     public TimeValue getMaxRetention() {
-        return shouldFallbackToFactorySettings() ? factoryRetention.getMaxRetention() : maxRetention;
+        return maxRetention;
     }
 
     @Nullable
     public TimeValue getDefaultRetention() {
-        return shouldFallbackToFactorySettings() ? factoryRetention.getDefaultRetention() : defaultRetention;
+        return defaultRetention;
     }
 
     public boolean areDefined() {
         return getDefaultRetention() != null || getMaxRetention() != null;
     }
 
-    private boolean shouldFallbackToFactorySettings() {
-        return defaultRetention == null && maxRetention == null;
-    }
-
     /**
      * Creates an instance and initialises the cluster settings listeners
     * @param clusterSettings it will register the cluster settings listeners to monitor for changes
-     * @param factoryRetention for migration purposes, it will be removed shortly
      */
-    public static DataStreamGlobalRetentionSettings create(ClusterSettings clusterSettings, DataStreamFactoryRetention factoryRetention) {
-        DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = new DataStreamGlobalRetentionSettings(factoryRetention);
+    public static DataStreamGlobalRetentionSettings create(ClusterSettings clusterSettings) {
+        DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = new DataStreamGlobalRetentionSettings();
         clusterSettings.initializeAndWatch(DATA_STREAMS_DEFAULT_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setDefaultRetention);
         clusterSettings.initializeAndWatch(DATA_STREAMS_MAX_RETENTION_SETTING, dataStreamGlobalRetentionSettings::setMaxRetention);
         return dataStreamGlobalRetentionSettings;
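Reviewer note: after this change the retention holder is plain cluster-settings plumbing. Two listeners write volatile fields and there is no factory fallback left to consult. A minimal self-contained sketch of that shape follows; the names are illustrative only, with Duration standing in for TimeValue and SettingWatcher for ClusterSettings#initializeAndWatch.

import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Illustrative stand-in for ClusterSettings#initializeAndWatch: seeds each
// listener with the current value, then pushes every later update to it.
class SettingWatcher<T> {
    private final List<Consumer<T>> listeners = new ArrayList<>();
    private T current;

    SettingWatcher(T initial) {
        this.current = initial;
    }

    void initializeAndWatch(Consumer<T> listener) {
        listeners.add(listener);
        listener.accept(current);
    }

    void update(T value) {
        current = value;
        listeners.forEach(listener -> listener.accept(value));
    }
}

// Mirrors the simplified settings holder: two volatile fields, no fallback.
class GlobalRetention {
    private volatile Duration defaultRetention; // null means "not configured"
    private volatile Duration maxRetention;

    static GlobalRetention create(SettingWatcher<Duration> defaults, SettingWatcher<Duration> max) {
        GlobalRetention retention = new GlobalRetention();
        defaults.initializeAndWatch(d -> retention.defaultRetention = d);
        max.initializeAndWatch(m -> retention.maxRetention = m);
        return retention;
    }

    boolean areDefined() {
        return defaultRetention != null || maxRetention != null;
    }
}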
@@ -467,6 +467,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
         SearchService.MAX_KEEPALIVE_SETTING,
         SearchService.ALLOW_EXPENSIVE_QUERIES,
         SearchService.CCS_VERSION_CHECK_SETTING,
+        SearchService.CCS_COLLECT_TELEMETRY,
         MultiBucketConsumerService.MAX_BUCKET_SETTING,
         SearchService.LOW_LEVEL_CANCELLATION_SETTING,
         SearchService.MAX_OPEN_SCROLL_CONTEXT,
@@ -58,8 +58,8 @@ public final class Int3Hash extends AbstractHash {
      * Get the id associated with <code>key</code> or -1 if the key is not contained in the hash.
      */
     public long find(int key1, int key2, int key3) {
-        long index = slot(hash(key1, key2, key3), mask);
-        while (true) {
+        final long slot = slot(hash(key1, key2, key3), mask);
+        for (long index = slot;; index = nextSlot(index, mask)) {
             final long id = id(index);
             if (id == -1) {
                 return id;
@@ -69,14 +69,13 @@ public final class Int3Hash extends AbstractHash {
                     return id;
                 }
             }
-            index = nextSlot(index, mask);
         }
     }
 
     private long set(int key1, int key2, int key3, long id) {
         assert size < maxSize;
-        long index = slot(hash(key1, key2, key3), mask);
-        while (true) {
+        long slot = slot(hash(key1, key2, key3), mask);
+        for (long index = slot;; index = nextSlot(index, mask)) {
             final long curId = id(index);
             if (curId == -1) { // means unset
                 setId(index, id);
@@ -84,33 +83,35 @@ public final class Int3Hash extends AbstractHash {
                 ++size;
                 return id;
             } else {
-                long keyOffset = 3 * curId;
+                final long keyOffset = 3 * curId;
                 if (keys.get(keyOffset) == key1 && keys.get(keyOffset + 1) == key2 && keys.get(keyOffset + 2) == key3) {
                     return -1 - curId;
                 }
             }
-            index = nextSlot(index, mask);
         }
     }
 
     private void append(long id, int key1, int key2, int key3) {
-        long keyOffset = 3 * id;
+        final long keyOffset = 3 * id;
         keys = bigArrays.grow(keys, keyOffset + 3);
         keys.set(keyOffset, key1);
         keys.set(keyOffset + 1, key2);
         keys.set(keyOffset + 2, key3);
     }
 
-    private void reset(int key1, int key2, int key3, long id) {
-        long index = slot(hash(key1, key2, key3), mask);
-        while (true) {
+    private void reset(long id) {
+        final IntArray keys = this.keys;
+        final long keyOffset = id * 3;
+        final int key1 = keys.get(keyOffset);
+        final int key2 = keys.get(keyOffset + 1);
+        final int key3 = keys.get(keyOffset + 2);
+        final long slot = slot(hash(key1, key2, key3), mask);
+        for (long index = slot;; index = nextSlot(index, mask)) {
             final long curId = id(index);
             if (curId == -1) { // means unset
                 setId(index, id);
-                append(id, key1, key2, key3);
                 break;
             }
-            index = nextSlot(index, mask);
         }
     }
 
@@ -132,11 +133,7 @@ public final class Int3Hash extends AbstractHash {
     protected void removeAndAdd(long index) {
         final long id = getAndSetId(index, -1);
         assert id >= 0;
-        long keyOffset = id * 3;
-        final int key1 = keys.getAndSet(keyOffset, 0);
-        final int key2 = keys.getAndSet(keyOffset + 1, 0);
-        final int key3 = keys.getAndSet(keyOffset + 2, 0);
-        reset(key1, key2, key3, id);
+        reset(id);
     }
 
     @Override
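Reviewer note: the Int3Hash hunks above rewrite each while (true) probe into a for loop whose header owns the cursor, and reset(id) now re-reads the keys from the keys array instead of having removeAndAdd thread them through. A toy sketch of that probe-loop shape, not the BigArrays-backed implementation; all names here are illustrative.

// Minimal open-addressing table shaped like the refactored loops.
final class TinyIntHash {
    private final long[] ids;   // -1 marks an empty slot
    private final int[] keys;   // one int key per assigned id
    private final long mask;

    TinyIntHash(int capacityPow2) {
        ids = new long[capacityPow2];
        java.util.Arrays.fill(ids, -1L);
        keys = new int[capacityPow2];
        mask = capacityPow2 - 1;
    }

    private long nextSlot(long index) {
        return (index + 1) & mask; // linear probing with wrap-around
    }

    // Returns the id stored for key, or -1. Assumes the table is never full,
    // as the real class guarantees via its maxSize load-factor check.
    long find(int key) {
        for (long index = Integer.hashCode(key) & mask;; index = nextSlot(index)) {
            final long id = ids[(int) index];
            if (id == -1 || keys[(int) id] == key) {
                return id;
            }
        }
    }
}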
@@ -182,7 +182,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
         }
     }
 
-    public static class Request extends BaseNodesRequest<Request> {
+    public static class Request extends BaseNodesRequest {
 
         private final ShardId shardId;
         @Nullable
@@ -39,7 +39,7 @@ public class HealthApiStatsAction extends ActionType<HealthApiStatsAction.Respon
         super(NAME);
     }
 
-    public static class Request extends BaseNodesRequest<Request> {
+    public static class Request extends BaseNodesRequest {
 
         public Request() {
             super((String[]) null);
@@ -126,7 +126,7 @@ public abstract class IntervalBuilder {
         if (maxGaps == 0 && ordered) {
             return Intervals.phrase(sourcesArray);
         }
-        IntervalsSource inner = ordered ? Intervals.ordered(sourcesArray) : Intervals.unordered(sourcesArray);
+        IntervalsSource inner = ordered ? XIntervals.ordered(sourcesArray) : XIntervals.unordered(sourcesArray);
         if (maxGaps == -1) {
             return inner;
         }
@@ -0,0 +1,106 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.index.query;
+
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.queries.intervals.IntervalIterator;
+import org.apache.lucene.queries.intervals.IntervalMatchesIterator;
+import org.apache.lucene.queries.intervals.Intervals;
+import org.apache.lucene.queries.intervals.IntervalsSource;
+import org.apache.lucene.search.QueryVisitor;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Objects;
+
+/**
+ * Copy of {@link Intervals} that exposes versions of {@link Intervals#ordered} and {@link Intervals#unordered}
+ * that preserve their inner gaps.
+ * NOTE: Remove this hack when a version of Lucene with https://github.com/apache/lucene/pull/13819 is used (10.1.0).
+ */
+public final class XIntervals {
+
+    /**
+     * Create an ordered {@link IntervalsSource}
+     *
+     * <p>Returns intervals in which the subsources all appear in the given order
+     *
+     * @param subSources an ordered set of {@link IntervalsSource} objects
+     */
+    public static IntervalsSource ordered(IntervalsSource... subSources) {
+        return new DelegateIntervalsSource(Intervals.ordered(subSources));
+    }
+
+    /**
+     * Create an unordered {@link IntervalsSource}
+     *
+     * <p>Returns intervals in which the subsources all appear in any order
+     *
+     * @param subSources a set of {@link IntervalsSource} objects
+     */
+    public static IntervalsSource unordered(IntervalsSource... subSources) {
+        return new DelegateIntervalsSource(Intervals.unordered(subSources));
+    }
+
+    /**
+     * Wraps a source to avoid aggressive flattening of the ordered and unordered sources.
+     * The flattening modifies the final gap and is removed in the latest unreleased version of Lucene (10.1).
+     */
+    private static class DelegateIntervalsSource extends IntervalsSource {
+        private final IntervalsSource delegate;
+
+        private DelegateIntervalsSource(IntervalsSource delegate) {
+            this.delegate = delegate;
+        }
+
+        @Override
+        public IntervalIterator intervals(String field, LeafReaderContext ctx) throws IOException {
+            return delegate.intervals(field, ctx);
+        }
+
+        @Override
+        public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
+            return delegate.matches(field, ctx, doc);
+        }
+
+        @Override
+        public void visit(String field, QueryVisitor visitor) {
+            delegate.visit(field, visitor);
+        }
+
+        @Override
+        public int minExtent() {
+            return delegate.minExtent();
+        }
+
+        @Override
+        public Collection<IntervalsSource> pullUpDisjunctions() {
+            return delegate.pullUpDisjunctions();
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            DelegateIntervalsSource that = (DelegateIntervalsSource) o;
+            return Objects.equals(delegate, that.delegate);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(delegate);
+        }
+
+        @Override
+        public String toString() {
+            return delegate.toString();
+        }
+    }
+}
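Reviewer note: a hypothetical caller of the new XIntervals class, assuming the calling code can see it (it lives in org.elasticsearch.index.query). Intervals.term and Intervals.maxgaps are standard Lucene factory methods; wrapping the ordered source in the opaque delegate keeps maxgaps from flattening it and recomputing its gaps, which is the whole point of the copy.

import org.apache.lucene.queries.intervals.Intervals;
import org.apache.lucene.queries.intervals.IntervalsSource;

class XIntervalsUsage {
    // "quick" then "fox", in order, with at most two positions between them.
    static IntervalsSource quickThenFox() {
        IntervalsSource inner = XIntervals.ordered(Intervals.term("quick"), Intervals.term("fox"));
        return Intervals.maxgaps(2, inner);
    }
}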
@@ -278,7 +278,7 @@ public class TransportNodesListShardStoreMetadata extends TransportNodesAction<
         }
     }
 
-    public static class Request extends BaseNodesRequest<Request> {
+    public static class Request extends BaseNodesRequest {
 
         private final ShardId shardId;
         @Nullable
@@ -29,6 +29,11 @@ public record EmptyTaskSettings() implements TaskSettings {
         this();
     }
 
+    @Override
+    public boolean isEmpty() {
+        return true;
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
@@ -173,7 +173,11 @@ public class ModelConfigurations implements ToFilteredXContentObject, VersionedN
         builder.field(TaskType.NAME, taskType.toString());
         builder.field(SERVICE, service);
         builder.field(SERVICE_SETTINGS, serviceSettings);
-        builder.field(TASK_SETTINGS, taskSettings);
+        // Always write task settings to the index even if empty.
+        // But do not show empty settings in the response
+        if (params.paramAsBoolean(USE_ID_FOR_INDEX, false) || (taskSettings != null && taskSettings.isEmpty() == false)) {
+            builder.field(TASK_SETTINGS, taskSettings);
+        }
         if (chunkingSettings != null) {
             builder.field(CHUNKING_SETTINGS, chunkingSettings);
         }
@@ -192,7 +196,11 @@ public class ModelConfigurations implements ToFilteredXContentObject, VersionedN
         builder.field(TaskType.NAME, taskType.toString());
         builder.field(SERVICE, service);
         builder.field(SERVICE_SETTINGS, serviceSettings.getFilteredXContentObject());
-        builder.field(TASK_SETTINGS, taskSettings);
+        // Always write task settings to the index even if empty.
+        // But do not show empty settings in the response
+        if (params.paramAsBoolean(USE_ID_FOR_INDEX, false) || (taskSettings != null && taskSettings.isEmpty() == false)) {
+            builder.field(TASK_SETTINGS, taskSettings);
+        }
         if (chunkingSettings != null) {
             builder.field(CHUNKING_SETTINGS, chunkingSettings);
         }
@@ -12,4 +12,6 @@ package org.elasticsearch.inference;
 import org.elasticsearch.common.io.stream.VersionedNamedWriteable;
 import org.elasticsearch.xcontent.ToXContentObject;
 
-public interface TaskSettings extends ToXContentObject, VersionedNamedWriteable {}
+public interface TaskSettings extends ToXContentObject, VersionedNamedWriteable {
+    boolean isEmpty();
+}
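Reviewer note: the new TaskSettings#isEmpty feeds the gate added in ModelConfigurations#toXContent above, so empty task settings are still persisted when serializing for the index but omitted from API responses. A self-contained sketch of that gate; the interface, the USE_ID_FOR_INDEX name, and the plain Map of params are stand-ins for the real XContent types.

import java.util.Map;

interface TaskSettingsLike {
    boolean isEmpty();
}

final class TaskSettingsWriter {
    static final String USE_ID_FOR_INDEX = "use_id_for_index"; // hypothetical param name

    // Returns true when task settings should be serialized: always for the
    // index document, and for responses only when the settings are non-empty.
    static boolean shouldWriteTaskSettings(Map<String, String> params, TaskSettingsLike taskSettings) {
        boolean forIndex = Boolean.parseBoolean(params.getOrDefault(USE_ID_FOR_INDEX, "false"));
        return forIndex || (taskSettings != null && taskSettings.isEmpty() == false);
    }
}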
@@ -44,7 +44,6 @@ import org.elasticsearch.cluster.coordination.Coordinator;
 import org.elasticsearch.cluster.coordination.MasterHistoryService;
 import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService;
 import org.elasticsearch.cluster.features.NodeFeaturesFixupListener;
-import org.elasticsearch.cluster.metadata.DataStreamFactoryRetention;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings;
 import org.elasticsearch.cluster.metadata.IndexMetadataVerifier;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -623,8 +622,7 @@ class NodeConstruction {
         MetadataCreateIndexService metadataCreateIndexService
     ) {
         DataStreamGlobalRetentionSettings dataStreamGlobalRetentionSettings = DataStreamGlobalRetentionSettings.create(
-            clusterService.getClusterSettings(),
-            DataStreamFactoryRetention.load(pluginsService, clusterService.getClusterSettings())
+            clusterService.getClusterSettings()
         );
         modules.bindToInstance(DataStreamGlobalRetentionSettings.class, dataStreamGlobalRetentionSettings);
         modules.bindToInstance(
@@ -11,8 +11,6 @@ package org.elasticsearch.rest.action.admin.cluster;
 
 import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
 import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.common.util.FeatureFlag;
-import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.Scope;
@@ -33,10 +31,10 @@ public class RestClusterStatsAction extends BaseRestHandler {
 
     private static final Set<String> SUPPORTED_CAPABILITIES = Set.of(
         "human-readable-total-docs-size",
-        "verbose-dense-vector-mapping-stats"
+        "verbose-dense-vector-mapping-stats",
+        "ccs-stats",
+        "retrievers-usage-stats"
     );
-    private static final Set<String> SUPPORTED_CAPABILITIES_CCS_STATS = Set.copyOf(Sets.union(SUPPORTED_CAPABILITIES, Set.of("ccs-stats")));
-    public static final FeatureFlag CCS_TELEMETRY_FEATURE_FLAG = new FeatureFlag("ccs_telemetry");
     private static final Set<String> SUPPORTED_QUERY_PARAMETERS = Set.of("include_remotes", "nodeId", REST_TIMEOUT_PARAM);
 
     @Override
@@ -60,7 +58,7 @@ public class RestClusterStatsAction extends BaseRestHandler {
             request.paramAsBoolean("include_remotes", false),
             request.paramAsStringArray("nodeId", null)
         );
-        clusterStatsRequest.timeout(getTimeout(request));
+        clusterStatsRequest.setTimeout(getTimeout(request));
         return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin()
             .cluster()
             .clusterStats(clusterStatsRequest, new NodesResponseRestListener<>(channel));
@@ -73,6 +71,6 @@ public class RestClusterStatsAction extends BaseRestHandler {
 
     @Override
     public Set<String> supportedCapabilities() {
-        return CCS_TELEMETRY_FEATURE_FLAG.isEnabled() ? SUPPORTED_CAPABILITIES_CCS_STATS : SUPPORTED_CAPABILITIES;
+        return SUPPORTED_CAPABILITIES;
     }
 }
@@ -65,8 +65,8 @@ public class RestNodesCapabilitiesAction extends BaseRestHandler {
         // Handle the 'path' parameter, use "/" as default if not provided
         String path = URLDecoder.decode(request.param("path", "/"), StandardCharsets.UTF_8);
 
-        NodesCapabilitiesRequest r = requestNodes.timeout(getTimeout(request))
-            .method(RestRequest.Method.valueOf(request.param("method", "GET")))
+        requestNodes.setTimeout(getTimeout(request));
+        NodesCapabilitiesRequest r = requestNodes.method(RestRequest.Method.valueOf(request.param("method", "GET")))
             .path(path)
             .parameters(request.paramAsStringArray("parameters", Strings.EMPTY_ARRAY))
             .capabilities(request.paramAsStringArray("capabilities", Strings.EMPTY_ARRAY))
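Reviewer note: the timeout(...) to setTimeout(...) rename that recurs through the Rest*Action hunks below swaps a fluent mutator for a conventional void setter, which is why the chain in RestNodesCapabilitiesAction above had to be split. An illustrative contrast with a made-up minimal class; the real request types carry much more state.

final class NodesRequestStyle {
    private String timeout;

    // Old style: the mutator returned 'this', so call sites chained it.
    NodesRequestStyle timeout(String t) {
        this.timeout = t;
        return this;
    }

    // New style: a plain void setter; chains that began with timeout(...)
    // must now make the timeout call a separate statement.
    void setTimeout(String t) {
        this.timeout = t;
    }
}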
@@ -115,7 +115,7 @@ public class RestNodesHotThreadsAction extends BaseRestHandler {
                 request.paramAsBoolean("ignore_idle_threads", HotThreads.RequestOptions.DEFAULT.ignoreIdleThreads())
             )
         );
-        nodesHotThreadsRequest.timeout(getTimeout(request));
+        nodesHotThreadsRequest.setTimeout(getTimeout(request));
         return channel -> client.execute(TransportNodesHotThreadsAction.TYPE, nodesHotThreadsRequest, new RestResponseListener<>(channel) {
             @Override
             public RestResponse buildResponse(NodesHotThreadsResponse response) {
@@ -88,7 +88,7 @@ public class RestNodesInfoAction extends BaseRestHandler {
         }
 
         final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds);
-        nodesInfoRequest.timeout(getTimeout(request));
+        nodesInfoRequest.setTimeout(getTimeout(request));
         // shortcut, don't do checks if only all is specified
         if (metrics.size() == 1 && metrics.contains("_all")) {
             nodesInfoRequest.all();
@@ -81,7 +81,7 @@ public class RestNodesStatsAction extends BaseRestHandler {
         Set<String> metricNames = Strings.tokenizeByCommaToSet(request.param("metric", "_all"));
 
         NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(nodesIds);
-        nodesStatsRequest.timeout(getTimeout(request));
+        nodesStatsRequest.setTimeout(getTimeout(request));
         // level parameter validation
         nodesStatsRequest.setIncludeShardsStats(NodeStatsLevel.of(request, NodeStatsLevel.NODE) != NodeStatsLevel.NODE);
 
@@ -50,7 +50,7 @@ public class RestNodesUsageAction extends BaseRestHandler {
         Set<String> metrics = Strings.tokenizeByCommaToSet(request.param("metric", "_all"));
 
         NodesUsageRequest nodesUsageRequest = new NodesUsageRequest(nodesIds);
-        nodesUsageRequest.timeout(getTimeout(request));
+        nodesUsageRequest.setTimeout(getTimeout(request));
 
         if (metrics.size() == 1 && metrics.contains("_all")) {
             nodesUsageRequest.all();
Some files were not shown because too many files have changed in this diff.