Mirror of https://github.com/elastic/elasticsearch.git (synced 2025-04-25 15:47:23 -04:00)

Merge remote-tracking branch 'upstream/main' into lucene_snapshot_9_11

Commit 08298dcd69
266 changed files with 5713 additions and 4260 deletions
@@ -26,6 +26,10 @@ develocity {
   if (jenkinsUrl?.host?.endsWith('elastic.co') || jenkinsUrl?.host?.endsWith('elastic.dev') || System.getenv('BUILDKITE') == 'true') {
     publishing.onlyIf { true }
     server = 'https://gradle-enterprise.elastic.co'
+  } else {
+    publishing.onlyIf {
+      server.isPresent();
+    }
   }
 }
@@ -71,7 +71,6 @@ public abstract class RestrictedBuildApiService implements BuildService<Restrict
        map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-die-with-dignity");
        map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-error-query");
        map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-latency-simulating-directory");
-       map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-seek-tracking-directory");
        map.put(LegacyRestTestBasePlugin.class, ":test:yaml-rest-runner");
        map.put(LegacyRestTestBasePlugin.class, ":distribution:archives:integ-test-zip");
        map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:core");
@@ -36,6 +36,15 @@ ext.docsFileTree = fileTree(projectDir) {
   }
 }

+tasks.named("yamlRestTest") {
+  if (BuildParams.isSnapshotBuild() == false) {
+    // LOOKUP is not available in snapshots
+    systemProperty 'tests.rest.blacklist', [
+      "reference/esql/processing-commands/lookup/esql-lookup-example"
+    ].join(',')
+  }
+}
+
 /* List of files that have snippets that will not work until platinum tests can occur ... */
 tasks.named("buildRestTests").configure {
   getExpectedUnconvertedCandidates().addAll(
docs/changelog/108421.yaml (new file)
@@ -0,0 +1,6 @@
+pr: 108421
+summary: "[ES|QL] Support Named and Positional Parameters in `EsqlQueryRequest`"
+area: ES|QL
+type: enhancement
+issues:
+ - 107029
docs/changelog/109492.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 109492
+summary: Add hexstring support byte painless scorers
+area: Search
+type: bug
+issues: []
docs/changelog/109613.yaml (new file)
@@ -0,0 +1,6 @@
+pr: 109613
+summary: Consider `error_trace` supported by all endpoints
+area: Infra/REST API
+type: bug
+issues:
+ - 109612
@@ -68,6 +68,52 @@ state must ever be reloaded from persisted state.

 ## Deprecations

+## Backwards Compatibility
+
+major releases are mostly about breaking compatibility and dropping deprecated functionality.
+
+Elasticsearch versions are composed of three pieces of information: the major version, the minor version, and the patch version,
+in that order (major.minor.patch). Patch releases are typically bug fixes; minor releases contain improvements / new features;
+and major releases essentially break compatibility and enable removal of deprecated functionality. As an example, each of 8.0.0,
+8.3.0 and 8.3.1 specifies an exact release version. They all have the same major version (8) and the last two have the same minor
+version (8.3). Multiversion compatibility within a cluster, or backwards compatibility with older version nodes, is guaranteed
+across specific versions.
+
+### Transport Layer Backwards Compatibility
+
+Elasticsearch nodes can communicate over the network with all node versions within the same major release. All versions within
+one major version X are also compatible with the last minor version releases of the previous major version, i.e. (X-1).last.
+More concretely, all 8.x.x version nodes can communicate with all 7.17.x version nodes.
+
+### Index Format Backwards Compatibility
+
+Index data format backwards compatibility is guaranteed with all versions of the previous major release. All 8.x.x version nodes,
+for example, can read index data written by any 7.x.x version node. 9.x.x versions, however, will not be able to read 7.x.x format
+data files.
+
+Elasticsearch does not have an upgrade process to convert from older to newer index data formats. The user is expected to run
+`reindex` on any remaining untouched data from a previous version upgrade before upgrading to the next version. There is a good
+chance that older version index data will age out and be deleted before the user does the next upgrade, but `reindex` can be used
+if that is not the case.
+
+### Snapshot Backwards Compatibility
+
+Snapshots taken by a cluster of version X cannot be read by a cluster running older version nodes. However, snapshots taken by an
+older version cluster can continue to be read from and written to by newer version clusters: this compatibility goes back many
+major versions. If a newer version cluster writes to a snapshot repository containing snapshots from an older version, then it
+will do so in a way that leaves the repository format (metadata and file layout) readable by those older versions.
+
+Restoring indexes that have different and no longer supported data formats can be tricky: see the
+[public snapshot compatibility docs][] for details.
+
+[public snapshot compatibility docs]: https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html#snapshot-index-compatibility
+
+### Upgrade
+
+See the [public upgrade docs][] for the upgrade process.
+
+[public upgrade docs]: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html
+
 ## Plugins

 (what warrants a plugin?)
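Editorial note, not part of the commit: the wire-compatibility rule added above (every node of major X can talk to every other X node and to (X-1).last) can be sketched with the server's `Version` helpers that other tests in this diff already use (`Version.fromString`, `onOrAfter`). This is a minimal illustration only; it assumes `minimumCompatibilityVersion()` encodes the "(X-1).last" floor and it ignores the case of a remote node that is too new.

import org.elasticsearch.Version;

public class WireCompatSketch {
    // A node can communicate with all nodes of its own major version and with the
    // last minor of the previous major, e.g. all 8.x.x nodes can talk to 7.17.x nodes.
    static boolean wireCompatible(Version local, Version remote) {
        return remote.onOrAfter(local.minimumCompatibilityVersion());
    }

    public static void main(String[] args) {
        Version local = Version.fromString("8.3.1");
        System.out.println(wireCompatible(local, Version.fromString("7.17.21"))); // true: (X-1).last
        System.out.println(wireCompatible(local, Version.fromString("7.16.0")));  // false: older than 7.17
    }
}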
@@ -10,7 +10,7 @@ The following specialized API is available in the Score context.
 ==== Static Methods
 The following methods are directly callable without a class/instance qualifier. Note parameters denoted by a (*) are treated as read-only values.

-* double cosineSimilarity(List *, String *)
+* double cosineSimilarity(Object *, String *)
 * double decayDateExp(String *, String *, String *, double *, ZonedDateTime)
 * double decayDateGauss(String *, String *, String *, double *, ZonedDateTime)
 * double decayDateLinear(String *, String *, String *, double *, ZonedDateTime)
@@ -20,9 +20,9 @@ The following methods are directly callable without a class/instance qualifier.
 * double decayNumericExp(double *, double *, double *, double *, double)
 * double decayNumericGauss(double *, double *, double *, double *, double)
 * double decayNumericLinear(double *, double *, double *, double *, double)
-* double dotProduct(List *, String *)
+* double dotProduct(Object *, String *)
-* double l1norm(List *, String *)
+* double l1norm(Object *, String *)
-* double l2norm(List *, String *)
+* double l2norm(Object *, String *)
 * double randomScore(int *)
 * double randomScore(int *, String *)
 * double saturation(double, double)
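Editorial note, not part of the commit: relaxing these signatures from `List` to `Object` is what lets a hex-encoded byte vector be passed where a list of numbers was previously required (see changelog 109492 and the YAML tests later in this diff). A minimal sketch using the low-level REST client; the index name `my-byte-vectors` is an assumption, while the field name `vector`, the script source, and the hex value mirror the YAML test.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class HexVectorScoreSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request search = new Request("GET", "/my-byte-vectors/_search");
            // The query vector is a hex string rather than a List of numbers.
            search.setJsonEntity("""
                {
                  "query": {
                    "script_score": {
                      "query": { "match_all": {} },
                      "script": {
                        "source": "dotProduct(params.query_vector, 'vector')",
                        "params": { "query_vector": "006ff30e84" }
                      }
                    }
                  }
                }""");
            Response response = client.performRequest(search);
            System.out.println(response.getStatusLine());
        }
    }
}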
@@ -15,9 +15,10 @@ This getting started is also available as an https://github.com/elastic/elastics
 [[esql-getting-started-prerequisites]]
 === Prerequisites

-To follow along with the queries in this guide, you'll need an {es} deployment with our sample data.
+To follow along with the queries in this guide, you can either set up your own
+deployment, or use Elastic's public {esql} demo environment.

-include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-sample-data.asciidoc[tag=own-deployment]
+include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc[]

 [discrete]
 [[esql-getting-started-running-queries]]
@@ -268,8 +269,7 @@ Before you can use `ENRICH`, you first need to
 <<esql-create-enrich-policy,create>> and <<esql-execute-enrich-policy,execute>>
 an <<esql-enrich-policy,enrich policy>>.

-include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc[tag=own-deployment]
+include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc[]
-

 After creating and executing a policy, you can use it with the `ENRICH`
 command:
@@ -27,7 +27,7 @@ adding the other fields from the `table` to the output.
 *Examples*

 // tag::examples[]
-[source,console]
+[source,console,id=esql-lookup-example]
 ----
 POST /_query?format=txt
 {
@@ -40,8 +40,8 @@ POST /_query?format=txt
     """,
   "tables": {
     "era": {
-      "author:keyword": ["Frank Herbert", "Peter F. Hamilton", "Vernor Vinge", "Alastair Reynolds", "James S.A. Corey"],
+      "author": {"keyword": ["Frank Herbert", "Peter F. Hamilton", "Vernor Vinge", "Alastair Reynolds", "James S.A. Corey"]},
-      "era:keyword" : [ "The New Wave", "Diamond", "Diamond", "Diamond", "Hadron"]
+      "era": {"keyword": [ "The New Wave", "Diamond", "Diamond", "Diamond", "Hadron"]}
     }
   }
 }
@@ -23,7 +23,9 @@ For more information about creating and updating the {es} keystore, see
 ==== General security settings
 `xpack.security.enabled`::
 (<<static-cluster-setting,Static>>)
-Defaults to `true`, which enables {es} {security-features} on the node. +
+Defaults to `true`, which enables {es} {security-features} on the node.
+This setting must be enabled to use Elasticsearch's authentication,
+authorization and audit features. +
 +
 --
 If set to `false`, {security-features} are disabled, which is not recommended.
@@ -1,6 +1,6 @@
 // tag::own-deployment[]

-First, you'll need to ingest the sample data. In {kib}, open the main menu and select *Dev
+First ingest some sample data. In {kib}, open the main menu and select *Dev
 Tools*. Run the following two requests:

 [source,console]

File diff suppressed because one or more lines are too long
@@ -27,9 +27,9 @@ static_import {
     double decayDateLinear(String, String, String, double, ZonedDateTime) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayDateLinear
     double decayDateExp(String, String, String, double, ZonedDateTime) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayDateExp
     double decayDateGauss(String, String, String, double, ZonedDateTime) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayDateGauss
-    double l1norm(org.elasticsearch.script.ScoreScript, List, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$L1Norm
+    double l1norm(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$L1Norm
-    double l2norm(org.elasticsearch.script.ScoreScript, List, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$L2Norm
+    double l2norm(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$L2Norm
-    double cosineSimilarity(org.elasticsearch.script.ScoreScript, List, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$CosineSimilarity
+    double cosineSimilarity(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$CosineSimilarity
-    double dotProduct(org.elasticsearch.script.ScoreScript, List, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$DotProduct
+    double dotProduct(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$DotProduct
 }
@@ -77,7 +77,35 @@ setup:

   - match: {hits.hits.2._id: "1"}
   - match: {hits.hits.2._score: 1632.0}
+---
+"Dot Product hexidecimal":
+  - requires:
+      cluster_features: "gte_v8.14.1"
+      reason: "support for hexidecimal byte vectors added in 8.14"
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "dotProduct(params.query_vector, 'vector')"
+                params:
+                  query_vector: "006ff30e84"
+
+  - match: {hits.total: 3}
+
+  - match: {hits.hits.0._id: "2"}
+  - match: {hits.hits.0._score: 28732.0}
+
+  - match: {hits.hits.1._id: "3"}
+  - match: {hits.hits.1._score: 17439.0}
+
+  - match: {hits.hits.2._id: "1"}
+  - match: {hits.hits.2._score: 1632.0}
 ---
 "Cosine Similarity":
   - do:
@@ -108,6 +136,39 @@ setup:
   - gte: {hits.hits.2._score: 0.509}
   - lte: {hits.hits.2._score: 0.512}

+---
+"Cosine Similarity hexidecimal":
+  - requires:
+      cluster_features: "gte_v8.14.1"
+      reason: "support for hexidecimal byte vectors added in 8.14"
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "cosineSimilarity(params.query_vector, 'vector')"
+                params:
+                  query_vector: "006ff30e84"
+
+  - match: {hits.total: 3}
+
+  - match: {hits.hits.0._id: "2"}
+  - gte: {hits.hits.0._score: 0.995}
+  - lte: {hits.hits.0._score: 0.998}
+
+  - match: {hits.hits.1._id: "3"}
+  - gte: {hits.hits.1._score: 0.829}
+  - lte: {hits.hits.1._score: 0.832}
+
+  - match: {hits.hits.2._id: "1"}
+  - gte: {hits.hits.2._score: 0.509}
+  - lte: {hits.hits.2._score: 0.512}
+
 ---
 "Cosine similarity with indexed vector":
   - do:
@@ -70,6 +70,35 @@ setup:
   - gte: {hits.hits.2._score: 29.0}

 ---
+"L1 norm hexidecimal":
+  - requires:
+      cluster_features: "gte_v8.14.1"
+      reason: "support for hexidecimal byte vectors added in 8.14"
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "l1norm(params.query_vector, 'my_dense_vector')"
+                params:
+                  query_vector: "006ff30e84"
+
+  - match: {hits.total: 3}
+
+  - match: {hits.hits.0._id: "1"}
+  - match: {hits.hits.0._score: 246.0}
+
+  - match: {hits.hits.1._id: "3"}
+  - match: {hits.hits.1._score: 117.0}
+
+  - match: {hits.hits.2._id: "2"}
+  - gte: {hits.hits.2._score: 29.0}
+---
 "L2 norm":
   - do:
       headers:
@@ -95,6 +124,38 @@ setup:
   - gte: {hits.hits.1._score: 94.407}
   - lte: {hits.hits.1._score: 94.41}
+
+  - match: {hits.hits.2._id: "2"}
+  - gte: {hits.hits.2._score: 15.263}
+  - lte: {hits.hits.2._score: 15.266}
+---
+"L2 norm hexidecimal":
+  - requires:
+      cluster_features: "gte_v8.14.1"
+      reason: "support for hexidecimal byte vectors added in 8.14"
+  - do:
+      headers:
+        Content-Type: application/json
+      search:
+        rest_total_hits_as_int: true
+        body:
+          query:
+            script_score:
+              query: {match_all: {} }
+              script:
+                source: "l2norm(params.query_vector, 'my_dense_vector')"
+                params:
+                  query_vector: "006ff30e84"
+
+  - match: {hits.total: 3}
+
+  - match: {hits.hits.0._id: "1"}
+  - gte: {hits.hits.0._score: 158.624}
+  - lte: {hits.hits.0._score: 158.627}
+
+  - match: {hits.hits.1._id: "3"}
+  - gte: {hits.hits.1._score: 94.407}
+  - lte: {hits.hits.1._score: 94.41}
+
   - match: {hits.hits.2._id: "2"}
   - gte: {hits.hits.2._score: 15.263}
   - lte: {hits.hits.2._score: 15.266}
@@ -59,9 +59,6 @@ tests:
 - class: org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAppendTests
   method: testEvaluateBlockWithoutNulls {TestCase=<cartesian_shape>, <cartesian_shape>}
   issue: https://github.com/elastic/elasticsearch/issues/109409
-- class: "org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT"
-  issue: "https://github.com/elastic/elasticsearch/issues/109478"
-  method: "test {yaml=reference/esql/processing-commands/lookup/line_31}"

 # Examples:
 #
@@ -1,44 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0 and the Server Side Public License, v 1; you may not use this file except
- * in compliance with, at your election, the Elastic License 2.0 or the Server
- * Side Public License, v 1.
- */
-
-import org.elasticsearch.gradle.Architecture
-import org.elasticsearch.gradle.VersionProperties
-import org.elasticsearch.gradle.internal.info.BuildParams
-
-import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER
-
-apply plugin: 'elasticsearch.standalone-rest-test'
-apply plugin: 'elasticsearch.test.fixtures'
-apply plugin: 'elasticsearch.internal-distribution-download'
-
-dockerCompose {
-  environment.put 'STACK_VERSION', BuildParams.snapshotBuild ? VersionProperties.elasticsearch : VersionProperties.elasticsearch + "-SNAPSHOT"
-}
-
-elasticsearch_distributions {
-  docker {
-    type = DOCKER
-    architecture = Architecture.current()
-    version = VersionProperties.getElasticsearch()
-    failIfUnavailable = false // This ensures we skip this testing if Docker is unavailable
-  }
-}
-
-tasks.named("preProcessFixture").configure {
-  dependsOn elasticsearch_distributions.matching { it.architecture == Architecture.current() }
-}
-
-tasks.register("integTest", Test) {
-  outputs.doNotCacheIf('Build cache is disabled for Docker tests') { true }
-  maxParallelForks = '1'
-  include '**/*IT.class'
-}
-
-tasks.named("check").configure {
-  dependsOn "integTest"
-}
@@ -1,34 +0,0 @@
----
-apm_server:
-  cluster: ['manage_ilm', 'manage_security', 'manage_api_key']
-  indices:
-    - names: ['apm-*', 'logs-apm*', 'metrics-apm*', 'traces-apm*']
-      privileges: ['write', 'create_index', 'manage', 'manage_ilm']
-  applications:
-    - application: 'apm'
-      privileges: ['sourcemap:write', 'event:write', 'config_agent:read']
-      resources: '*'
-beats:
-  cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm', 'manage_security', 'manage_api_key']
-  indices:
-    - names: ['filebeat-*', 'shrink-filebeat-*']
-      privileges: ['all']
-filebeat:
-  cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm']
-  indices:
-    - names: ['filebeat-*', 'shrink-filebeat-*']
-      privileges: ['all']
-heartbeat:
-  cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm']
-  indices:
-    - names: ['heartbeat-*', 'shrink-heartbeat-*']
-      privileges: ['all']
-metricbeat:
-  cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm']
-  indices:
-    - names: ['metricbeat-*', 'shrink-metricbeat-*']
-      privileges: ['all']
-opbeans:
-  indices:
-    - names: ['opbeans-*']
-      privileges: ['write', 'read']
@@ -1,2 +0,0 @@
-elastic/fleet-server/elastic-package-fleet-server-token:{PBKDF2_STRETCH}10000$PNiVyY96dHwRfoDszBvYPAz+mSLbC+NhtPh63dblDZU=$dAY1tXX1U5rXB+2Lt7m0L2LUNSb1q5nRaIqPNZTBxb8=
-elastic/kibana/elastic-package-kibana-token:{PBKDF2_STRETCH}10000$wIEFHIIIZ2ap0D0iQsyw0MfB7YuFA1bHnXAmlCoL4Gg=$YxvIJnasjLZyDQZpmFBiJHdR/CGXd5BnVm013Jty6p0=
@@ -1,9 +0,0 @@
-admin:$2a$10$xiY0ZzOKmDDN1p3if4t4muUBwh2.bFHADoMRAWQgSClm4ZJ4132Y.
-apm_server_user:$2a$10$iTy29qZaCSVn4FXlIjertuO8YfYVLCbvoUAJ3idaXfLRclg9GXdGG
-apm_user_ro:$2a$10$hQfy2o2u33SapUClsx8NCuRMpQyHP9b2l4t3QqrBA.5xXN2S.nT4u
-beats_user:$2a$10$LRpKi4/Q3Qo4oIbiu26rH.FNIL4aOH4aj2Kwi58FkMo1z9FgJONn2
-filebeat_user:$2a$10$sFxIEX8tKyOYgsbJLbUhTup76ssvSD3L4T0H6Raaxg4ewuNr.lUFC
-heartbeat_user:$2a$10$nKUGDr/V5ClfliglJhfy8.oEkjrDtklGQfhd9r9NoFqQeoNxr7uUK
-kibana_system_user:$2a$10$nN6sRtQl2KX9Gn8kV/.NpOLSk6Jwn8TehEDnZ7aaAgzyl/dy5PYzW
-metricbeat_user:$2a$10$5PyTd121U2ZXnFk9NyqxPuLxdptKbB8nK5egt6M5/4xrKUkk.GReG
-opbeans_user:$2a$10$iTy29qZaCSVn4FXlIjertuO8YfYVLCbvoUAJ3idaXfLRclg9GXdGG
@@ -1,13 +0,0 @@
-apm_server:apm_server_user
-apm_system:apm_server_user
-apm_user:apm_server_user,apm_user_ro
-beats:beats_user
-beats_system:beats_user,filebeat_user,heartbeat_user,metricbeat_user
-filebeat:filebeat_user
-heartbeat:heartbeat_user
-ingest_admin:apm_server_user
-kibana_system:kibana_system_user
-kibana_user:apm_server_user,apm_user_ro,beats_user,filebeat_user,heartbeat_user,metricbeat_user,opbeans_user
-metricbeat:metricbeat_user
-opbeans:opbeans_user
-superuser:admin
@@ -1,78 +0,0 @@
-xpack.fleet.packages:
-  - name: system
-    version: latest
-  - name: elastic_agent
-    version: latest
-  - name: apm
-    version: latest
-  - name: fleet_server
-    version: latest
-
-xpack.fleet.agentPolicies:
-  - name: Fleet Server + APM policy
-    id: fleet-server-apm-policy
-    description: Fleet server policy with APM and System logs and metrics enabled
-    namespace: default
-    is_default_fleet_server: true
-    is_managed: false
-    monitoring_enabled:
-      - logs
-      - metrics
-    package_policies:
-      - name: system-1
-        package:
-          name: system
-      - name: apm-1
-        package:
-          name: apm
-        inputs:
-          - type: apm
-            keep_enabled: true
-            vars:
-              - name: host
-                value: 0.0.0.0:8200
-                frozen: true
-              - name: url
-                value: "${ELASTIC_APM_SERVER_URL}"
-                frozen: true
-              - name: enable_rum
-                value: true
-                frozen: true
-              - name: read_timeout
-                value: 1m
-                frozen: true
-              - name: shutdown_timeout
-                value: 2m
-                frozen: true
-              - name: write_timeout
-                value: 1m
-                frozen: true
-              - name: rum_allow_headers
-                value:
-                  - x-custom-header
-                frozen: true
-              - name: secret_token
-                value: "${ELASTIC_APM_SECRET_TOKEN}"
-                frozen: true
-              - name: tls_enabled
-                value: ${ELASTIC_APM_TLS}
-                frozen: true
-              - name: tls_certificate
-                value: /usr/share/apmserver/config/certs/tls.crt
-                frozen: true
-              - name: tls_key
-                value: /usr/share/apmserver/config/certs/tls.key
-                frozen: true
-      - name: Fleet Server
-        package:
-          name: fleet_server
-        inputs:
-          - type: fleet-server
-            keep_enabled: true
-            vars:
-              - name: host
-                value: 0.0.0.0
-                frozen: true
-              - name: port
-                value: 8220
-                frozen: true
@ -1,154 +0,0 @@
|
||||||
version: "2.4"
|
|
||||||
|
|
||||||
networks:
|
|
||||||
default:
|
|
||||||
name: apm-integration-testing
|
|
||||||
|
|
||||||
services:
|
|
||||||
apmserver:
|
|
||||||
depends_on:
|
|
||||||
kibana:
|
|
||||||
condition: service_healthy
|
|
||||||
environment:
|
|
||||||
FLEET_ELASTICSEARCH_HOST: null
|
|
||||||
FLEET_SERVER_ELASTICSEARCH_INSECURE: "1"
|
|
||||||
FLEET_SERVER_ENABLE: "1"
|
|
||||||
FLEET_SERVER_HOST: 0.0.0.0
|
|
||||||
FLEET_SERVER_INSECURE_HTTP: "1"
|
|
||||||
FLEET_SERVER_POLICY_ID: fleet-server-apm-policy
|
|
||||||
FLEET_SERVER_PORT: "8220"
|
|
||||||
FLEET_SERVER_SERVICE_TOKEN: AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL2VsYXN0aWMtcGFja2FnZS1mbGVldC1zZXJ2ZXItdG9rZW46bmgtcFhoQzRRQ2FXbms2U0JySGlWQQ
|
|
||||||
KIBANA_FLEET_HOST: null
|
|
||||||
KIBANA_FLEET_SERVICE_TOKEN: AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL2VsYXN0aWMtcGFja2FnZS1mbGVldC1zZXJ2ZXItdG9rZW46bmgtcFhoQzRRQ2FXbms2U0JySGlWQQ
|
|
||||||
KIBANA_FLEET_SETUP: "1"
|
|
||||||
healthcheck:
|
|
||||||
test: /bin/true
|
|
||||||
image: docker.elastic.co/beats/elastic-agent:${STACK_VERSION}
|
|
||||||
labels:
|
|
||||||
- co.elastic.apm.stack-version=${STACK_VERSION}
|
|
||||||
logging:
|
|
||||||
driver: json-file
|
|
||||||
options:
|
|
||||||
max-file: "5"
|
|
||||||
max-size: 2m
|
|
||||||
volumes:
|
|
||||||
- /var/run/docker.sock:/var/run/docker.sock
|
|
||||||
- ./scripts/tls/apmserver/cert.crt:/usr/share/apmserver/config/certs/tls.crt
|
|
||||||
- ./scripts/tls/apmserver/key.pem:/usr/share/apmserver/config/certs/tls.key
|
|
||||||
|
|
||||||
elasticsearch:
|
|
||||||
environment:
|
|
||||||
- action.destructive_requires_name=false
|
|
||||||
- bootstrap.memory_lock=true
|
|
||||||
- cluster.name=docker-cluster
|
|
||||||
- cluster.routing.allocation.disk.threshold_enabled=false
|
|
||||||
- discovery.type=single-node
|
|
||||||
- ES_JAVA_OPTS=-Xms1g -Xmx1g
|
|
||||||
- indices.id_field_data.enabled=true
|
|
||||||
- ingest.geoip.downloader.enabled=false
|
|
||||||
- path.repo=/usr/share/elasticsearch/data/backups
|
|
||||||
- xpack.license.self_generated.type=trial
|
|
||||||
- xpack.monitoring.collection.enabled=true
|
|
||||||
- xpack.security.authc.anonymous.roles=remote_monitoring_collector
|
|
||||||
- xpack.security.authc.api_key.enabled=true
|
|
||||||
- xpack.security.authc.realms.file.file1.order=0
|
|
||||||
- xpack.security.authc.realms.native.native1.order=1
|
|
||||||
- xpack.security.authc.token.enabled=true
|
|
||||||
- xpack.security.enabled=true
|
|
||||||
# APM specific settings. We don't configure `secret_key` because Kibana is configured with a blank key
|
|
||||||
- telemetry.tracing.enabled=true
|
|
||||||
- telemetry.agent.server_url=http://apmserver:8200
|
|
||||||
# Send traces to APM server aggressively
|
|
||||||
- telemetry.agent.metrics_interval=1s
|
|
||||||
# Record everything
|
|
||||||
- telemetry.agent.transaction_sample_rate=1
|
|
||||||
- telemetry.agent.log_level=debug
|
|
||||||
healthcheck:
|
|
||||||
interval: 20s
|
|
||||||
retries: 10
|
|
||||||
test: curl -s -k http://localhost:9200/_cluster/health | grep -vq '"status":"red"'
|
|
||||||
image: elasticsearch:test
|
|
||||||
labels:
|
|
||||||
- co.elastic.apm.stack-version=${STACK_VERSION}
|
|
||||||
- co.elastic.metrics/module=elasticsearch
|
|
||||||
- co.elastic.metrics/metricsets=node,node_stats
|
|
||||||
- co.elastic.metrics/hosts=http://$${data.host}:9200
|
|
||||||
logging:
|
|
||||||
driver: json-file
|
|
||||||
options:
|
|
||||||
max-file: "5"
|
|
||||||
max-size: 2m
|
|
||||||
ports:
|
|
||||||
# - 127.0.0.1:9200:9200
|
|
||||||
- "9200"
|
|
||||||
ulimits:
|
|
||||||
memlock:
|
|
||||||
hard: -1
|
|
||||||
soft: -1
|
|
||||||
volumes:
|
|
||||||
- ./config/elasticsearch/roles.yml:/usr/share/elasticsearch/config/roles.yml
|
|
||||||
- ./config/elasticsearch/users:/usr/share/elasticsearch/config/users
|
|
||||||
- ./config/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles
|
|
||||||
- ./config/elasticsearch/service_tokens:/usr/share/elasticsearch/config/service_tokens
|
|
||||||
|
|
||||||
kibana:
|
|
||||||
depends_on:
|
|
||||||
elasticsearch:
|
|
||||||
condition: service_healthy
|
|
||||||
environment:
|
|
||||||
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
|
|
||||||
ELASTICSEARCH_PASSWORD: changeme
|
|
||||||
ELASTICSEARCH_USERNAME: kibana_system_user
|
|
||||||
ELASTIC_APM_SECRET_TOKEN: ""
|
|
||||||
ELASTIC_APM_SERVER_URL: http://apmserver:8200
|
|
||||||
ELASTIC_APM_TLS: "false"
|
|
||||||
SERVER_HOST: 0.0.0.0
|
|
||||||
SERVER_NAME: kibana.example.org
|
|
||||||
STATUS_ALLOWANONYMOUS: "true"
|
|
||||||
TELEMETRY_ENABLED: "false"
|
|
||||||
XPACK_APM_SERVICEMAPENABLED: "true"
|
|
||||||
XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: fhjskloppd678ehkdfdlliverpoolfcr
|
|
||||||
XPACK_FLEET_AGENTS_ELASTICSEARCH_HOSTS: '["http://elasticsearch:9200"]'
|
|
||||||
# XPACK_FLEET_REGISTRYURL: https://epr-snapshot.elastic.co
|
|
||||||
XPACK_MONITORING_ENABLED: "true"
|
|
||||||
XPACK_REPORTING_ROLES_ENABLED: "false"
|
|
||||||
XPACK_SECURITY_ENCRYPTIONKEY: fhjskloppd678ehkdfdlliverpoolfcr
|
|
||||||
XPACK_SECURITY_LOGINASSISTANCEMESSAGE: Login details: `admin/changeme`. Further details [here](https://github.com/elastic/apm-integration-testing#logging-in).
|
|
||||||
XPACK_SECURITY_SESSION_IDLETIMEOUT: 1M
|
|
||||||
XPACK_SECURITY_SESSION_LIFESPAN: 3M
|
|
||||||
XPACK_XPACK_MAIN_TELEMETRY_ENABLED: "false"
|
|
||||||
healthcheck:
|
|
||||||
interval: 10s
|
|
||||||
retries: 30
|
|
||||||
start_period: 10s
|
|
||||||
test: curl -s -k http://kibana:5601/api/status | grep -q 'All services are available'
|
|
||||||
image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
|
|
||||||
labels:
|
|
||||||
- co.elastic.apm.stack-version=${STACK_VERSION}
|
|
||||||
logging:
|
|
||||||
driver: json-file
|
|
||||||
options:
|
|
||||||
max-file: "5"
|
|
||||||
max-size: 2m
|
|
||||||
# ports:
|
|
||||||
# - 127.0.0.1:5601:5601
|
|
||||||
volumes:
|
|
||||||
- ./config/kibana/kibana-8.yml:/usr/share/kibana/config/kibana.yml
|
|
||||||
|
|
||||||
# Rather than mess aroud with threads in the test, just run `curl` in a
|
|
||||||
# loop to generate traces with a known path
|
|
||||||
tracegenerator:
|
|
||||||
depends_on:
|
|
||||||
apmserver:
|
|
||||||
condition: service_healthy
|
|
||||||
elasticsearch:
|
|
||||||
condition: service_healthy
|
|
||||||
kibana:
|
|
||||||
condition: service_healthy
|
|
||||||
# Official curl image
|
|
||||||
image: curlimages/curl
|
|
||||||
command: /bin/sh -c "while true; do curl -s -k -u admin:changeme http://elasticsearch:9200/_nodes/stats >/dev/null ; sleep 3; done"
|
|
||||||
|
|
||||||
volumes:
|
|
||||||
esdata:
|
|
||||||
driver: local
|
|
|
@ -1,27 +0,0 @@
|
||||||
-----BEGIN CERTIFICATE-----
|
|
||||||
MIIEpjCCAo4CCQDR9oXvJbopHjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDDAph
|
|
||||||
cG0tc2VydmVyMB4XDTE5MTExOTE1MjE0NVoXDTI5MTExNjE1MjE0NVowFTETMBEG
|
|
||||||
A1UEAwwKYXBtLXNlcnZlcjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
|
|
||||||
ANduj3tyeBIHj0Bf5aKMRImhRbkAaQ2p6T0WsHKlicd1P4/D5l783+vVsbwprRqR
|
|
||||||
qXAUsUWcUSYJXBX1qtC2MtKqi4xYUTAyQV5dgrMoCV+vtZY31SK4kolumd1vVMh+
|
|
||||||
po+IwueLvLMFK1tQGIXlJblSDYVauIt5rp79IIhWOY/YpcQy9RaxykljTYTbPjLW
|
|
||||||
m3T92bow1nLh5GL3ThJEAkLO+hkJv9716+YRWYtPcojiGzpLjFgF50MoP4Lilm9U
|
|
||||||
r2tBnqpvb2PwE1kkly8DDBtcg+HM4tgGsbdWo2Pgp82ARV4DL+JlNJ+SVQZAmTbc
|
|
||||||
3LMwxnUJtuKMeh2rwb9HOyuONXfF1PiEzyDhAlabyS6toAGy1mlMAop1ClO1wV5O
|
|
||||||
Ayy47TeD6ziNyMKB7/XHdW4rb16K6j6EV27Bg2ZK6Vrfkwm3aRbpztfVRMX+HMUp
|
|
||||||
ktH+V2OwJoP7l7lzw/q8yMdopG57zRJa1dx8NWP/UKi8Ej+87DYyWJODiNHD7PM7
|
|
||||||
9vfd47lNcWxw+p7ntEpnn6EeW2r7SlmfhtdIxL2DiTiKAq9Ktyi9cFnGnDfSDJST
|
|
||||||
T1G1vIDdG33Vt2Y5+wqzCGbYyMsAOaMdXZSeniXXFR4GX7iz+AGoKojBbmoo9VqP
|
|
||||||
mvbudNU+ysha4IJvTfOczJZgstxCXG+MXbEXFSgysImFAgMBAAEwDQYJKoZIhvcN
|
|
||||||
AQELBQADggIBAFh2YxRT6PaAXDq38rm25I91fCP9PzVPDuIkn9wl85e7avuh6FZi
|
|
||||||
R0nQG6+lB1i8XSm9UMl9+ISjE+EQqry6KB6mDsakGOsDuEUdZiw3sGJIUWQkQArB
|
|
||||||
ym5DqxKpeZBeVHBxnrEbQBV8s0j8uxd7X1E0ImfMKbKfNr/B5qPRXkREvydLWYvq
|
|
||||||
8yMcUPu1MiZFUgAGr9Py39kW3lbRPWZii/2bN8AB9h6gAhq5TiennfgJZsRiuSta
|
|
||||||
w/TmOcAuz4e/KPIzfvL/YCWbLyJ2vrIQeOc4N7jZfqMmLKgYCRyjI7+amfuyKPBW
|
|
||||||
J4psfJ0ssHdTxAUK65vghJ2s6FLvU3HoxzetZsJp5kj6CKYaFYkB4NkkYnlY8MP/
|
|
||||||
T68oOmdYwwwrcBmDtZwoppRb5zhev5k3aykgZ/B/vqVJE9oIPkp/7wqEP1WqSiUe
|
|
||||||
AgyQBu8UN4ho2Rf6nZezZ4cjW/0WyhGOHQBFmwPI2MBGsQxF2PF4lKkJtaywIEm7
|
|
||||||
4UsEQYK7Hf2J2OccWGvfo5HZ5tsSbuOGAf0bfHfaBQBsvzWet+TO6XX9VrWjnAKl
|
|
||||||
bH+mInmnd9v2oABFl9Djv/Cw+lEAxxkCTW+DcwdEFJREPab5xhQDEpQQ/Ef0ihvg
|
|
||||||
/ZtJQeoOYfrLN6K726QmoRWxvqxLyWK3gztcO1svHqr/cMt3ooLJEaqU
|
|
||||||
-----END CERTIFICATE-----
|
|
|
@ -1,52 +0,0 @@
|
||||||
-----BEGIN PRIVATE KEY-----
|
|
||||||
MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDXbo97cngSB49A
|
|
||||||
X+WijESJoUW5AGkNqek9FrBypYnHdT+Pw+Ze/N/r1bG8Ka0akalwFLFFnFEmCVwV
|
|
||||||
9arQtjLSqouMWFEwMkFeXYKzKAlfr7WWN9UiuJKJbpndb1TIfqaPiMLni7yzBStb
|
|
||||||
UBiF5SW5Ug2FWriLea6e/SCIVjmP2KXEMvUWscpJY02E2z4y1pt0/dm6MNZy4eRi
|
|
||||||
904SRAJCzvoZCb/e9evmEVmLT3KI4hs6S4xYBedDKD+C4pZvVK9rQZ6qb29j8BNZ
|
|
||||||
JJcvAwwbXIPhzOLYBrG3VqNj4KfNgEVeAy/iZTSfklUGQJk23NyzMMZ1CbbijHod
|
|
||||||
q8G/RzsrjjV3xdT4hM8g4QJWm8kuraABstZpTAKKdQpTtcFeTgMsuO03g+s4jcjC
|
|
||||||
ge/1x3VuK29eiuo+hFduwYNmSula35MJt2kW6c7X1UTF/hzFKZLR/ldjsCaD+5e5
|
|
||||||
c8P6vMjHaKRue80SWtXcfDVj/1CovBI/vOw2MliTg4jRw+zzO/b33eO5TXFscPqe
|
|
||||||
57RKZ5+hHltq+0pZn4bXSMS9g4k4igKvSrcovXBZxpw30gyUk09RtbyA3Rt91bdm
|
|
||||||
OfsKswhm2MjLADmjHV2Unp4l1xUeBl+4s/gBqCqIwW5qKPVaj5r27nTVPsrIWuCC
|
|
||||||
b03znMyWYLLcQlxvjF2xFxUoMrCJhQIDAQABAoICAQCfClIGsoUN2mLZBXLDw4W9
|
|
||||||
jT+pyjHEEpHLtXphyO+kPlzER71Elq7AriveW24d1TcfNUeBulr2F6bR12FZX4i5
|
|
||||||
mYoX/AND73Xusl4Q4Re6ej82PNWuIlCcAPi6Trxqn4VbJX2t7q1KBCDz8neIMZjd
|
|
||||||
7UNqFYV0Akr1uK1RuUYZebk21N+29139O8A4upp6cZCml9kq6W8HtNgkb6pFNcvt
|
|
||||||
gluELHxnn2mdmWVfwTEu+K1dJfTf7svB+m6Ys6qXWg9+wRzfehDj2JKQFsE9xaQk
|
|
||||||
dvItulIlZRvB28YXr/xxa6bKNtQc8NYej6sRSJNTu017RCDeumM3cLmeOfR4v59f
|
|
||||||
tkMWnFcA3ykmsaK2FiQyX+MoWvs5vdT7/yNIfz3a4MErcWg8z3FDbffKfbhgsb+2
|
|
||||||
z4Ub6fIRKZykW2ajN7t0378bMmJ3rPT66QF40aNNeWasF3EHcwekDPpsHIBJoY4G
|
|
||||||
9aG6uTUmRkC+NGeP9HroxkvDo2NbXn8XGOEJS64rwsME3CsUi1A5ZY0XLTxYptH6
|
|
||||||
X2TfC5oTmnsYB/wWqo26bTJc0bwDOueQWYap0aVtv3f/0tzueKepCbxdeG4ikA0U
|
|
||||||
2t3F+OUmoCZ5D0p+6zLvrTUPhPCFEynp+vGUvmbwozYi0NWzFyFqlvqRG1KLIVLG
|
|
||||||
ZRyTMYuZ/cWkv1SJYbEcaQKCAQEA/9HaJg2YACv7rx6/FesE/81u16OYTaahHngW
|
|
||||||
4M+5rT0+fNKYH/fYkwavQ/Gr6FSTls7F+8K9DVwoGLZRQ3t6epCXqGqX0uaY+iSH
|
|
||||||
O8eezXVnHzUaVE4KlwJY9xZ+K1iIf5zUb5hpaQI0jKS/igcxFAsutWiyenrz8eQp
|
|
||||||
MAycZmzkQMLbUsa1t6y0VaEaC4YMHyQ9ag2eMfqbG27plFQbYxllHXowGMFXPheY
|
|
||||||
xACwo5V5tJUgRP+HlrI4rf0vadMgVIKxVSUiqIzGREIkYrTAshFjkpHR5/R8s/kH
|
|
||||||
Xm8q2gdoJltBFJzA2B8MHXVi7mYDBlUmBoRKhzkl/TSray9j7wKCAQEA15VsNQZu
|
|
||||||
cZluboz/R4EDbEm1po2UBcNNiu/fgJ8BDUkLzJESIITY41fgvBbTun1fiuGeE+El
|
|
||||||
0o1w4hQhIiV1KAB44w69fJR0VELfMZiIcd8kd0sDgPPVrd1MzzKPZ9yg4mbEkCCO
|
|
||||||
V/EoTi8Ut27sMcl8059qm1qq7I5pzHwSziNa087m+5VdfmvJZJVipudngZ3QmRgU
|
|
||||||
KKcBhgFFSkncYezoq2XQfRcqkk0sORxDvsMmRInyHZh0l9zv46ihgTvErlCHtizV
|
|
||||||
V4HNO4OPz7FxUZ04iWSGZs4snu1cW2j+lbKuOkADveBYVmCcdZ3R0SH+A5skL0zG
|
|
||||||
tm6z0TNP/kFlywKCAQEA+lTdFu2od0qTADujG4yemL7rn2J8EEhlU86J/LXo6UiM
|
|
||||||
FFNz/5xltwIMkf00jqXswt9WR9W5cBBlQEFwZgu3v6YscebU6NE0k1sZZnshv8YK
|
|
||||||
AjTRrfusSzdF3YyKLFp3QAE0tHs9cz9wMsyojiYZdZa3v1dTh503h9YQI+/DQEuA
|
|
||||||
VIsZWfgPLEx5L231cZ9bz0GEQ3pN+nRUQdUYB0kCf8gC9YRy+lZ/y8gFeo9+SqVj
|
|
||||||
sj1XlY1DnkiKRGAEfJbYBTra0woCz1LqVTMwLdLY2adAe9XrxQKu4OJovpUkJrSm
|
|
||||||
yxnzJnt6DkLbdRxAki8K+LBsBGaCE67tqMhYkguOywKCAQAslEl77YiJFSEw2xcu
|
|
||||||
wg7jJZrahgxF5Mz0HgYporek96Xo91a4QsBWwqVGP7IoriRDo8P8eGJJ19Wv6lmv
|
|
||||||
pe9EBlT5HuMwD8K+adWde907Ltlrkad30vQsr8ZiUiI1Z/oc1wNuikzlAolDIZk3
|
|
||||||
FUjiQrf9SsnQtj8CC7D1B/MbjVQK2I4LGCftLHzIv9tWiCNvOiMYhVIl1eMKwtiB
|
|
||||||
NCTOWx8B0lv6gf/boPm0FZQsrk4LfjsCw7PYc2dnvEcpYiKZqS1nDn5PShgWZm4m
|
|
||||||
lJrKNairQI5KU/gGJS8j9+ItMnW0tegQK4QY2IGCENCCXnUYacxhu46byuiEKggw
|
|
||||||
m3VhAoIBAQCQa90StsZHqZ+J83do3kpvD+O5nURPnckznC2WJgraW49k5vltnJTT
|
|
||||||
zkFTqHMLfmYwAz1o15sPCqlkMD+fEUzg6Hpzxm7dOUppkf5KFbD7AnsYU9U8LamJ
|
|
||||||
HaET7Dq5TpjG7uoaHZZjs7cCHcWu2E8nIezyAtZ+rbTg/qW7bYMAlJTkerznGuDU
|
|
||||||
v0hNzCr/81o5rbX0UhetcmKVOprUSWzfrw5ElLhAtzM7zivbZSnsOny8pC33FtQ5
|
|
||||||
iQbVcNGUjfFCM95ZipxxN9z0FwxpJ1paCPGYA86u2olWl/VnVPqEj7WYzO8H5W2q
|
|
||||||
aXpWH6HVf6B10pQrWWwUAAHyqYS5bZkQ
|
|
||||||
-----END PRIVATE KEY-----
|
|
|
@ -1,210 +0,0 @@
|
||||||
/*
|
|
||||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
|
||||||
* or more contributor license agreements. Licensed under the Elastic License
|
|
||||||
* 2.0 and the Server Side Public License, v 1; you may not use this file except
|
|
||||||
* in compliance with, at your election, the Elastic License 2.0 or the Server
|
|
||||||
* Side Public License, v 1.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package org.elasticsearch.telemetry.apm;
|
|
||||||
|
|
||||||
import org.elasticsearch.client.Request;
|
|
||||||
import org.elasticsearch.client.Response;
|
|
||||||
import org.elasticsearch.common.settings.SecureString;
|
|
||||||
import org.elasticsearch.common.settings.Settings;
|
|
||||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
|
||||||
import org.elasticsearch.common.xcontent.support.XContentMapValues;
|
|
||||||
import org.elasticsearch.core.CheckedRunnable;
|
|
||||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.concurrent.TimeUnit;
|
|
||||||
import java.util.concurrent.atomic.AtomicReference;
|
|
||||||
import java.util.stream.Collectors;
|
|
||||||
|
|
||||||
import static org.hamcrest.Matchers.empty;
|
|
||||||
import static org.hamcrest.Matchers.equalTo;
|
|
||||||
import static org.hamcrest.Matchers.hasKey;
|
|
||||||
import static org.hamcrest.Matchers.not;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Tests around Elasticsearch's tracing support using APM.
|
|
||||||
*/
|
|
||||||
public class ApmIT extends ESRestTestCase {
|
|
||||||
|
|
||||||
private static final String DATA_STREAM = "traces-apm-default";
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Check that if we send HTTP traffic to Elasticsearch, then traces are captured in APM server. The traces are generated in
|
|
||||||
* a separate Docker container, which continually fetches `/_nodes/stats`. We check for the following:
|
|
||||||
* <ul>
|
|
||||||
* <li>A transaction for the REST API call
|
|
||||||
* <li>A span for the task started by the REST call
|
|
||||||
* <li>A child span started by the above span
|
|
||||||
* </ul>
|
|
||||||
* <p>This proves that the hierarchy of spans is being correctly captured.
|
|
||||||
*/
|
|
||||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90308")
|
|
||||||
public void testCapturesTracesForHttpTraffic() throws Exception {
|
|
||||||
checkTracesDataStream();
|
|
||||||
|
|
||||||
assertTracesExist();
|
|
||||||
}
|
|
||||||
|
|
||||||
private void checkTracesDataStream() throws Exception {
|
|
||||||
assertBusy(() -> {
|
|
||||||
final Response response = performRequestTolerantly(new Request("GET", "/_data_stream/" + DATA_STREAM));
|
|
||||||
assertOK(response);
|
|
||||||
}, 1, TimeUnit.MINUTES);
|
|
||||||
}
|
|
||||||
|
|
||||||
private void assertTracesExist() throws Exception {
|
|
||||||
// First look for a transaction for the REST calls that we make via the `tracegenerator` Docker container
|
|
||||||
|
|
||||||
final AtomicReference<String> transactionId = new AtomicReference<>();
|
|
||||||
assertBusy(() -> {
|
|
||||||
final Request tracesSearchRequest = new Request("GET", "/" + DATA_STREAM + "/_search");
|
|
||||||
tracesSearchRequest.setJsonEntity("""
|
|
||||||
{
|
|
||||||
"query": {
|
|
||||||
"match": { "transaction.name": "GET /_nodes/stats" }
|
|
||||||
}
|
|
||||||
}""");
|
|
||||||
final Response tracesSearchResponse = performRequestTolerantly(tracesSearchRequest);
|
|
||||||
assertOK(tracesSearchResponse);
|
|
||||||
|
|
||||||
final List<Map<String, Object>> documents = getDocuments(tracesSearchResponse);
|
|
||||||
assertThat(documents, not(empty()));
|
|
||||||
|
|
||||||
final Map<String, Object> tx = documents.get(0);
|
|
||||||
|
|
||||||
check(tx, "http.request.method", "GET");
|
|
||||||
check(tx, "http.response.status_code", 200);
|
|
||||||
check(tx, "labels.es_cluster_name", "docker-cluster");
|
|
||||||
check(tx, "labels.http_request_headers_authorization", "[REDACTED]");
|
|
||||||
check(tx, "span.kind", "SERVER");
|
|
||||||
check(tx, "transaction.result", "HTTP 2xx");
|
|
||||||
check(tx, "url.path", "/_nodes/stats");
|
|
||||||
|
|
||||||
final String txId = pluck(tx, "transaction.id");
|
|
||||||
transactionId.set(txId);
|
|
||||||
}, 1, TimeUnit.MINUTES);
|
|
||||||
|
|
||||||
// Then look for the task that the REST call starts
|
|
||||||
|
|
||||||
final AtomicReference<String> monitorNodeStatsSpanId = new AtomicReference<>();
|
|
||||||
assertBusy(() -> {
|
|
||||||
final List<Map<String, Object>> documents = searchByParentId(transactionId.get());
|
|
||||||
assertThat(documents, not(empty()));
|
|
||||||
|
|
||||||
final Map<String, Object> spansByName = documents.stream().collect(Collectors.toMap(d -> pluck(d, "span.name"), d -> d));
|
|
||||||
|
|
||||||
assertThat(spansByName, hasKey("cluster:monitor/nodes/stats"));
|
|
||||||
|
|
||||||
@SuppressWarnings("unchecked")
|
|
||||||
final Map<String, Object> span = (Map<String, Object>) spansByName.get("cluster:monitor/nodes/stats");
|
|
||||||
check(span, "span.kind", "INTERNAL");
|
|
||||||
|
|
||||||
final String spanId = pluck(span, "span.id");
|
|
||||||
monitorNodeStatsSpanId.set(spanId);
|
|
||||||
}, 1, TimeUnit.MINUTES);
|
|
||||||
|
|
||||||
// Finally look for the child task that the task above started
|
|
||||||
|
|
||||||
assertBusy(() -> {
|
|
||||||
final List<Map<String, Object>> documents = searchByParentId(monitorNodeStatsSpanId.get());
|
|
||||||
assertThat(documents, not(empty()));
|
|
||||||
|
|
||||||
final Map<String, Object> spansByName = documents.stream().collect(Collectors.toMap(d -> pluck(d, "span.name"), d -> d));
|
|
||||||
|
|
||||||
assertThat(spansByName, hasKey("cluster:monitor/nodes/stats[n]"));
|
|
||||||
}, 1, TimeUnit.MINUTES);
|
|
||||||
}
|
|
||||||
|
|
||||||
@SuppressWarnings("unchecked")
|
|
||||||
private <T> T pluck(Map<String, Object> map, String path) {
|
|
||||||
String[] parts = path.split("\\.");
|
|
||||||
|
|
||||||
Object result = map;
|
|
||||||
|
|
||||||
for (String part : parts) {
|
|
||||||
result = ((Map<String, ?>) result).get(part);
|
|
||||||
}
|
|
||||||
|
|
||||||
return (T) result;
|
|
||||||
}
|
|
||||||
|
|
||||||
private List<Map<String, Object>> searchByParentId(String parentId) throws IOException {
|
|
||||||
final Request searchRequest = new Request("GET", "/" + DATA_STREAM + "/_search");
|
|
||||||
searchRequest.setJsonEntity("""
|
|
||||||
{
|
|
||||||
"query": {
|
|
||||||
"match": { "parent.id": "%s" }
|
|
||||||
}
|
|
||||||
}""".formatted(parentId));
|
|
||||||
final Response response = performRequestTolerantly(searchRequest);
|
|
||||||
assertOK(response);
|
|
||||||
|
|
||||||
return getDocuments(response);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* We don't need to clean up the cluster, particularly as we have Kibana and APM server using ES as well as our test, so declare
|
|
||||||
* that we need to preserve the cluster in order to prevent the usual cleanup logic from running (and inevitably failing).
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
protected boolean preserveClusterUponCompletion() {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Turns exceptions into assertion failures so that {@link #assertBusy(CheckedRunnable)} can still retry.
|
|
||||||
*/
|
|
||||||
private Response performRequestTolerantly(Request request) {
|
|
||||||
try {
|
|
||||||
return client().performRequest(request);
|
|
||||||
} catch (Exception e) {
|
|
||||||
throw new AssertionError(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Customizes the client settings to use the same username / password that is configured in Docke.r
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
protected Settings restClientSettings() {
|
|
||||||
String token = basicAuthHeaderValue("admin", new SecureString("changeme".toCharArray()));
|
|
||||||
return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Constructs the correct cluster address by looking up the dynamic port that Elasticsearch is exposed on.
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
protected String getTestRestCluster() {
|
|
||||||
return "localhost:" + getProperty("test.fixtures.elasticsearch.tcp.9200");
|
|
||||||
}
|
|
||||||
|
|
||||||
@SuppressWarnings("unchecked")
|
|
||||||
private List<Map<String, Object>> getDocuments(Response response) throws IOException {
|
|
||||||
final Map<String, Object> stringObjectMap = ESRestTestCase.entityAsMap(response);
|
|
||||||
return (List<Map<String, Object>>) XContentMapValues.extractValue("hits.hits._source", stringObjectMap);
|
|
||||||
}
|
|
||||||
|
|
||||||
private String getProperty(String key) {
|
|
||||||
String value = System.getProperty(key);
|
|
||||||
if (value == null) {
|
|
||||||
throw new IllegalStateException(
|
|
||||||
"Could not find system properties from test.fixtures. "
|
|
||||||
+ "This test expects to run with the elasticsearch.test.fixtures Gradle plugin"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
return value;
|
|
||||||
}
|
|
||||||
|
|
||||||
private <T> void check(Map<String, Object> doc, String path, T expected) {
|
|
||||||
assertThat(pluck(doc, path), equalTo(expected));
|
|
||||||
}
|
|
||||||
}
|
|
|
@@ -50,8 +50,9 @@ public class UpgradeWithOldIndexSettingsIT extends ParameterizedFullClusterResta

     public void testMapperDynamicIndexSetting() throws IOException {
         assumeTrue(
-            "Setting deprecated in 6.x, but remained in 7.x and is no longer defined in 8.x",
-            getOldClusterTestVersion().before("8.0.0")
+            "Setting deprecated in 6.x, but was disallowed/removed incorrectly in some 7.x versions and can only be set safely in 7.17.22. "
+                + "Setting can't be used in 8.x ",
+            getOldClusterTestVersion().before("8.0.0") && getOldClusterTestVersion().after("7.17.21")
         );
         String indexName = "my-index";
         if (isRunningAgainstOldCluster()) {
@@ -91,9 +91,13 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
                 .startObject("properties")
                 .startObject("embedding")
                 .field("type", "dense_vector")
+                .field("index", "true")
                 .field("dims", 4)
+                .field("similarity", "cosine")
                 .startObject("index_options")
                 .field("type", "hnsw")
+                .field("m", "16")
+                .field("ef_construction", "100")
                 .endObject()
                 .endObject()
                 .endObject()
@@ -109,7 +113,7 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {

             int expectedCount = 10;

-            assertCount("test_index", expectedCount);
+            assertCount(indexName, expectedCount);

             if (isUpgradedCluster() && clusterSupportsDenseVectorTypeUpdate()) {
                 Request updateMapping = new Request("PUT", "/" + indexName + "/_mapping");
@@ -118,9 +122,13 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
                     .startObject("properties")
                     .startObject("embedding")
                     .field("type", "dense_vector")
+                    .field("index", "true")
                     .field("dims", 4)
+                    .field("similarity", "cosine")
                     .startObject("index_options")
                     .field("type", "int8_hnsw")
+                    .field("m", "16")
+                    .field("ef_construction", "100")
                     .endObject()
                     .endObject()
                     .endObject()
@@ -132,7 +140,7 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
                 index.setJsonEntity(BULK2);
                 assertOK(client().performRequest(index));
                 expectedCount = 20;
-                assertCount("test_index", expectedCount);
+                assertCount(indexName, expectedCount);
             }
         }
     }
@@ -152,7 +160,7 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
         Map<?, ?> response = entityAsMap(client().performRequest(new Request("GET", "_nodes")));
         Map<?, ?> nodes = (Map<?, ?>) response.get("nodes");

-        Predicate<Map<?, ?>> nodeSupportsBulkApi = n -> Version.fromString(n.get("version").toString()).onOrAfter(Version.V_8_14_0);
+        Predicate<Map<?, ?>> nodeSupportsBulkApi = n -> Version.fromString(n.get("version").toString()).onOrAfter(Version.V_8_15_0);

         return nodes.values().stream().map(o -> (Map<?, ?>) o).allMatch(nodeSupportsBulkApi);
     }
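Editorial note, not part of the commit: for readability, this is roughly the JSON mapping that the updated XContentBuilder calls above produce before the upgrade (the post-upgrade variant only swaps `hnsw` for `int8_hnsw`). The index name is an assumption, and numeric literals stand in for the string-valued `m` and `ef_construction` the builder writes.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class DenseVectorMappingSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // JSON equivalent of the XContentBuilder mapping in the hunk above.
            Request createIndex = new Request("PUT", "/my-dense-vector-index");
            createIndex.setJsonEntity("""
                {
                  "mappings": {
                    "properties": {
                      "embedding": {
                        "type": "dense_vector",
                        "index": true,
                        "dims": 4,
                        "similarity": "cosine",
                        "index_options": { "type": "hnsw", "m": 16, "ef_construction": 100 }
                      }
                    }
                  }
                }""");
            System.out.println(client.performRequest(createIndex).getStatusLine());
        }
    }
}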
@@ -106,8 +106,9 @@ public class UpgradeWithOldIndexSettingsIT extends AbstractRollingUpgradeTestCas

     public void testMapperDynamicIndexSetting() throws IOException {
         assumeTrue(
-            "Setting deprecated in 6.x, but remained in 7.x and is no longer defined in 8.x",
-            getOldClusterTestVersion().before("8.0.0")
+            "Setting deprecated in 6.x, but was disallowed/removed incorrectly in some 7.x versions and can only be set safely in 7.17.22. "
+                + "Setting can't be used in 8.x ",
+            getOldClusterTestVersion().before("8.0.0") && getOldClusterTestVersion().after("7.17.21")
         );
         String indexName = "my-index";
         if (isOldCluster()) {
@@ -473,6 +473,11 @@ setup:

 ---
 "standard retriever collapse":
+  - skip:
+      reason: "https://github.com/elastic/elasticsearch/issues/109476"
+      known_issues:
+        - cluster_feature: "gte_v8.13.0"
+          fixed_by: "gte_v8.14.0"
   - do:
       search:
         index: animals
@ -164,8 +164,8 @@ setup:
|
||||||
---
|
---
|
||||||
"Dynamic dimensions for hex-encoded string":
|
"Dynamic dimensions for hex-encoded string":
|
||||||
- requires:
|
- requires:
|
||||||
cluster_features: "gte_v8.15.0"
|
cluster_features: "gte_v8.14.1"
|
||||||
reason: 'hex encoding for byte vectors fixed in 8.15'
|
reason: 'hex encoding for byte vectors fixed in 8.14.1'
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
indices.create:
|
indices.create:
|
||||||
|
|
|
@ -1,4 +1,9 @@
|
||||||
setup:
|
setup:
|
||||||
|
- skip:
|
||||||
|
reason: "https://github.com/elastic/elasticsearch/issues/109476"
|
||||||
|
known_issues:
|
||||||
|
- cluster_feature: "gte_v8.13.0"
|
||||||
|
fixed_by: "gte_v8.14.0"
|
||||||
- do:
|
- do:
|
||||||
indices.create:
|
indices.create:
|
||||||
index: test
|
index: test
|
||||||
|
@ -85,7 +90,6 @@ setup:
|
||||||
|
|
||||||
---
|
---
|
||||||
"field collapsing and from":
|
"field collapsing and from":
|
||||||
|
|
||||||
- do:
|
- do:
|
||||||
search:
|
search:
|
||||||
rest_total_hits_as_int: true
|
rest_total_hits_as_int: true
|
||||||
|
|
|
@ -1,4 +1,9 @@
|
||||||
setup:
|
setup:
|
||||||
|
- skip:
|
||||||
|
reason: "https://github.com/elastic/elasticsearch/issues/109476"
|
||||||
|
known_issues:
|
||||||
|
- cluster_feature: "gte_v8.13.0"
|
||||||
|
fixed_by: "gte_v8.14.0"
|
||||||
- requires:
|
- requires:
|
||||||
cluster_features: ["gte_v8.10.0"]
|
cluster_features: ["gte_v8.10.0"]
|
||||||
reason: Collapse with max score was fixed in 8.10.0
|
reason: Collapse with max score was fixed in 8.10.0
|
||||||
|
|
|
@ -1,4 +1,9 @@
|
||||||
setup:
|
setup:
|
||||||
|
- skip:
|
||||||
|
reason: "https://github.com/elastic/elasticsearch/issues/109476"
|
||||||
|
known_issues:
|
||||||
|
- cluster_feature: "gte_v8.13.0"
|
||||||
|
fixed_by: "gte_v8.14.0"
|
||||||
- requires:
|
- requires:
|
||||||
cluster_features: "gte_v8.15.0"
|
cluster_features: "gte_v8.15.0"
|
||||||
reason: Collapse with rescore added in 8.15.0
|
reason: Collapse with rescore added in 8.15.0
|
||||||
|
|
|
@ -1,3 +1,9 @@
|
||||||
|
setup:
|
||||||
|
- skip:
|
||||||
|
reason: "https://github.com/elastic/elasticsearch/issues/109476"
|
||||||
|
known_issues:
|
||||||
|
- cluster_feature: "gte_v8.13.0"
|
||||||
|
fixed_by: "gte_v8.14.0"
|
||||||
---
|
---
|
||||||
"two levels fields collapsing":
|
"two levels fields collapsing":
|
||||||
|
|
||||||
|
|
|
@@ -13,7 +13,8 @@ import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.search.SortedSetSortField;
 import org.apache.lucene.util.Constants;
-import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
+import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
@@ -396,7 +397,12 @@ public class ShrinkIndexIT extends ESIntegTestCase {

 refreshClusterInfo();
 // kick off a retry and wait until it's done!
-ClusterRerouteResponse clusterRerouteResponse = clusterAdmin().prepareReroute().setRetryFailed(true).get();
+final var clusterRerouteResponse = safeGet(
+client().execute(
+TransportClusterRerouteAction.TYPE,
+new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setRetryFailed(true)
+)
+);
 long expectedShardSize = clusterRerouteResponse.getState().routingTable().index("target").shard(0).shard(0).getExpectedShardSize();
 // we support the expected shard size in the allocator to sum up over the source index shards
 assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0);

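The ShrinkIndexIT hunk above is the first of several in this commit that drop the old prepareReroute() request builder and execute the transport action directly. For readability, the invocation pattern is repeated below with normal indentation; reading the two TEST_REQUEST_TIMEOUT arguments as the master-node and ack timeouts that the builder used to supply implicitly is an assumption, not a statement about the API.

// Reroute with retry-failed, invoked directly against the transport action.
// safeGet(...) is the test helper that waits on the returned future.
final var clusterRerouteResponse = safeGet(
    client().execute(
        TransportClusterRerouteAction.TYPE,
        new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setRetryFailed(true)
    )
);
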
@@ -11,6 +11,7 @@ package org.elasticsearch.cluster;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.UnavailableShardsException;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.ActiveShardCount;
@@ -106,7 +107,7 @@ public class SimpleDataNodesIT extends ESIntegTestCase {

 internalCluster().startNode();
 internalCluster().startNode();
-clusterAdmin().prepareReroute().setRetryFailed(true).get();
+ClusterRerouteUtils.rerouteRetryFailed(client());
 }

 }

@@ -11,6 +11,7 @@ package org.elasticsearch.cluster.allocation;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetadata.State;
 import org.elasticsearch.cluster.routing.IndexRoutingTable;
@@ -184,7 +185,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
 .setWaitForNodes("3")
 .get();
 assertThat(health.isTimedOut(), equalTo(false));
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());
 health = clusterAdmin().prepareHealth()
 .setIndices("test")
 .setWaitForEvents(Priority.LANGUID)
@@ -210,7 +211,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
 .setWaitForNodes("4")
 .get();
 assertThat(health.isTimedOut(), equalTo(false));
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());
 health = clusterAdmin().prepareHealth()
 .setIndices("test")
 .setWaitForEvents(Priority.LANGUID)

@@ -11,7 +11,9 @@ package org.elasticsearch.cluster.allocation;
 import org.apache.logging.log4j.Level;
 import org.apache.lucene.tests.util.LuceneTestCase;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
 import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
@@ -99,12 +101,14 @@ public class ClusterRerouteIT extends ESIntegTestCase {
 assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));

 logger.info("--> explicitly allocate shard 1, *under dry_run*");
-state = clusterAdmin().prepareReroute()
-.setExplain(randomBoolean())
+state = safeGet(
+client().execute(
+TransportClusterRerouteAction.TYPE,
+new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
 .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
-.setDryRun(true)
-.get()
-.getState();
+.dryRun(true)
+)
+).getState();
 assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
 assertThat(
 state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@@ -116,11 +120,13 @@ public class ClusterRerouteIT extends ESIntegTestCase {
 assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));

 logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
-state = clusterAdmin().prepareReroute()
-.setExplain(randomBoolean())
+state = safeGet(
+client().execute(
+TransportClusterRerouteAction.TYPE,
+new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
 .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
-.get()
-.getState();
+)
+).getState();
 assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
 assertThat(
 state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@@ -143,11 +149,13 @@ public class ClusterRerouteIT extends ESIntegTestCase {
 );

 logger.info("--> move shard 1 primary from node1 to node2");
-state = clusterAdmin().prepareReroute()
-.setExplain(randomBoolean())
+state = safeGet(
+client().execute(
+TransportClusterRerouteAction.TYPE,
+new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
 .add(new MoveAllocationCommand("test", 0, node_1, node_2))
-.get()
-.getState();
+)
+).getState();

 assertThat(
 state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@@ -250,11 +258,13 @@ public class ClusterRerouteIT extends ESIntegTestCase {
 assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));

 logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
-state = clusterAdmin().prepareReroute()
-.setExplain(randomBoolean())
+state = safeGet(
+client().execute(
+TransportClusterRerouteAction.TYPE,
+new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
 .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
-.get()
-.getState();
+)
+).getState();
 assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
 assertThat(
 state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@@ -295,17 +305,19 @@ public class ClusterRerouteIT extends ESIntegTestCase {
 internalCluster().startNode(commonSettings);
 // wait a bit for the cluster to realize that the shard is not there...
 // TODO can we get around this? the cluster is RED, so what do we wait for?
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());
 assertThat(
 clusterAdmin().prepareHealth().setIndices("test").setWaitForNodes("2").get().getStatus(),
 equalTo(ClusterHealthStatus.RED)
 );
 logger.info("--> explicitly allocate primary");
-state = clusterAdmin().prepareReroute()
-.setExplain(randomBoolean())
+state = safeGet(
+client().execute(
+TransportClusterRerouteAction.TYPE,
+new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
 .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
-.get()
-.getState();
+)
+).getState();
 assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
 assertThat(
 state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@@ -350,7 +362,12 @@ public class ClusterRerouteIT extends ESIntegTestCase {

 logger.info("--> try to move the shard from node1 to node2");
 MoveAllocationCommand cmd = new MoveAllocationCommand("test", 0, node_1, node_2);
-ClusterRerouteResponse resp = clusterAdmin().prepareReroute().add(cmd).setExplain(true).get();
+ClusterRerouteResponse resp = safeGet(
+client().execute(
+TransportClusterRerouteAction.TYPE,
+new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).add(cmd).explain(true)
+)
+);
 RoutingExplanations e = resp.getExplanations();
 assertThat(e.explanations().size(), equalTo(1));
 RerouteExplanation explanation = e.explanations().get(0);
@@ -398,11 +415,14 @@ public class ClusterRerouteIT extends ESIntegTestCase {
 );

 AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
-ClusterRerouteResponse dryRunResponse = clusterAdmin().prepareReroute()
-.setExplain(randomBoolean())
-.setDryRun(true)
+ClusterRerouteResponse dryRunResponse = safeGet(
+client().execute(
+TransportClusterRerouteAction.TYPE,
+new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
+.dryRun(true)
 .add(dryRunAllocation)
-.get();
+)
+);

 // during a dry run, messages exist but are not logged or exposed
 assertThat(dryRunResponse.getExplanations().getYesDecisionMessages(), hasSize(1));
@@ -431,11 +451,16 @@ public class ClusterRerouteIT extends ESIntegTestCase {

 AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
 AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true);
-ClusterRerouteResponse response = clusterAdmin().prepareReroute()
-.setExplain(true) // so we get a NO decision back rather than an exception
+ClusterRerouteResponse response = safeGet(
+client().execute(
+TransportClusterRerouteAction.TYPE,
+new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
+// set explain(true) so we get a NO decision back rather than an exception
+.explain(true)
 .add(yesDecisionAllocation)
 .add(noDecisionAllocation)
-.get();
+)
+);

 assertThat(response.getExplanations().getYesDecisionMessages(), hasSize(1));
 assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary"));
@@ -482,9 +507,9 @@ public class ClusterRerouteIT extends ESIntegTestCase {
 )) {
 try {
 enableIndexBlock("test-blocks", blockSetting);
-assertAcked(
-clusterAdmin().prepareReroute()
-.add(new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))
+ClusterRerouteUtils.reroute(
+client(),
+new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))
 );

 ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth()
@@ -502,8 +527,11 @@ public class ClusterRerouteIT extends ESIntegTestCase {
 try {
 setClusterReadOnly(true);
 assertBlocked(
-clusterAdmin().prepareReroute()
-.add(new MoveAllocationCommand("test-blocks", 1, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))
+null,
+ClusterRerouteUtils.expectRerouteFailure(
+client(),
+new MoveAllocationCommand("test-blocks", 1, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))
+)
 );
 } finally {
 setClusterReadOnly(false);

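Most of the remaining hunks swap ad-hoc prepareReroute() calls for a shared ClusterRerouteUtils helper. The class below is a hypothetical sketch of the shape implied by the call sites in this commit (reroute, rerouteRetryFailed, expectRerouteFailure); the real utility behind the new org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils import lives in the Elasticsearch test framework and may differ in details such as timeout constants and assertion helpers.

import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
import org.elasticsearch.client.internal.ElasticsearchClient;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
import org.elasticsearch.core.TimeValue;

// Hypothetical sketch only: mirrors how ClusterRerouteUtils is used in the hunks above.
public final class ClusterRerouteUtilsSketch {

    // Stand-in for the TEST_REQUEST_TIMEOUT constant used by the real tests.
    private static final TimeValue TIMEOUT = TimeValue.timeValueSeconds(30);

    private ClusterRerouteUtilsSketch() {}

    /** Runs a reroute with the given (possibly empty) set of allocation commands and waits for it. */
    public static void reroute(ElasticsearchClient client, AllocationCommand... commands) {
        ClusterRerouteRequest request = new ClusterRerouteRequest(TIMEOUT, TIMEOUT);
        if (commands.length > 0) {
            request.add(commands);
        }
        client.execute(TransportClusterRerouteAction.TYPE, request).actionGet();
    }

    /** Runs a reroute that retries shards whose allocation previously failed. */
    public static void rerouteRetryFailed(ElasticsearchClient client) {
        client.execute(
            TransportClusterRerouteAction.TYPE,
            new ClusterRerouteRequest(TIMEOUT, TIMEOUT).setRetryFailed(true)
        ).actionGet();
    }

    /** Runs a reroute that is expected to fail and returns the exception it threw. */
    public static Exception expectRerouteFailure(ElasticsearchClient client, AllocationCommand command) {
        try {
            reroute(client, command);
            throw new AssertionError("expected the reroute to fail");
        } catch (Exception e) {
            return e;
        }
    }
}
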
@@ -8,6 +8,7 @@

 package org.elasticsearch.cluster.allocation;

+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.AutoExpandReplicas;
@@ -160,7 +161,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
 }
 logger.info("--> remove index from the first node");
 updateIndexSettings(Settings.builder().put("index.routing.allocation.exclude._name", node_0), "test");
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());
 ensureGreen("test");

 logger.info("--> verify all shards are allocated on node_1 now");
@@ -175,7 +176,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {

 logger.info("--> disable allocation filtering ");
 updateIndexSettings(Settings.builder().put("index.routing.allocation.exclude._name", ""), "test");
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());
 ensureGreen("test");

 logger.info("--> verify that there are shards allocated on both nodes now");

@@ -10,6 +10,7 @@ package org.elasticsearch.cluster.routing;

 import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanationUtils;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.indices.stats.ShardStats;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.cluster.ClusterState;
@@ -106,7 +107,7 @@ public class AllocationIdIT extends ESIntegTestCase {
 checkNoValidShardCopy(indexName, shardId);

 // allocate stale primary
-client(node1).admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true)).get();
+ClusterRerouteUtils.reroute(client(node1), new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true));

 // allocation fails due to corruption marker
 assertBusy(() -> {
@@ -127,7 +128,7 @@ public class AllocationIdIT extends ESIntegTestCase {
 checkNoValidShardCopy(indexName, shardId);

 // no any valid shard is there; have to invoke AllocateStalePrimary again
-clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true)).get();
+ClusterRerouteUtils.reroute(client(), new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true));

 ensureYellow(indexName);

@@ -9,7 +9,9 @@
 package org.elasticsearch.cluster.routing;

 import org.elasticsearch.action.DocWriteResponse;
-import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
+import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
 import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
@@ -156,12 +158,13 @@ public class PrimaryAllocationIT extends ESIntegTestCase {

 logger.info("--> check that old primary shard does not get promoted to primary again");
 // kick reroute and wait for all shard states to be fetched
-client(master).admin().cluster().prepareReroute().get();
+ClusterRerouteUtils.reroute(client(master));
 assertBusy(
 () -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetches(), equalTo(0))
 );
 // kick reroute a second time and check that all shards are unassigned
-assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2));
+ClusterRerouteUtils.reroute(client(master));
+assertThat(client(master).admin().cluster().prepareState().get().getState().getRoutingNodes().unassigned().size(), equalTo(2));
 return inSyncDataPathSettings;
 }

@@ -207,11 +210,16 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
 );

 logger.info("--> force allocation of stale copy to node that does not have shard copy");
-Throwable iae = expectThrows(
+assertEquals(
+"No data for shard [0] of index [test] found on any node",
+asInstanceOf(
 IllegalArgumentException.class,
-clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true))
+ClusterRerouteUtils.expectRerouteFailure(
+client(),
+new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)
+)
+).getMessage()
 );
-assertThat(iae.getMessage(), equalTo("No data for shard [0] of index [test] found on any node"));

 logger.info("--> wait until shard is failed and becomes unassigned again");
 assertTrue(
@@ -252,16 +260,16 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
 TransportIndicesShardStoresAction.TYPE,
 new IndicesShardStoresRequest(idxName)
 ).get().getStoreStatuses().get(idxName);
-ClusterRerouteRequestBuilder rerouteBuilder = clusterAdmin().prepareReroute();
+final var rerouteRequest = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
 for (Map.Entry<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : storeStatuses.entrySet()) {
 int shardId = shardStoreStatuses.getKey();
 IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.getValue());
 logger.info("--> adding allocation command for shard {}", shardId);
 // force allocation based on node id
 if (useStaleReplica) {
-rerouteBuilder.add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, storeStatus.getNode().getId(), true));
+rerouteRequest.add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, storeStatus.getNode().getId(), true));
 } else {
-rerouteBuilder.add(new AllocateEmptyPrimaryAllocationCommand(idxName, shardId, storeStatus.getNode().getId(), true));
+rerouteRequest.add(new AllocateEmptyPrimaryAllocationCommand(idxName, shardId, storeStatus.getNode().getId(), true));
 }
 }

@@ -280,7 +288,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
 final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master);
 clusterService.addListener(clusterStateListener);

-rerouteBuilder.get();
+assertAcked(safeGet(client().execute(TransportClusterRerouteAction.TYPE, rerouteRequest)));

 assertTrue(clusterStateChangeLatch.await(30, TimeUnit.SECONDS));
 clusterService.removeListener(clusterStateListener);
@@ -341,13 +349,16 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
 .forEach(status -> nodeNames.remove(status.getNode().getName()));
 assertThat(nodeNames, hasSize(1));
 final String nodeWithoutData = nodeNames.get(0);
-Throwable iae = expectThrows(
+assertEquals(
+"No data for shard [" + shardId + "] of index [" + idxName + "] found on node [" + nodeWithoutData + ']',
+asInstanceOf(
 IllegalArgumentException.class,
-clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true))
-);
-assertThat(
-iae.getMessage(),
-equalTo("No data for shard [" + shardId + "] of index [" + idxName + "] found on node [" + nodeWithoutData + ']')
+ClusterRerouteUtils.expectRerouteFailure(
+client(),
+new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)
+)
+).getMessage()
 );
 }

@@ -359,22 +370,29 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
 ensureGreen();
 final String nodeWithoutData = randomFrom(dataNodes);
 final int shardId = 0;
-IllegalArgumentException iae = expectThrows(
+assertEquals(
+"[allocate_stale_primary] primary [" + idxName + "][" + shardId + "] is already assigned",
+asInstanceOf(
 IllegalArgumentException.class,
-clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true))
+ClusterRerouteUtils.expectRerouteFailure(
+client(),
+new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)
+)
+).getMessage()
 );
-assertThat(iae.getMessage(), equalTo("[allocate_stale_primary] primary [" + idxName + "][" + shardId + "] is already assigned"));
 }

 public void testForceStaleReplicaToBePromotedForMissingIndex() {
 internalCluster().startMasterOnlyNode(Settings.EMPTY);
 final String dataNode = internalCluster().startDataOnlyNode();
 final String idxName = "test";
-IndexNotFoundException ex = expectThrows(
+assertEquals(
+idxName,
+asInstanceOf(
 IndexNotFoundException.class,
-clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, 0, dataNode, true))
+ClusterRerouteUtils.expectRerouteFailure(client(), new AllocateStalePrimaryAllocationCommand(idxName, 0, dataNode, true))
+).getIndex().getName()
 );
-assertThat(ex.getIndex().getName(), equalTo(idxName));
 }

 public void testForcePrimaryShardIfAllocationDecidersSayNoAfterIndexCreation() throws ExecutionException, InterruptedException {
@@ -386,7 +404,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {

 assertThat(clusterAdmin().prepareState().get().getState().getRoutingTable().shardRoutingTable("test", 0).assignedShards(), empty());

-clusterAdmin().prepareReroute().add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true)).get();
+ClusterRerouteUtils.reroute(client(), new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true));
 ensureGreen("test");
 }

@@ -11,6 +11,7 @@ package org.elasticsearch.cluster.routing;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
 import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction;
 import org.elasticsearch.action.search.ClosePointInTimeRequest;
@@ -422,7 +423,7 @@ public class ShardRoutingRoleIT extends ESIntegTestCase {
 updateIndexSettings(Settings.builder().put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name", "not-a-node"), "test");
 AllocationCommand cancelPrimaryCommand;
 while ((cancelPrimaryCommand = getCancelPrimaryCommand()) != null) {
-clusterAdmin().prepareReroute().add(cancelPrimaryCommand).get();
+ClusterRerouteUtils.reroute(client(), cancelPrimaryCommand);
 }
 } finally {
 masterClusterService.removeListener(routingTableWatcher);

@@ -8,6 +8,7 @@

 package org.elasticsearch.cluster.routing.allocation.decider;

+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
 import org.elasticsearch.action.admin.indices.stats.ShardStats;
@@ -314,7 +315,7 @@ public class DiskThresholdDeciderIT extends DiskUsageIntegTestCase {
 .values()
 .stream()
 .allMatch(e -> e.freeBytes() > WATERMARK_BYTES)) {
-assertAcked(clusterAdmin().prepareReroute());
+ClusterRerouteUtils.reroute(client());
 }

 assertFalse(

@@ -7,6 +7,7 @@
 */
 package org.elasticsearch.cluster.routing.allocation.decider;

+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.common.settings.Settings;
@@ -50,7 +51,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
 .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE),
 "test"
 );
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());
 ensureGreen();
 assertAllShardsOnNodes("test", firstNode);
 assertAllShardsOnNodes("test_1", firstNode);
@@ -65,7 +66,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
 "test"
 );
 logger.info("--> balance index [test]");
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());
 ensureGreen("test");
 Set<String> test = assertAllShardsOnNodes("test", firstNode, secondNode);
 assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2));
@@ -80,7 +81,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
 )
 );
 logger.info("--> balance index [test_1]");
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());
 ensureGreen("test_1");
 Set<String> test_1 = assertAllShardsOnNodes("test_1", firstNode, secondNode);
 assertThat("index: [test_1] expected to be rebalanced on both nodes", test_1.size(), equalTo(2));

@@ -13,6 +13,7 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.support.PlainActionFuture;
@@ -233,7 +234,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
 // is the super-connected node and recovery source and target are on opposite sides of the bridge
 if (disruptionScheme instanceof NetworkDisruption networkDisruption
 && networkDisruption.getDisruptedLinks() instanceof Bridge) {
-assertBusy(() -> assertAcked(clusterAdmin().prepareReroute().setRetryFailed(true)));
+assertBusy(() -> ClusterRerouteUtils.rerouteRetryFailed(client()));
 }
 ensureGreen("test");

@@ -8,6 +8,7 @@

 package org.elasticsearch.discovery;

+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
 import org.elasticsearch.action.admin.indices.stats.ShardStats;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
@@ -116,7 +117,7 @@ public class MasterDisruptionIT extends AbstractDisruptionTestCase {

 logger.info("issue a reroute");
 // trigger a reroute now, instead of waiting for the background reroute of RerouteService
-assertAcked(clusterAdmin().prepareReroute());
+ClusterRerouteUtils.reroute(client());
 // and wait for it to finish and for the cluster to stabilize
 ensureGreen("test");

@@ -20,6 +20,7 @@ import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.NativeFSLockFactory;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
 import org.elasticsearch.action.admin.indices.stats.ShardStats;
@@ -219,7 +220,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
 );
 });

-clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, nodeId, true)).get();
+ClusterRerouteUtils.reroute(client(), new AllocateStalePrimaryAllocationCommand(indexName, 0, nodeId, true));

 assertBusy(() -> {
 final var explanation = getClusterAllocationExplanation(client(), indexName, 0, true);
@@ -373,7 +374,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
 );
 });

-clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, primaryNodeId, true)).get();
+ClusterRerouteUtils.reroute(client(), new AllocateStalePrimaryAllocationCommand(indexName, 0, primaryNodeId, true));

 assertBusy(() -> {
 final var explanation = getClusterAllocationExplanation(client(), indexName, 0, true);

@@ -19,6 +19,7 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
@@ -284,7 +285,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
 * we corrupted the primary shard - now lets make sure we never recover from it successfully
 */
 setReplicaCount(1, "test");
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());

 boolean didClusterTurnRed = waitUntil(() -> {
 ClusterHealthStatus test = clusterAdmin().health(new ClusterHealthRequest("test")).actionGet().getStatus();
@@ -368,7 +369,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
 .put("index.routing.allocation.include._name", primariesNode.getName() + "," + unluckyNode.getName()),
 "test"
 );
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());
 hasCorrupted.await();
 corrupt.set(false);
 ensureGreen();
@@ -493,7 +494,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
 .put("index.routing.allocation.exclude._name", unluckyNode.getName()),
 "test"
 );
-clusterAdmin().prepareReroute().setRetryFailed(true).get();
+ClusterRerouteUtils.rerouteRetryFailed(client());
 ensureGreen("test");
 assertThatAllShards("test", shard -> {
 assertThat(shard.primaryShard().currentNodeId(), not(equalTo(unluckyNode.getId())));

@@ -10,6 +10,7 @@ package org.elasticsearch.indexlifecycle;

 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
@@ -79,7 +80,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
 final String node2 = getLocalNodeId(server_2);

 // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());

 clusterHealth = clusterAdmin().health(
 new ClusterHealthRequest(new String[] {}).waitForGreenStatus().waitForNodes("2").waitForNoRelocatingShards(true)
@@ -120,7 +121,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
 final String node3 = getLocalNodeId(server_3);

 // explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());

 clusterHealth = clusterAdmin().prepareHealth()
 .setWaitForGreenStatus()
@@ -174,7 +175,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
 assertThat(clusterHealth.isTimedOut(), equalTo(false));
 assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));

-clusterAdmin().prepareReroute().get();
+ClusterRerouteUtils.reroute(client());

 clusterHealth = clusterAdmin().prepareHealth()
 .setWaitForGreenStatus()

@@ -8,6 +8,7 @@
 package org.elasticsearch.indices;

 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
@@ -115,7 +116,7 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase {
 throw new RuntimeException("FAIL");
 }
 });
-clusterAdmin().prepareReroute().add(new MoveAllocationCommand("index1", 0, node1, node2)).get();
+ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("index1", 0, node1, node2));
 ensureGreen("index1");

 var state = clusterAdmin().prepareState().get().getState();

@@ -11,6 +11,7 @@ package org.elasticsearch.indices.cluster;
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.core.LogEvent;
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
@@ -28,8 +29,6 @@ import org.elasticsearch.test.junit.annotations.TestLogging;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;

-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-
 public class ShardLockFailureIT extends ESIntegTestCase {

 @TestLogging(reason = "checking DEBUG logs from ICSS", value = "org.elasticsearch.indices.cluster.IndicesClusterStateService:DEBUG")
@@ -165,7 +164,7 @@ public class ShardLockFailureIT extends ESIntegTestCase {
 assertEquals(1, clusterHealthResponse.getUnassignedShards());
 }

-assertAcked(clusterAdmin().prepareReroute().setRetryFailed(true));
+ClusterRerouteUtils.rerouteRetryFailed(client());
 ensureGreen(indexName);
 }
 }

@ -11,6 +11,7 @@ package org.elasticsearch.indices.recovery;
|
||||||
import org.apache.logging.log4j.Level;
|
import org.apache.logging.log4j.Level;
|
||||||
import org.elasticsearch.action.DocWriteResponse;
|
import org.elasticsearch.action.DocWriteResponse;
|
||||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||||
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
|
||||||
import org.elasticsearch.action.delete.DeleteResponse;
|
import org.elasticsearch.action.delete.DeleteResponse;
|
||||||
import org.elasticsearch.cluster.ClusterState;
|
import org.elasticsearch.cluster.ClusterState;
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||||
|
@ -63,9 +64,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
|
||||||
relocationTarget = randomFrom(dataNodes);
|
relocationTarget = randomFrom(dataNodes);
|
||||||
}
|
}
|
||||||
logger.info("--> [iteration {}] relocating from {} to {} ", i, relocationSource.getName(), relocationTarget.getName());
|
logger.info("--> [iteration {}] relocating from {} to {} ", i, relocationSource.getName(), relocationTarget.getName());
|
||||||
clusterAdmin().prepareReroute()
|
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId()));
|
||||||
.add(new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId()))
|
|
||||||
.get();
|
|
||||||
ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth()
|
ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth()
|
||||||
.setTimeout(TimeValue.timeValueSeconds(60))
|
.setTimeout(TimeValue.timeValueSeconds(60))
|
||||||
.setWaitForEvents(Priority.LANGUID)
|
.setWaitForEvents(Priority.LANGUID)
|
||||||
|
|
|
@ -28,6 +28,7 @@ import org.elasticsearch.action.ActionListener;
|
||||||
import org.elasticsearch.action.DocWriteResponse;
|
import org.elasticsearch.action.DocWriteResponse;
|
||||||
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
|
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
|
||||||
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
|
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
|
||||||
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
|
||||||
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
|
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
|
||||||
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
|
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
|
||||||
import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
|
import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
|
||||||
|
@ -281,7 +282,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
|
||||||
*/
|
*/
|
||||||
public void startShardRecovery(String sourceNode, String targetNode) throws Exception {
|
public void startShardRecovery(String sourceNode, String targetNode) throws Exception {
|
||||||
logger.info("--> updating cluster settings with moving shard from node `{}` to node `{}`", sourceNode, targetNode);
|
logger.info("--> updating cluster settings with moving shard from node `{}` to node `{}`", sourceNode, targetNode);
|
||||||
clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, sourceNode, targetNode)).get().getState();
|
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand(INDEX_NAME, 0, sourceNode, targetNode));
|
||||||
|
|
||||||
logger.info("--> requesting shard recovery");
|
logger.info("--> requesting shard recovery");
|
||||||
indicesAdmin().prepareRecoveries(INDEX_NAME).get();
|
indicesAdmin().prepareRecoveries(INDEX_NAME).get();
|
||||||
|
@ -553,7 +554,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
|
||||||
throttleRecovery10Seconds(shardSize);
|
throttleRecovery10Seconds(shardSize);
|
||||||
|
|
||||||
logger.info("--> move shard from: {} to: {}", nodeA, nodeB);
|
logger.info("--> move shard from: {} to: {}", nodeA, nodeB);
|
||||||
clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeB)).get().getState();
|
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeB));
|
||||||
|
|
||||||
logger.info("--> waiting for recovery to start both on source and target");
|
logger.info("--> waiting for recovery to start both on source and target");
|
||||||
final Index index = resolveIndex(INDEX_NAME);
|
final Index index = resolveIndex(INDEX_NAME);
|
||||||
|
@ -639,7 +640,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
|
||||||
throttleRecovery10Seconds(shardSize);
|
throttleRecovery10Seconds(shardSize);
|
||||||
|
|
||||||
logger.info("--> move replica shard from: {} to: {}", nodeA, nodeC);
|
logger.info("--> move replica shard from: {} to: {}", nodeA, nodeC);
|
||||||
clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeC)).get().getState();
|
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeC));
|
||||||
|
|
||||||
response = indicesAdmin().prepareRecoveries(INDEX_NAME).get();
|
response = indicesAdmin().prepareRecoveries(INDEX_NAME).get();
|
||||||
recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
|
recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
|
||||||
|
@ -1643,7 +1644,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
|
||||||
internalCluster().stopRandomDataNode();
|
internalCluster().stopRandomDataNode();
|
||||||
internalCluster().stopRandomDataNode();
|
internalCluster().stopRandomDataNode();
|
||||||
final String nodeWithoutData = internalCluster().startDataOnlyNode();
|
final String nodeWithoutData = internalCluster().startDataOnlyNode();
|
||||||
assertAcked(clusterAdmin().prepareReroute().add(new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeWithoutData, true)));
|
ClusterRerouteUtils.reroute(client(), new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeWithoutData, true));
|
||||||
internalCluster().startDataOnlyNode(randomNodeDataPathSettings);
|
internalCluster().startDataOnlyNode(randomNodeDataPathSettings);
|
||||||
ensureGreen();
|
ensureGreen();
|
||||||
for (ShardStats shardStats : indicesAdmin().prepareStats(indexName).get().getIndex(indexName).getShards()) {
|
for (ShardStats shardStats : indicesAdmin().prepareStats(indexName).get().getIndex(indexName).getShards()) {
|
||||||
|
@ -1712,7 +1713,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
|
||||||
);
|
);
|
||||||
internalCluster().startNode();
|
internalCluster().startNode();
|
||||||
internalCluster().startNode();
|
internalCluster().startNode();
|
||||||
clusterAdmin().prepareReroute().setRetryFailed(true).get();
|
ClusterRerouteUtils.rerouteRetryFailed(client());
|
||||||
assertAcked(indicesAdmin().prepareDelete("test")); // cancel recoveries
|
assertAcked(indicesAdmin().prepareDelete("test")); // cancel recoveries
|
||||||
assertBusy(() -> {
|
assertBusy(() -> {
|
||||||
for (PeerRecoverySourceService recoveryService : internalCluster().getDataNodeInstances(PeerRecoverySourceService.class)) {
|
for (PeerRecoverySourceService recoveryService : internalCluster().getDataNodeInstances(PeerRecoverySourceService.class)) {
|
||||||
|
|
|
@ -7,13 +7,13 @@
|
||||||
*/
|
*/
|
||||||
package org.elasticsearch.indices.state;
|
package org.elasticsearch.indices.state;
|
||||||
|
|
||||||
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
|
||||||
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
import org.elasticsearch.action.support.master.AcknowledgedResponse;
|
||||||
import org.elasticsearch.cluster.ClusterState;
|
import org.elasticsearch.cluster.ClusterState;
|
||||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||||
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
|
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
|
||||||
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
|
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
|
||||||
import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
|
import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
|
||||||
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
|
||||||
|
@ -125,7 +125,7 @@ public class CloseWhileRelocatingShardsIT extends ESIntegTestCase {
|
||||||
final CountDownLatch release = new CountDownLatch(indices.length);
|
final CountDownLatch release = new CountDownLatch(indices.length);
|
||||||
|
|
||||||
// relocate one shard for every index to be closed
|
// relocate one shard for every index to be closed
|
||||||
final AllocationCommands commands = new AllocationCommands();
|
final var commands = new ArrayList<AllocationCommand>();
|
||||||
for (final String index : indices) {
|
for (final String index : indices) {
|
||||||
final NumShards numShards = getNumShards(index);
|
final NumShards numShards = getNumShards(index);
|
||||||
final int shardId = numShards.numPrimaries == 1 ? 0 : randomIntBetween(0, numShards.numPrimaries - 1);
|
final int shardId = numShards.numPrimaries == 1 ? 0 : randomIntBetween(0, numShards.numPrimaries - 1);
|
||||||
|
@ -146,8 +146,7 @@ public class CloseWhileRelocatingShardsIT extends ESIntegTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Build the list of shards for which recoveries will be blocked
|
// Build the list of shards for which recoveries will be blocked
|
||||||
final Set<ShardId> blockedShards = commands.commands()
|
final Set<ShardId> blockedShards = commands.stream()
|
||||||
.stream()
|
|
||||||
.map(c -> (MoveAllocationCommand) c)
|
.map(c -> (MoveAllocationCommand) c)
|
||||||
.map(c -> new ShardId(clusterService.state().metadata().index(c.index()).getIndex(), c.shardId()))
|
.map(c -> new ShardId(clusterService.state().metadata().index(c.index()).getIndex(), c.shardId()))
|
||||||
.collect(Collectors.toSet());
|
.collect(Collectors.toSet());
|
||||||
|
@ -185,7 +184,7 @@ public class CloseWhileRelocatingShardsIT extends ESIntegTestCase {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
assertAcked(clusterAdmin().reroute(new ClusterRerouteRequest().commands(commands)).get());
|
ClusterRerouteUtils.reroute(client(), commands.toArray(AllocationCommand[]::new));
|
||||||
|
|
||||||
// start index closing threads
|
// start index closing threads
|
||||||
final List<Thread> threads = new ArrayList<>();
|
final List<Thread> threads = new ArrayList<>();
|
||||||
|
|
|
@ -11,6 +11,7 @@ package org.elasticsearch.indices.store;
|
||||||
import org.apache.logging.log4j.Logger;
|
import org.apache.logging.log4j.Logger;
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||||
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
|
||||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
||||||
import org.elasticsearch.cluster.ClusterState;
|
import org.elasticsearch.cluster.ClusterState;
|
||||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||||
|
@ -128,7 +129,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
|
||||||
logger.info("--> stopping disruption");
|
logger.info("--> stopping disruption");
|
||||||
disruption.stopDisrupting();
|
disruption.stopDisrupting();
|
||||||
} else {
|
} else {
|
||||||
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_3)).get();
|
ClusterRerouteUtils.reroute(internalCluster().client(), new MoveAllocationCommand("test", 0, node_1, node_3));
|
||||||
}
|
}
|
||||||
clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get();
|
clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get();
|
||||||
assertThat(clusterHealth.isTimedOut(), equalTo(false));
|
assertThat(clusterHealth.isTimedOut(), equalTo(false));
|
||||||
|
@ -172,7 +173,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(index, shard, nodeFrom, nodeTo)).get();
|
ClusterRerouteUtils.reroute(internalCluster().client(), new MoveAllocationCommand(index, shard, nodeFrom, nodeTo));
|
||||||
logger.info("--> waiting for relocation to start");
|
logger.info("--> waiting for relocation to start");
|
||||||
beginRelocationLatch.await();
|
beginRelocationLatch.await();
|
||||||
logger.info("--> starting disruption");
|
logger.info("--> starting disruption");
|
||||||
|
@ -223,7 +224,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
|
||||||
});
|
});
|
||||||
|
|
||||||
logger.info("--> move shard from {} to {}, and wait for relocation to finish", node_1, node_2);
|
logger.info("--> move shard from {} to {}, and wait for relocation to finish", node_1, node_2);
|
||||||
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).get();
|
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node_1, node_2));
|
||||||
shardActiveRequestSent.await();
|
shardActiveRequestSent.await();
|
||||||
ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get();
|
ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get();
|
||||||
assertThat(clusterHealth.isTimedOut(), equalTo(false));
|
assertThat(clusterHealth.isTimedOut(), equalTo(false));
|
||||||
|
|
|
@ -13,7 +13,10 @@ import org.apache.lucene.tests.util.English;
|
||||||
import org.elasticsearch.action.ActionFuture;
|
import org.elasticsearch.action.ActionFuture;
|
||||||
import org.elasticsearch.action.DocWriteResponse;
|
import org.elasticsearch.action.DocWriteResponse;
|
||||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||||
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
|
||||||
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
|
||||||
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
|
||||||
|
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
|
||||||
import org.elasticsearch.action.admin.indices.stats.ShardStats;
|
import org.elasticsearch.action.admin.indices.stats.ShardStats;
|
||||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||||
import org.elasticsearch.action.support.WriteRequest;
|
import org.elasticsearch.action.support.WriteRequest;
|
||||||
|
@ -144,7 +147,7 @@ public class RelocationIT extends ESIntegTestCase {
|
||||||
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
|
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
|
||||||
|
|
||||||
logger.info("--> relocate the shard from node1 to node2");
|
logger.info("--> relocate the shard from node1 to node2");
|
||||||
clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).get();
|
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node_1, node_2));
|
||||||
|
|
||||||
clusterHealthResponse = clusterAdmin().prepareHealth()
|
clusterHealthResponse = clusterAdmin().prepareHealth()
|
||||||
.setWaitForEvents(Priority.LANGUID)
|
.setWaitForEvents(Priority.LANGUID)
|
||||||
|
@ -207,7 +210,7 @@ public class RelocationIT extends ESIntegTestCase {
|
||||||
logger.debug("--> Allow indexer to index [{}] documents", numDocs);
|
logger.debug("--> Allow indexer to index [{}] documents", numDocs);
|
||||||
indexer.continueIndexing(numDocs);
|
indexer.continueIndexing(numDocs);
|
||||||
logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
|
logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
|
||||||
clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode])).get();
|
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode]));
|
||||||
if (rarely()) {
|
if (rarely()) {
|
||||||
logger.debug("--> flushing");
|
logger.debug("--> flushing");
|
||||||
indicesAdmin().prepareFlush().get();
|
indicesAdmin().prepareFlush().get();
|
||||||
|
@ -334,7 +337,7 @@ public class RelocationIT extends ESIntegTestCase {
|
||||||
|
|
||||||
logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
|
logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
|
||||||
|
|
||||||
clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode])).get();
|
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode]));
|
||||||
|
|
||||||
logger.debug("--> index [{}] documents", builders1.size());
|
logger.debug("--> index [{}] documents", builders1.size());
|
||||||
indexRandom(false, true, builders1);
|
indexRandom(false, true, builders1);
|
||||||
|
@ -555,7 +558,7 @@ public class RelocationIT extends ESIntegTestCase {
|
||||||
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
|
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
|
||||||
|
|
||||||
logger.info("--> relocate the shard from node1 to node2");
|
logger.info("--> relocate the shard from node1 to node2");
|
||||||
clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node1, node2)).get();
|
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node1, node2));
|
||||||
|
|
||||||
clusterHealthResponse = clusterAdmin().prepareHealth()
|
clusterHealthResponse = clusterAdmin().prepareHealth()
|
||||||
.setWaitForEvents(Priority.LANGUID)
|
.setWaitForEvents(Priority.LANGUID)
|
||||||
|
@ -606,9 +609,10 @@ public class RelocationIT extends ESIntegTestCase {
|
||||||
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
|
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
|
||||||
|
|
||||||
logger.info("--> relocate the shard from node1 to node2");
|
logger.info("--> relocate the shard from node1 to node2");
|
||||||
ActionFuture<ClusterRerouteResponse> relocationListener = clusterAdmin().prepareReroute()
|
ActionFuture<ClusterRerouteResponse> relocationListener = client().execute(
|
||||||
.add(new MoveAllocationCommand("test", 0, node1, node2))
|
TransportClusterRerouteAction.TYPE,
|
||||||
.execute();
|
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).add(new MoveAllocationCommand("test", 0, node1, node2))
|
||||||
|
);
|
||||||
logger.info("--> index 100 docs while relocating");
|
logger.info("--> index 100 docs while relocating");
|
||||||
for (int i = 20; i < 120; i++) {
|
for (int i = 20; i < 120; i++) {
|
||||||
pendingIndexResponses.add(
|
pendingIndexResponses.add(
|
||||||
|
@ -618,7 +622,7 @@ public class RelocationIT extends ESIntegTestCase {
|
||||||
.execute()
|
.execute()
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
relocationListener.actionGet();
|
safeGet(relocationListener);
|
||||||
clusterHealthResponse = clusterAdmin().prepareHealth()
|
clusterHealthResponse = clusterAdmin().prepareHealth()
|
||||||
.setWaitForEvents(Priority.LANGUID)
|
.setWaitForEvents(Priority.LANGUID)
|
||||||
.setWaitForNoRelocatingShards(true)
|
.setWaitForNoRelocatingShards(true)
|
||||||
|
|
|
@ -10,6 +10,7 @@ package org.elasticsearch.search.basic;
|
||||||
|
|
||||||
import org.elasticsearch.action.NoShardAvailableActionException;
|
import org.elasticsearch.action.NoShardAvailableActionException;
|
||||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||||
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
|
||||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||||
import org.elasticsearch.common.Priority;
|
import org.elasticsearch.common.Priority;
|
||||||
|
@ -118,7 +119,7 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase {
|
||||||
threads[j].start();
|
threads[j].start();
|
||||||
}
|
}
|
||||||
allowNodes("test", between(1, 3));
|
allowNodes("test", between(1, 3));
|
||||||
clusterAdmin().prepareReroute().get();
|
ClusterRerouteUtils.reroute(client());
|
||||||
stop.set(true);
|
stop.set(true);
|
||||||
for (int j = 0; j < threads.length; j++) {
|
for (int j = 0; j < threads.length; j++) {
|
||||||
threads[j].join();
|
threads[j].join();
|
||||||
|
|
|
@ -13,6 +13,7 @@ import org.apache.http.entity.StringEntity;
|
||||||
import org.apache.logging.log4j.Level;
|
import org.apache.logging.log4j.Level;
|
||||||
import org.apache.lucene.util.BytesRef;
|
import org.apache.lucene.util.BytesRef;
|
||||||
import org.elasticsearch.ElasticsearchException;
|
import org.elasticsearch.ElasticsearchException;
|
||||||
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
|
||||||
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
|
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
|
||||||
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure;
|
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure;
|
||||||
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
|
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
|
||||||
|
@ -549,18 +550,14 @@ public class FieldCapabilitiesIT extends ESIntegTestCase {
|
||||||
if (targetNodes.isEmpty()) {
|
if (targetNodes.isEmpty()) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
ClusterRerouteUtils.reroute(
|
||||||
safeGet(
|
client(),
|
||||||
clusterAdmin().prepareReroute()
|
|
||||||
.add(
|
|
||||||
new MoveAllocationCommand(
|
new MoveAllocationCommand(
|
||||||
shardId.getIndexName(),
|
shardId.getIndexName(),
|
||||||
shardId.id(),
|
shardId.id(),
|
||||||
indicesService.clusterService().localNode().getId(),
|
indicesService.clusterService().localNode().getId(),
|
||||||
randomFrom(targetNodes)
|
randomFrom(targetNodes)
|
||||||
)
|
)
|
||||||
)
|
|
||||||
.execute()
|
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,6 +12,7 @@ import org.apache.lucene.util.BytesRef;
|
||||||
import org.elasticsearch.ElasticsearchException;
|
import org.elasticsearch.ElasticsearchException;
|
||||||
import org.elasticsearch.ExceptionsHelper;
|
import org.elasticsearch.ExceptionsHelper;
|
||||||
import org.elasticsearch.action.ActionFuture;
|
import org.elasticsearch.action.ActionFuture;
|
||||||
|
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
|
||||||
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
|
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
|
||||||
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
|
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
|
||||||
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
|
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
|
||||||
|
@ -597,7 +598,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
|
||||||
Runnable fixupAction = () -> {
|
Runnable fixupAction = () -> {
|
||||||
// remove the shard allocation filtering settings and use the Reroute API to retry the failed shards
|
// remove the shard allocation filtering settings and use the Reroute API to retry the failed shards
|
||||||
updateIndexSettings(Settings.builder().putNull("index.routing.allocation.include._name"), indexName);
|
updateIndexSettings(Settings.builder().putNull("index.routing.allocation.include._name"), indexName);
|
||||||
assertAcked(clusterAdmin().prepareReroute().setRetryFailed(true));
|
ClusterRerouteUtils.rerouteRetryFailed(client());
|
||||||
};
|
};
|
||||||
|
|
||||||
unrestorableUseCase(
|
unrestorableUseCase(
|
||||||
|
|
|
@ -188,6 +188,7 @@ public class TransportVersions {
|
||||||
public static final TransportVersion RANK_DOC_IN_SHARD_FETCH_REQUEST = def(8_679_00_0);
|
public static final TransportVersion RANK_DOC_IN_SHARD_FETCH_REQUEST = def(8_679_00_0);
|
||||||
public static final TransportVersion SECURITY_SETTINGS_REQUEST_TIMEOUTS = def(8_680_00_0);
|
public static final TransportVersion SECURITY_SETTINGS_REQUEST_TIMEOUTS = def(8_680_00_0);
|
||||||
public static final TransportVersion QUERY_RULE_CRUD_API_PUT = def(8_681_00_0);
|
public static final TransportVersion QUERY_RULE_CRUD_API_PUT = def(8_681_00_0);
|
||||||
|
public static final TransportVersion DROP_UNUSED_NODES_REQUESTS = def(8_682_00_0);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* STOP! READ THIS FIRST! No, really,
|
* STOP! READ THIS FIRST! No, really,
|
||||||
|
|
|
@ -11,6 +11,7 @@ package org.elasticsearch.action.admin.cluster.node.info;
|
||||||
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
|
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
|
import org.elasticsearch.core.UpdateForV9;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
@ -30,6 +31,7 @@ public final class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
|
||||||
* @param in A stream input object.
|
* @param in A stream input object.
|
||||||
* @throws IOException if the stream cannot be deserialized.
|
* @throws IOException if the stream cannot be deserialized.
|
||||||
*/
|
*/
|
||||||
|
@UpdateForV9 // this constructor is unused in v9
|
||||||
public NodesInfoRequest(StreamInput in) throws IOException {
|
public NodesInfoRequest(StreamInput in) throws IOException {
|
||||||
super(in);
|
super(in);
|
||||||
nodesInfoMetrics = new NodesInfoMetrics(in);
|
nodesInfoMetrics = new NodesInfoMetrics(in);
|
||||||
|
@ -111,6 +113,7 @@ public final class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@UpdateForV9 // this method can just call localOnly() in v9
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
super.writeTo(out);
|
super.writeTo(out);
|
||||||
|
|
|
@ -13,6 +13,7 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest;
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
|
import org.elasticsearch.core.UpdateForV9;
|
||||||
import org.elasticsearch.tasks.CancellableTask;
|
import org.elasticsearch.tasks.CancellableTask;
|
||||||
import org.elasticsearch.tasks.Task;
|
import org.elasticsearch.tasks.Task;
|
||||||
import org.elasticsearch.tasks.TaskId;
|
import org.elasticsearch.tasks.TaskId;
|
||||||
|
@ -36,9 +37,9 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
|
||||||
nodesStatsRequestParameters = new NodesStatsRequestParameters();
|
nodesStatsRequestParameters = new NodesStatsRequestParameters();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@UpdateForV9 // this constructor is unused in v9
|
||||||
public NodesStatsRequest(StreamInput in) throws IOException {
|
public NodesStatsRequest(StreamInput in) throws IOException {
|
||||||
super(in);
|
super(in);
|
||||||
|
|
||||||
nodesStatsRequestParameters = new NodesStatsRequestParameters(in);
|
nodesStatsRequestParameters = new NodesStatsRequestParameters(in);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -178,6 +179,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
|
||||||
nodesStatsRequestParameters.setIncludeShardsStats(includeShardsStats);
|
nodesStatsRequestParameters.setIncludeShardsStats(includeShardsStats);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@UpdateForV9 // this method can just call localOnly() in v9
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
super.writeTo(out);
|
super.writeTo(out);
|
||||||
|
|
|
@ -13,6 +13,7 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
|
||||||
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
|
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
|
import org.elasticsearch.core.TimeValue;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
|
@ -34,8 +35,8 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
|
||||||
retryFailed = in.readBoolean();
|
retryFailed = in.readBoolean();
|
||||||
}
|
}
|
||||||
|
|
||||||
public ClusterRerouteRequest() {
|
public ClusterRerouteRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) {
|
||||||
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT);
|
super(masterNodeTimeout, ackTimeout);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -8,19 +8,31 @@
|
||||||
|
|
||||||
package org.elasticsearch.action.admin.cluster.reroute;
|
package org.elasticsearch.action.admin.cluster.reroute;
|
||||||
|
|
||||||
|
import org.elasticsearch.action.support.master.AcknowledgedRequest;
|
||||||
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
|
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
|
||||||
|
import org.elasticsearch.action.support.master.MasterNodeRequest;
|
||||||
import org.elasticsearch.client.internal.ElasticsearchClient;
|
import org.elasticsearch.client.internal.ElasticsearchClient;
|
||||||
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
|
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Builder for a cluster reroute request
|
* Builder for a cluster reroute request
|
||||||
|
*
|
||||||
|
* @deprecated just build the request directly
|
||||||
*/
|
*/
|
||||||
|
@Deprecated(forRemoval = true) // temporary compatibility shim
|
||||||
public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<
|
public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<
|
||||||
ClusterRerouteRequest,
|
ClusterRerouteRequest,
|
||||||
ClusterRerouteResponse,
|
ClusterRerouteResponse,
|
||||||
ClusterRerouteRequestBuilder> {
|
ClusterRerouteRequestBuilder> {
|
||||||
public ClusterRerouteRequestBuilder(ElasticsearchClient client) {
|
public ClusterRerouteRequestBuilder(ElasticsearchClient client) {
|
||||||
super(client, TransportClusterRerouteAction.TYPE, new ClusterRerouteRequest());
|
super(
|
||||||
|
client,
|
||||||
|
TransportClusterRerouteAction.TYPE,
|
||||||
|
new ClusterRerouteRequest(
|
||||||
|
MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT,
|
||||||
|
AcknowledgedRequest.DEFAULT_ACK_TIMEOUT
|
||||||
|
)
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
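The builder above is now only a deprecated shim, so callers are expected to construct ClusterRerouteRequest directly and state both timeouts explicitly. A hedged usage sketch follows; the timeout values, index and node names are illustrative, and an ElasticsearchClient named client is assumed to be in scope.

    // Illustrative only: move a shard through the reroute action without the deprecated builder.
    ClusterRerouteRequest request = new ClusterRerouteRequest(
        TimeValue.timeValueSeconds(30),  // master node timeout (illustrative value)
        TimeValue.timeValueSeconds(30)   // ack timeout (illustrative value)
    ).add(new MoveAllocationCommand("my-index", 0, "node-1", "node-2"));
    ClusterRerouteResponse response = client.execute(TransportClusterRerouteAction.TYPE, request).actionGet();
    assert response.isAcknowledged();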
@ -11,6 +11,7 @@ package org.elasticsearch.action.admin.cluster.stats;
|
||||||
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
|
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
|
import org.elasticsearch.core.UpdateForV9;
|
||||||
import org.elasticsearch.tasks.CancellableTask;
|
import org.elasticsearch.tasks.CancellableTask;
|
||||||
import org.elasticsearch.tasks.Task;
|
import org.elasticsearch.tasks.Task;
|
||||||
import org.elasticsearch.tasks.TaskId;
|
import org.elasticsearch.tasks.TaskId;
|
||||||
|
@ -23,6 +24,7 @@ import java.util.Map;
|
||||||
*/
|
*/
|
||||||
public class ClusterStatsRequest extends BaseNodesRequest<ClusterStatsRequest> {
|
public class ClusterStatsRequest extends BaseNodesRequest<ClusterStatsRequest> {
|
||||||
|
|
||||||
|
@UpdateForV9 // this constructor is unused in v9
|
||||||
public ClusterStatsRequest(StreamInput in) throws IOException {
|
public ClusterStatsRequest(StreamInput in) throws IOException {
|
||||||
super(in);
|
super(in);
|
||||||
}
|
}
|
||||||
|
@ -40,6 +42,7 @@ public class ClusterStatsRequest extends BaseNodesRequest<ClusterStatsRequest> {
|
||||||
return new CancellableTask(id, type, action, "", parentTaskId, headers);
|
return new CancellableTask(id, type, action, "", parentTaskId, headers);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@UpdateForV9 // this method can just call localOnly() in v9
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
super.writeTo(out);
|
super.writeTo(out);
|
||||||
|
|
|
@ -9,6 +9,7 @@
|
||||||
package org.elasticsearch.action.admin.cluster.stats;
|
package org.elasticsearch.action.admin.cluster.stats;
|
||||||
|
|
||||||
import org.apache.lucene.store.AlreadyClosedException;
|
import org.apache.lucene.store.AlreadyClosedException;
|
||||||
|
import org.elasticsearch.TransportVersions;
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
import org.elasticsearch.action.ActionType;
|
import org.elasticsearch.action.ActionType;
|
||||||
import org.elasticsearch.action.FailedNodeException;
|
import org.elasticsearch.action.FailedNodeException;
|
||||||
|
@ -32,6 +33,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
import org.elasticsearch.common.util.CancellableSingleObjectCache;
|
import org.elasticsearch.common.util.CancellableSingleObjectCache;
|
||||||
import org.elasticsearch.common.util.concurrent.ListenableFuture;
|
import org.elasticsearch.common.util.concurrent.ListenableFuture;
|
||||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||||
|
import org.elasticsearch.core.UpdateForV9;
|
||||||
import org.elasticsearch.index.IndexService;
|
import org.elasticsearch.index.IndexService;
|
||||||
import org.elasticsearch.index.engine.CommitStats;
|
import org.elasticsearch.index.engine.CommitStats;
|
||||||
import org.elasticsearch.index.seqno.RetentionLeaseStats;
|
import org.elasticsearch.index.seqno.RetentionLeaseStats;
|
||||||
|
@ -167,7 +169,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected ClusterStatsNodeRequest newNodeRequest(ClusterStatsRequest request) {
|
protected ClusterStatsNodeRequest newNodeRequest(ClusterStatsRequest request) {
|
||||||
return new ClusterStatsNodeRequest(request);
|
return new ClusterStatsNodeRequest();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -251,18 +253,16 @@ public class TransportClusterStatsAction extends TransportNodesAction<
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@UpdateForV9 // this can be replaced with TransportRequest.Empty in v9
|
||||||
public static class ClusterStatsNodeRequest extends TransportRequest {
|
public static class ClusterStatsNodeRequest extends TransportRequest {
|
||||||
|
|
||||||
// TODO don't wrap the whole top-level request, it contains heavy and irrelevant DiscoveryNode things; see #100878
|
ClusterStatsNodeRequest() {}
|
||||||
ClusterStatsRequest request;
|
|
||||||
|
|
||||||
public ClusterStatsNodeRequest(StreamInput in) throws IOException {
|
public ClusterStatsNodeRequest(StreamInput in) throws IOException {
|
||||||
super(in);
|
super(in);
|
||||||
request = new ClusterStatsRequest(in);
|
if (in.getTransportVersion().before(TransportVersions.DROP_UNUSED_NODES_REQUESTS)) {
|
||||||
|
new ClusterStatsRequest(in);
|
||||||
}
|
}
|
||||||
|
|
||||||
ClusterStatsNodeRequest(ClusterStatsRequest request) {
|
|
||||||
this.request = request;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
@ -273,7 +273,9 @@ public class TransportClusterStatsAction extends TransportNodesAction<
|
||||||
@Override
|
@Override
|
||||||
public void writeTo(StreamOutput out) throws IOException {
|
public void writeTo(StreamOutput out) throws IOException {
|
||||||
super.writeTo(out);
|
super.writeTo(out);
|
||||||
request.writeTo(out);
|
if (out.getTransportVersion().before(TransportVersions.DROP_UNUSED_NODES_REQUESTS)) {
|
||||||
|
new ClusterStatsRequest().writeTo(out);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -9,6 +9,7 @@ package org.elasticsearch.action.search;
|
||||||
|
|
||||||
import org.apache.lucene.search.ScoreDoc;
|
import org.apache.lucene.search.ScoreDoc;
|
||||||
import org.apache.lucene.search.join.ScoreMode;
|
import org.apache.lucene.search.join.ScoreMode;
|
||||||
|
import org.elasticsearch.common.lucene.Lucene;
|
||||||
import org.elasticsearch.index.query.NestedQueryBuilder;
|
import org.elasticsearch.index.query.NestedQueryBuilder;
|
||||||
import org.elasticsearch.index.query.QueryBuilder;
|
import org.elasticsearch.index.query.QueryBuilder;
|
||||||
import org.elasticsearch.search.SearchPhaseResult;
|
import org.elasticsearch.search.SearchPhaseResult;
|
||||||
|
@ -152,7 +153,7 @@ final class DfsQueryPhase extends SearchPhase {
|
||||||
scoreDocs.sort(Comparator.comparingInt(scoreDoc -> scoreDoc.doc));
|
scoreDocs.sort(Comparator.comparingInt(scoreDoc -> scoreDoc.doc));
|
||||||
String nestedPath = dfsKnnResults.getNestedPath();
|
String nestedPath = dfsKnnResults.getNestedPath();
|
||||||
QueryBuilder query = new KnnScoreDocQueryBuilder(
|
QueryBuilder query = new KnnScoreDocQueryBuilder(
|
||||||
scoreDocs.toArray(new ScoreDoc[0]),
|
scoreDocs.toArray(Lucene.EMPTY_SCORE_DOCS),
|
||||||
source.knnSearch().get(i).getField(),
|
source.knnSearch().get(i).getField(),
|
||||||
source.knnSearch().get(i).getQueryVector()
|
source.knnSearch().get(i).getQueryVector()
|
||||||
).boost(source.knnSearch().get(i).boost()).queryName(source.knnSearch().get(i).queryName());
|
).boost(source.knnSearch().get(i).boost()).queryName(source.knnSearch().get(i).queryName());
|
||||||
|
|
|
@ -24,6 +24,7 @@ import org.apache.lucene.search.TotalHits.Relation;
|
||||||
import org.apache.lucene.util.SetOnce;
|
import org.apache.lucene.util.SetOnce;
|
||||||
import org.elasticsearch.common.breaker.CircuitBreaker;
|
import org.elasticsearch.common.breaker.CircuitBreaker;
|
||||||
import org.elasticsearch.common.io.stream.DelayableWriteable;
|
import org.elasticsearch.common.io.stream.DelayableWriteable;
|
||||||
|
import org.elasticsearch.common.lucene.Lucene;
|
||||||
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
|
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
|
||||||
import org.elasticsearch.common.util.Maps;
|
import org.elasticsearch.common.util.Maps;
|
||||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||||
|
@ -66,7 +67,6 @@ import java.util.function.Supplier;
|
||||||
import static org.elasticsearch.search.SearchService.DEFAULT_SIZE;
|
import static org.elasticsearch.search.SearchService.DEFAULT_SIZE;
|
||||||
|
|
||||||
public final class SearchPhaseController {
|
public final class SearchPhaseController {
|
||||||
private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
|
|
||||||
|
|
||||||
private final BiFunction<
|
private final BiFunction<
|
||||||
Supplier<Boolean>,
|
Supplier<Boolean>,
|
||||||
|
@ -195,7 +195,7 @@ public final class SearchPhaseController {
|
||||||
return SortedTopDocs.EMPTY;
|
return SortedTopDocs.EMPTY;
|
||||||
}
|
}
|
||||||
final TopDocs mergedTopDocs = mergeTopDocs(topDocs, size, ignoreFrom ? 0 : from);
|
final TopDocs mergedTopDocs = mergeTopDocs(topDocs, size, ignoreFrom ? 0 : from);
|
||||||
final ScoreDoc[] mergedScoreDocs = mergedTopDocs == null ? EMPTY_DOCS : mergedTopDocs.scoreDocs;
|
final ScoreDoc[] mergedScoreDocs = mergedTopDocs == null ? Lucene.EMPTY_SCORE_DOCS : mergedTopDocs.scoreDocs;
|
||||||
ScoreDoc[] scoreDocs = mergedScoreDocs;
|
ScoreDoc[] scoreDocs = mergedScoreDocs;
|
||||||
int numSuggestDocs = 0;
|
int numSuggestDocs = 0;
|
||||||
if (reducedCompletionSuggestions.isEmpty() == false) {
|
if (reducedCompletionSuggestions.isEmpty() == false) {
|
||||||
|
@ -907,6 +907,6 @@ public final class SearchPhaseController {
|
||||||
Object[] collapseValues,
|
Object[] collapseValues,
|
||||||
int numberOfCompletionsSuggestions
|
int numberOfCompletionsSuggestions
|
||||||
) {
|
) {
|
||||||
public static final SortedTopDocs EMPTY = new SortedTopDocs(EMPTY_DOCS, false, null, null, null, 0);
|
public static final SortedTopDocs EMPTY = new SortedTopDocs(Lucene.EMPTY_SCORE_DOCS, false, null, null, null, 0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -8,7 +8,6 @@
|
||||||
|
|
||||||
package org.elasticsearch.action.search;
|
package org.elasticsearch.action.search;
|
||||||
|
|
||||||
import org.apache.lucene.search.TotalHits;
|
|
||||||
import org.elasticsearch.TransportVersions;
|
import org.elasticsearch.TransportVersions;
|
||||||
import org.elasticsearch.action.ActionResponse;
|
import org.elasticsearch.action.ActionResponse;
|
||||||
import org.elasticsearch.action.OriginalIndices;
|
import org.elasticsearch.action.OriginalIndices;
|
||||||
|
@ -18,6 +17,7 @@ import org.elasticsearch.common.collect.Iterators;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
import org.elasticsearch.common.io.stream.Writeable;
|
import org.elasticsearch.common.io.stream.Writeable;
|
||||||
|
import org.elasticsearch.common.lucene.Lucene;
|
||||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||||
import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
|
import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
|
||||||
import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
|
import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
|
||||||
|
@ -1154,7 +1154,7 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO
|
||||||
// public for tests
|
// public for tests
|
||||||
public static SearchResponse empty(Supplier<Long> tookInMillisSupplier, Clusters clusters) {
|
public static SearchResponse empty(Supplier<Long> tookInMillisSupplier, Clusters clusters) {
|
||||||
return new SearchResponse(
|
return new SearchResponse(
|
||||||
SearchHits.empty(new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN),
|
SearchHits.empty(Lucene.TOTAL_HITS_EQUAL_TO_ZERO, Float.NaN),
|
||||||
InternalAggregations.EMPTY,
|
InternalAggregations.EMPTY,
|
||||||
null,
|
null,
|
||||||
false,
|
false,
|
||||||
|
|
|
@ -18,6 +18,7 @@ import org.elasticsearch.ElasticsearchException;
|
||||||
import org.elasticsearch.action.search.SearchPhaseController.TopDocsStats;
|
import org.elasticsearch.action.search.SearchPhaseController.TopDocsStats;
|
||||||
import org.elasticsearch.action.search.SearchResponse.Clusters;
|
import org.elasticsearch.action.search.SearchResponse.Clusters;
|
||||||
import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider;
|
import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider;
|
||||||
|
import org.elasticsearch.common.lucene.Lucene;
|
||||||
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
|
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
|
||||||
import org.elasticsearch.core.Releasable;
|
import org.elasticsearch.core.Releasable;
|
||||||
import org.elasticsearch.index.shard.ShardId;
|
import org.elasticsearch.index.shard.ShardId;
|
||||||
|
@ -177,7 +178,7 @@ public final class SearchResponseMerger implements Releasable {
|
||||||
final TotalHits totalHits;
|
final TotalHits totalHits;
|
||||||
if (searchHits.getTotalHits() == null) {
|
if (searchHits.getTotalHits() == null) {
|
||||||
// in case we didn't track total hits, we get null from each cluster, but we need to set 0 eq to the TopDocs
|
// in case we didn't track total hits, we get null from each cluster, but we need to set 0 eq to the TopDocs
|
||||||
totalHits = new TotalHits(0, TotalHits.Relation.EQUAL_TO);
|
totalHits = Lucene.TOTAL_HITS_EQUAL_TO_ZERO;
|
||||||
assert trackTotalHits == null || trackTotalHits == false;
|
assert trackTotalHits == null || trackTotalHits == false;
|
||||||
trackTotalHits = false;
|
trackTotalHits = false;
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -39,6 +39,14 @@ public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>
|
||||||
|
|
||||||
private TimeValue timeout;
|
private TimeValue timeout;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @deprecated {@link BaseNodesRequest} derivatives are quite heavyweight and should never need sending over the wire. Do not include
|
||||||
|
* the full top-level request directly in the node-level requests. Instead, copy the needed fields over to a dedicated node-level
|
||||||
|
* request.
|
||||||
|
*
|
||||||
|
* @see <a href="https://github.com/elastic/elasticsearch/issues/100878">#100878</a>
|
||||||
|
*/
|
||||||
|
@Deprecated(forRemoval = true)
|
||||||
protected BaseNodesRequest(StreamInput in) throws IOException {
|
protected BaseNodesRequest(StreamInput in) throws IOException {
|
||||||
// A bare `BaseNodesRequest` is never sent over the wire, but several implementations send the full top-level request to each node
|
// A bare `BaseNodesRequest` is never sent over the wire, but several implementations send the full top-level request to each node
|
||||||
// (wrapped up in another request). They shouldn't, but until we fix that we must keep this. See #100878.
|
// (wrapped up in another request). They shouldn't, but until we fix that we must keep this. See #100878.
|
||||||
|
|
|
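The javadoc added above warns against shipping the heavyweight top-level nodes request inside each node-level transport request. As a loose illustration of the recommended shape, with all names invented for the example rather than taken from the codebase, a dedicated node-level request carries only the fields the node handler actually needs:

    import java.io.IOException;

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.transport.TransportRequest;

    // Invented example class showing the "copy only what you need" pattern the javadoc asks for.
    public class ExampleNodeRequest extends TransportRequest {
        private final boolean includeDetails; // the single field this hypothetical node handler needs

        public ExampleNodeRequest(boolean includeDetails) {
            this.includeDetails = includeDetails;
        }

        public ExampleNodeRequest(StreamInput in) throws IOException {
            super(in);
            this.includeDetails = in.readBoolean();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeBoolean(includeDetails);
        }

        public boolean includeDetails() {
            return includeDetails;
        }
    }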
@ -203,14 +203,26 @@ public class ClusterAdminClient implements ElasticsearchClient {
|
||||||
return new ClusterUpdateSettingsRequestBuilder(this);
|
return new ClusterUpdateSettingsRequestBuilder(this);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @deprecated use {@code ClusterRerouteUtils} in tests, or just run the action directly
|
||||||
|
*/
|
||||||
|
@Deprecated(forRemoval = true) // temporary compatibility shim
|
||||||
public ActionFuture<ClusterRerouteResponse> reroute(final ClusterRerouteRequest request) {
|
public ActionFuture<ClusterRerouteResponse> reroute(final ClusterRerouteRequest request) {
|
||||||
return execute(TransportClusterRerouteAction.TYPE, request);
|
return execute(TransportClusterRerouteAction.TYPE, request);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @deprecated use {@code ClusterRerouteUtils} in tests, or just run the action directly
|
||||||
|
*/
|
||||||
|
@Deprecated(forRemoval = true) // temporary compatibility shim
|
||||||
public void reroute(final ClusterRerouteRequest request, final ActionListener<ClusterRerouteResponse> listener) {
|
public void reroute(final ClusterRerouteRequest request, final ActionListener<ClusterRerouteResponse> listener) {
|
||||||
execute(TransportClusterRerouteAction.TYPE, request, listener);
|
execute(TransportClusterRerouteAction.TYPE, request, listener);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @deprecated use {@code ClusterRerouteUtils} in tests, or just run the action directly
|
||||||
|
*/
|
||||||
|
@Deprecated(forRemoval = true) // temporary compatibility shim
|
||||||
public ClusterRerouteRequestBuilder prepareReroute() {
|
public ClusterRerouteRequestBuilder prepareReroute() {
|
||||||
return new ClusterRerouteRequestBuilder(this);
|
return new ClusterRerouteRequestBuilder(this);
|
||||||
}
|
}
|
||||||
|
|
|
@ -100,13 +100,6 @@ public record AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabl
|
||||||
|
|
||||||
public int getDesiredNumberOfReplicas(IndexMetadata indexMetadata, RoutingAllocation allocation) {
|
public int getDesiredNumberOfReplicas(IndexMetadata indexMetadata, RoutingAllocation allocation) {
|
||||||
assert enabled : "should only be called when enabled";
|
assert enabled : "should only be called when enabled";
|
||||||
// Make sure in stateless auto-expand indices always have 1 replica to ensure all shard roles are always present
|
|
||||||
if (Objects.equals(
|
|
||||||
indexMetadata.getSettings().get(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey()),
|
|
||||||
"stateless"
|
|
||||||
)) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
int numMatchingDataNodes = 0;
|
int numMatchingDataNodes = 0;
|
||||||
for (DiscoveryNode discoveryNode : allocation.nodes().getDataNodes().values()) {
|
for (DiscoveryNode discoveryNode : allocation.nodes().getDataNodes().values()) {
|
||||||
Decision decision = allocation.deciders().shouldAutoExpandToNode(indexMetadata, discoveryNode, allocation);
|
Decision decision = allocation.deciders().shouldAutoExpandToNode(indexMetadata, discoveryNode, allocation);
|
||||||
|
@ -150,9 +143,22 @@ public record AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabl
|
||||||
for (final IndexMetadata indexMetadata : metadata) {
|
for (final IndexMetadata indexMetadata : metadata) {
|
||||||
if (indexMetadata.getState() == IndexMetadata.State.OPEN || isIndexVerifiedBeforeClosed(indexMetadata)) {
|
if (indexMetadata.getState() == IndexMetadata.State.OPEN || isIndexVerifiedBeforeClosed(indexMetadata)) {
|
||||||
AutoExpandReplicas autoExpandReplicas = indexMetadata.getAutoExpandReplicas();
|
AutoExpandReplicas autoExpandReplicas = indexMetadata.getAutoExpandReplicas();
|
||||||
|
// Make sure auto-expand is applied only when configured, and entirely disabled in stateless
|
||||||
if (autoExpandReplicas.enabled() == false) {
|
if (autoExpandReplicas.enabled() == false) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
// Special case for stateless indices: auto-expand is disabled, unless number_of_replicas has been set
|
||||||
|
// manually to 0 via index settings, which needs to be converted to 1.
|
||||||
|
if (Objects.equals(
|
||||||
|
indexMetadata.getSettings().get(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey()),
|
||||||
|
"stateless"
|
||||||
|
)) {
|
||||||
|
if (indexMetadata.getNumberOfReplicas() == 0) {
|
||||||
|
nrReplicasChanged.computeIfAbsent(1, ArrayList::new).add(indexMetadata.getIndex().getName());
|
||||||
|
} else {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
if (allocation == null) {
|
if (allocation == null) {
|
||||||
allocation = allocationSupplier.get();
|
allocation = allocationSupplier.get();
|
||||||
}
|
}
|
||||||
|
|
|
@@ -9,7 +9,6 @@
 package org.elasticsearch.common.bytes;

 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.BytesRefIterator;
 import org.apache.lucene.util.RamUsageEstimator;

@@ -172,18 +171,33 @@ public final class CompositeBytesReference extends AbstractBytesReference {

     @Override
     public BytesRef toBytesRef() {
-        BytesRefBuilder builder = new BytesRefBuilder();
-        builder.grow(length());
+        final byte[] result = new byte[length];
+        int offset = 0;
+        for (BytesReference reference : references) {
+            if (reference.hasArray()) {
+                int len = reference.length();
+                System.arraycopy(reference.array(), reference.arrayOffset(), result, offset, len);
+                offset += len;
+            } else {
+                offset = copyViaIterator(reference, result, offset);
+            }
+        }
+        assert offset == result.length;
+        return new BytesRef(result);
+    }
+
+    private static int copyViaIterator(BytesReference reference, byte[] result, int offset) {
         BytesRef spare;
-        BytesRefIterator iterator = iterator();
+        BytesRefIterator iterator = reference.iterator();
         try {
             while ((spare = iterator.next()) != null) {
-                builder.append(spare);
+                System.arraycopy(spare.bytes, spare.offset, result, offset, spare.length);
+                offset += spare.length;
             }
         } catch (IOException ex) {
             throw new AssertionError("won't happen", ex); // this is really an error since we don't do IO in our bytesreferences
         }
-        return builder.toBytesRef();
+        return offset;
     }

     @Override
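Editor's note: the rewritten toBytesRef() above materializes the composite reference into a single freshly allocated array, copying backing arrays directly and only falling back to the BytesRefIterator for pieces without an accessible array. A minimal usage sketch, assuming construction via CompositeBytesReference.of and two BytesArray inputs (illustrative only, not part of the change):

    // Hypothetical illustration: concatenate two small references and materialize them in one copy.
    BytesReference composite = CompositeBytesReference.of(new BytesArray("hello "), new BytesArray("world"));
    BytesRef flat = composite.toBytesRef();              // single pass into one byte[]
    assert flat.utf8ToString().equals("hello world");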
@@ -101,7 +101,10 @@ public class Lucene {

     public static final ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0];

-    public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), EMPTY_SCORE_DOCS);
+    public static final TotalHits TOTAL_HITS_EQUAL_TO_ZERO = new TotalHits(0, TotalHits.Relation.EQUAL_TO);
+    public static final TotalHits TOTAL_HITS_GREATER_OR_EQUAL_TO_ZERO = new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+
+    public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(TOTAL_HITS_EQUAL_TO_ZERO, EMPTY_SCORE_DOCS);

     private Lucene() {}

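Editor's note: the two new TotalHits constants let callers reuse shared zero-hit instances instead of allocating a fresh TotalHits for every empty result; EMPTY_TOP_DOCS is now built from the same constant. A sketch of the intended call sites, assuming an empty sort-field array for illustration:

    // Reuse the shared constants rather than allocating per empty response.
    TopDocs emptySorted = new TopFieldDocs(Lucene.TOTAL_HITS_EQUAL_TO_ZERO, new FieldDoc[0], new SortField[0]);
    TopDocs emptyUnsorted = Lucene.EMPTY_TOP_DOCS;       // zero hits, EQUAL_TO relation, no score docs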
@@ -135,6 +135,15 @@ public final class Sets {
         return union;
     }

+    @SafeVarargs
+    public static <T> Set<T> union(Set<T> first, Set<T>... others) {
+        Set<T> union = new HashSet<>(first);
+        for (Set<T> other : others) {
+            union.addAll(other);
+        }
+        return union;
+    }
+
     /**
      * The intersection of two sets. Namely, the resulting set contains all the elements that are in both sets.
      * Neither input is mutated by this operation, an entirely new set is returned.
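Editor's note: the new @SafeVarargs overload unions an arbitrary number of sets in one call, which is what BaseRestHandler uses below to fold the response parameters into its supported-parameter check. Illustrative usage:

    Set<String> merged = Sets.union(Set.of("a"), Set.of("b"), Set.of("c"));
    assert merged.equals(Set.of("a", "b", "c"));         // inputs are not mutated; a new HashSet is returned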
@@ -36,6 +36,7 @@ import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.search.grouping.GroupSelector;
+import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -202,7 +203,7 @@ public class SinglePassGroupingCollector<T> extends SimpleCollector {

         if (groupMap.size() <= groupOffset) {
             TotalHits totalHits = new TotalHits(totalHitCount, TotalHits.Relation.EQUAL_TO);
-            return new TopFieldGroups(groupField, totalHits, new ScoreDoc[0], groupSort.getSort(), new Object[0]);
+            return new TopFieldGroups(groupField, totalHits, Lucene.EMPTY_SCORE_DOCS, groupSort.getSort(), new Object[0]);
         }

         if (orderedGroups == null) {
@@ -16,6 +16,7 @@ import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.TopFieldDocs;
 import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.util.PriorityQueue;
+import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.util.CollectionUtils;

 import java.util.ArrayList;
@@ -225,7 +226,7 @@ public final class TopFieldGroups extends TopFieldDocs {
                     queue.pop();
                 }
             }
-            hits = hitList.toArray(new ScoreDoc[0]);
+            hits = hitList.toArray(Lucene.EMPTY_SCORE_DOCS);
             values = groupList.toArray(new Object[0]);
         }
         TotalHits totalHits = new TotalHits(totalHitCount, totalHitsRelation);
@@ -83,7 +83,7 @@ public abstract class BaseRestHandler implements RestHandler {
         // check if the query has any parameters that are not in the supported set (if declared)
         Set<String> supported = allSupportedParameters();
         if (supported != null) {
-            var allSupported = Sets.union(ALWAYS_SUPPORTED, supported);
+            var allSupported = Sets.union(RestResponse.RESPONSE_PARAMS, ALWAYS_SUPPORTED, supported);
             if (allSupported.containsAll(request.params().keySet()) == false) {
                 Set<String> unsupported = Sets.difference(request.params().keySet(), allSupported);
                 throw new IllegalArgumentException(unrecognized(request, unsupported, allSupported, "parameter"));
@@ -97,6 +97,7 @@ public abstract class BaseRestHandler implements RestHandler {
         // use a sorted set so the unconsumed parameters appear in a reliable sorted order
         final SortedSet<String> unconsumedParams = request.unconsumedParams()
             .stream()
+            .filter(p -> RestResponse.RESPONSE_PARAMS.contains(p) == false)
             .filter(p -> responseParams(request.getRestApiVersion()).contains(p) == false)
             .collect(Collectors.toCollection(TreeSet::new));

@@ -37,6 +37,7 @@ import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER;
 public final class RestResponse implements Releasable {

     public static final String TEXT_CONTENT_TYPE = "text/plain; charset=UTF-8";
+    public static final Set<String> RESPONSE_PARAMS = Set.of("error_trace");

     static final String STATUS = "status";

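Editor's note: taken together, the RESPONSE_PARAMS constant above and the widened Sets.union call in BaseRestHandler mean that error_trace is treated as a supported, response-scoped parameter on every handler and is filtered out of the unconsumed-parameter check. A short sketch of the resulting check, with variable names following the diff (not a complete handler):

    // error_trace is now part of every handler's accepted parameter set.
    var allSupported = Sets.union(RestResponse.RESPONSE_PARAMS, ALWAYS_SUPPORTED, supported);
    boolean accepted = allSupported.contains("error_trace");   // true regardless of the handler's own list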
@@ -9,6 +9,7 @@
 package org.elasticsearch.rest.action.admin.cluster;

 import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
+import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
@@ -86,7 +87,11 @@ public class RestClusterRerouteAction extends BaseRestHandler {
         if (metric == null) {
             request.params().put("metric", DEFAULT_METRICS);
         }
-        return channel -> client.admin().cluster().reroute(clusterRerouteRequest, new RestRefCountedChunkedToXContentListener<>(channel));
+        return channel -> client.execute(
+            TransportClusterRerouteAction.TYPE,
+            clusterRerouteRequest,
+            new RestRefCountedChunkedToXContentListener<>(channel)
+        );
     }

     @Override
@@ -95,12 +100,10 @@ public class RestClusterRerouteAction extends BaseRestHandler {
     }

     public static ClusterRerouteRequest createRequest(RestRequest request) throws IOException {
-        ClusterRerouteRequest clusterRerouteRequest = new ClusterRerouteRequest();
+        final var clusterRerouteRequest = new ClusterRerouteRequest(getMasterNodeTimeout(request), getAckTimeout(request));
         clusterRerouteRequest.dryRun(request.paramAsBoolean("dry_run", clusterRerouteRequest.dryRun()));
         clusterRerouteRequest.explain(request.paramAsBoolean("explain", clusterRerouteRequest.explain()));
-        clusterRerouteRequest.ackTimeout(getAckTimeout(request));
         clusterRerouteRequest.setRetryFailed(request.paramAsBoolean("retry_failed", clusterRerouteRequest.isRetryFailed()));
-        clusterRerouteRequest.masterNodeTimeout(getMasterNodeTimeout(request));
         request.applyContentParser(parser -> PARSER.parse(parser, clusterRerouteRequest, null));
         return clusterRerouteRequest;
     }
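Editor's note: the handler now resolves both timeouts up front via the two-argument ClusterRerouteRequest constructor and dispatches through client.execute with the transport action type instead of the admin-cluster convenience method. A hedged sketch of an equivalent internal call; the 30-second timeouts and the listener variable are assumptions for illustration:

    // Illustrative only: build the request with explicit master-node and ack timeouts, then execute it.
    ClusterRerouteRequest request = new ClusterRerouteRequest(TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30));
    request.dryRun(true);
    client.execute(TransportClusterRerouteAction.TYPE, request, listener);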
@@ -13,6 +13,7 @@ import org.elasticsearch.script.field.vectors.DenseVector;
 import org.elasticsearch.script.field.vectors.DenseVectorDocValuesField;

 import java.io.IOException;
+import java.util.HexFormat;
 import java.util.List;

 public class VectorScoreScriptUtils {
@@ -65,6 +66,23 @@ public class VectorScoreScriptUtils {
             this.qvMagnitude = (float) Math.sqrt(queryMagnitude);
             field.getElementType().checkVectorBounds(validateValues);
         }
+
+        /**
+         * Constructs a dense vector function used for byte-sized vectors.
+         *
+         * @param scoreScript The script in which this function was referenced.
+         * @param field The vector field.
+         * @param queryVector The query vector.
+         */
+        public ByteDenseVectorFunction(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
+            super(scoreScript, field);
+            this.queryVector = queryVector;
+            float queryMagnitude = 0.0f;
+            for (byte value : queryVector) {
+                queryMagnitude += value * value;
+            }
+            this.qvMagnitude = (float) Math.sqrt(queryMagnitude);
+        }
     }

     public static class FloatDenseVectorFunction extends DenseVectorFunction {
@@ -116,6 +134,10 @@ public class VectorScoreScriptUtils {
             super(scoreScript, field, queryVector);
         }

+        public ByteL1Norm(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
+            super(scoreScript, field, queryVector);
+        }
+
         public double l1norm() {
             setNextVector();
             return field.get().l1Norm(queryVector);
@@ -138,11 +160,25 @@ public class VectorScoreScriptUtils {

         private final L1NormInterface function;

-        public L1Norm(ScoreScript scoreScript, List<Number> queryVector, String fieldName) {
+        @SuppressWarnings("unchecked")
+        public L1Norm(ScoreScript scoreScript, Object queryVector, String fieldName) {
             DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName);
             function = switch (field.getElementType()) {
-                case BYTE -> new ByteL1Norm(scoreScript, field, queryVector);
-                case FLOAT -> new FloatL1Norm(scoreScript, field, queryVector);
+                case BYTE -> {
+                    if (queryVector instanceof List) {
+                        yield new ByteL1Norm(scoreScript, field, (List<Number>) queryVector);
+                    } else if (queryVector instanceof String s) {
+                        byte[] parsedQueryVector = HexFormat.of().parseHex(s);
+                        yield new ByteL1Norm(scoreScript, field, parsedQueryVector);
+                    }
+                    throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
+                }
+                case FLOAT -> {
+                    if (queryVector instanceof List) {
+                        yield new FloatL1Norm(scoreScript, field, (List<Number>) queryVector);
+                    }
+                    throw new IllegalArgumentException("Unsupported input object for float vectors: " + queryVector.getClass().getName());
+                }
             };
         }

@@ -162,6 +198,10 @@ public class VectorScoreScriptUtils {
             super(scoreScript, field, queryVector);
         }

+        public ByteL2Norm(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
+            super(scoreScript, field, queryVector);
+        }
+
         public double l2norm() {
             setNextVector();
             return field.get().l2Norm(queryVector);
@@ -184,11 +224,25 @@ public class VectorScoreScriptUtils {

         private final L2NormInterface function;

-        public L2Norm(ScoreScript scoreScript, List<Number> queryVector, String fieldName) {
+        @SuppressWarnings("unchecked")
+        public L2Norm(ScoreScript scoreScript, Object queryVector, String fieldName) {
             DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName);
             function = switch (field.getElementType()) {
-                case BYTE -> new ByteL2Norm(scoreScript, field, queryVector);
-                case FLOAT -> new FloatL2Norm(scoreScript, field, queryVector);
+                case BYTE -> {
+                    if (queryVector instanceof List) {
+                        yield new ByteL2Norm(scoreScript, field, (List<Number>) queryVector);
+                    } else if (queryVector instanceof String s) {
+                        byte[] parsedQueryVector = HexFormat.of().parseHex(s);
+                        yield new ByteL2Norm(scoreScript, field, parsedQueryVector);
+                    }
+                    throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
+                }
+                case FLOAT -> {
+                    if (queryVector instanceof List) {
+                        yield new FloatL2Norm(scoreScript, field, (List<Number>) queryVector);
+                    }
+                    throw new IllegalArgumentException("Unsupported input object for float vectors: " + queryVector.getClass().getName());
+                }
             };
         }

@@ -208,6 +262,10 @@ public class VectorScoreScriptUtils {
             super(scoreScript, field, queryVector);
         }

+        public ByteDotProduct(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
+            super(scoreScript, field, queryVector);
+        }
+
         public double dotProduct() {
             setNextVector();
             return field.get().dotProduct(queryVector);
@@ -230,11 +288,25 @@ public class VectorScoreScriptUtils {

         private final DotProductInterface function;

-        public DotProduct(ScoreScript scoreScript, List<Number> queryVector, String fieldName) {
+        @SuppressWarnings("unchecked")
+        public DotProduct(ScoreScript scoreScript, Object queryVector, String fieldName) {
             DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName);
             function = switch (field.getElementType()) {
-                case BYTE -> new ByteDotProduct(scoreScript, field, queryVector);
-                case FLOAT -> new FloatDotProduct(scoreScript, field, queryVector);
+                case BYTE -> {
+                    if (queryVector instanceof List) {
+                        yield new ByteDotProduct(scoreScript, field, (List<Number>) queryVector);
+                    } else if (queryVector instanceof String s) {
+                        byte[] parsedQueryVector = HexFormat.of().parseHex(s);
+                        yield new ByteDotProduct(scoreScript, field, parsedQueryVector);
+                    }
+                    throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
+                }
+                case FLOAT -> {
+                    if (queryVector instanceof List) {
+                        yield new FloatDotProduct(scoreScript, field, (List<Number>) queryVector);
+                    }
+                    throw new IllegalArgumentException("Unsupported input object for float vectors: " + queryVector.getClass().getName());
+                }
             };
         }

@@ -254,6 +326,10 @@ public class VectorScoreScriptUtils {
             super(scoreScript, field, queryVector);
         }

+        public ByteCosineSimilarity(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
+            super(scoreScript, field, queryVector);
+        }
+
         public double cosineSimilarity() {
             setNextVector();
             return field.get().cosineSimilarity(queryVector, qvMagnitude);
@@ -276,11 +352,25 @@ public class VectorScoreScriptUtils {

         private final CosineSimilarityInterface function;

-        public CosineSimilarity(ScoreScript scoreScript, List<Number> queryVector, String fieldName) {
+        @SuppressWarnings("unchecked")
+        public CosineSimilarity(ScoreScript scoreScript, Object queryVector, String fieldName) {
             DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName);
             function = switch (field.getElementType()) {
-                case BYTE -> new ByteCosineSimilarity(scoreScript, field, queryVector);
-                case FLOAT -> new FloatCosineSimilarity(scoreScript, field, queryVector);
+                case BYTE -> {
+                    if (queryVector instanceof List) {
+                        yield new ByteCosineSimilarity(scoreScript, field, (List<Number>) queryVector);
+                    } else if (queryVector instanceof String s) {
+                        byte[] parsedQueryVector = HexFormat.of().parseHex(s);
+                        yield new ByteCosineSimilarity(scoreScript, field, parsedQueryVector);
+                    }
+                    throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
+                }
+                case FLOAT -> {
+                    if (queryVector instanceof List) {
+                        yield new FloatCosineSimilarity(scoreScript, field, (List<Number>) queryVector);
+                    }
+                    throw new IllegalArgumentException("Unsupported input object for float vectors: " + queryVector.getClass().getName());
+                }
             };
         }

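Editor's note: with this change the L1Norm, L2Norm, DotProduct, and CosineSimilarity script helpers accept either a List<Number> or a hex-encoded String as the query vector when the field stores byte elements; the string form is decoded with java.util.HexFormat. A small sketch of the decoding behaviour the constructors rely on; the sample hex literal is an assumption for illustration:

    // "417f02" decodes to the byte vector {0x41, 0x7f, 0x02}; both forms address the same byte-element field.
    byte[] fromHex = HexFormat.of().parseHex("417f02");
    List<Number> asList = List.of(0x41, 0x7f, 0x02);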
@@ -34,7 +34,7 @@ import java.util.Objects;
 public final class SearchHits implements Writeable, ChunkedToXContent, RefCounted, Iterable<SearchHit> {

     public static final SearchHit[] EMPTY = new SearchHit[0];
-    public static final SearchHits EMPTY_WITH_TOTAL_HITS = SearchHits.empty(new TotalHits(0, Relation.EQUAL_TO), 0);
+    public static final SearchHits EMPTY_WITH_TOTAL_HITS = SearchHits.empty(Lucene.TOTAL_HITS_EQUAL_TO_ZERO, 0);
     public static final SearchHits EMPTY_WITHOUT_TOTAL_HITS = SearchHits.empty(null, 0);

     private final SearchHit[] hits;
@@ -20,7 +20,6 @@ import org.apache.lucene.search.TopDocsCollector;
 import org.apache.lucene.search.TopFieldCollector;
 import org.apache.lucene.search.TopFieldDocs;
 import org.apache.lucene.search.TopScoreDocCollector;
-import org.apache.lucene.search.TotalHits;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.search.MaxScoreCollector;
 import org.elasticsearch.common.lucene.Lucene;
@@ -233,11 +232,7 @@ class TopHitsAggregator extends MetricsAggregator {
     public InternalTopHits buildEmptyAggregation() {
         TopDocs topDocs;
         if (subSearchContext.sort() != null) {
-            topDocs = new TopFieldDocs(
-                new TotalHits(0, TotalHits.Relation.EQUAL_TO),
-                new FieldDoc[0],
-                subSearchContext.sort().sort.getSort()
-            );
+            topDocs = new TopFieldDocs(Lucene.TOTAL_HITS_EQUAL_TO_ZERO, new FieldDoc[0], subSearchContext.sort().sort.getSort());
         } else {
             topDocs = Lucene.EMPTY_TOP_DOCS;
         }
@@ -49,7 +49,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.List;
 import java.util.Objects;
 import java.util.PriorityQueue;
@@ -255,13 +254,11 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
         // Make a copy so we can sort:
         List<LeafReaderContext> sortedLeaves = new ArrayList<>(leaves);
         // Sort by maxDoc, descending:
-        final Comparator<LeafReaderContext> leafComparator = Comparator.comparingInt(l -> l.reader().maxDoc());
-        sortedLeaves.sort(leafComparator.reversed());
+        sortedLeaves.sort((c1, c2) -> Integer.compare(c2.reader().maxDoc(), c1.reader().maxDoc()));
         // we add the groups on a priority queue, so we can add orphan leafs to the smallest group
-        final Comparator<List<LeafReaderContext>> groupComparator = Comparator.comparingInt(
-            l -> l.stream().mapToInt(lr -> lr.reader().maxDoc()).sum()
+        final PriorityQueue<List<LeafReaderContext>> queue = new PriorityQueue<>(
+            (c1, c2) -> Integer.compare(sumMaxDocValues(c1), sumMaxDocValues(c2))
         );
-        final PriorityQueue<List<LeafReaderContext>> queue = new PriorityQueue<>(groupComparator);
         long docSum = 0;
         List<LeafReaderContext> group = new ArrayList<>();
         for (LeafReaderContext ctx : sortedLeaves) {
@@ -297,6 +294,14 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
         return slices;
     }

+    private static int sumMaxDocValues(List<LeafReaderContext> l) {
+        int sum = 0;
+        for (LeafReaderContext lr : l) {
+            sum += lr.reader().maxDoc();
+        }
+        return sum;
+    }
+
     @Override
     public <C extends Collector, T> T search(Query query, CollectorManager<C, T> collectorManager) throws IOException {
         final C firstCollector = collectorManager.newCollector();
@@ -337,7 +342,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
                     throw new IllegalStateException("CollectorManager does not always produce collectors with the same score mode");
                 }
             }
-            final List<Callable<C>> listTasks = new ArrayList<>();
+            final List<Callable<C>> listTasks = new ArrayList<>(leafSlices.length);
             for (int i = 0; i < leafSlices.length; ++i) {
                 final LeafReaderContext[] leaves = leafSlices[i].leaves;
                 final C collector = collectors.get(i);
@@ -22,7 +22,6 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.ScoreMode;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.TopDocs;
-import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.search.Weight;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
@@ -76,11 +75,7 @@ public class QueryPhase {
                 searchContext.size(0);
                 QueryPhase.executeQuery(searchContext);
             } else {
-                searchContext.queryResult()
-                    .topDocs(
-                        new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN),
-                        new DocValueFormat[0]
-                    );
+                searchContext.queryResult().topDocs(new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN), new DocValueFormat[0]);
             }

             List<TopDocs> rrfRankResults = new ArrayList<>();
@@ -124,11 +119,7 @@ public class QueryPhase {
     static void executeQuery(SearchContext searchContext) throws QueryPhaseExecutionException {
         if (searchContext.hasOnlySuggest()) {
             SuggestPhase.execute(searchContext);
-            searchContext.queryResult()
-                .topDocs(
-                    new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN),
-                    new DocValueFormat[0]
-                );
+            searchContext.queryResult().topDocs(new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN), new DocValueFormat[0]);
             return;
         }

@@ -400,7 +400,7 @@ abstract class QueryPhaseCollectorManager implements CollectorManager<Collector,
         } else if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) {
             // don't compute hit counts via the collector
             hitCountThreshold = 1;
-            shortcutTotalHits = new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
+            shortcutTotalHits = Lucene.TOTAL_HITS_GREATER_OR_EQUAL_TO_ZERO;
         } else {
             // implicit total hit counts are valid only when there is no filter collector in the chain
             final int hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query);
@@ -100,12 +100,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
     }

     public static class CancellableNodesRequest extends BaseNodesRequest<CancellableNodesRequest> {
-        private String requestName;
+        private final String requestName;

-        private CancellableNodesRequest(StreamInput in) throws IOException {
-            super(in);
-            requestName = in.readString();
-        }
-
         public CancellableNodesRequest(String requestName, String... nodesIds) {
             super(nodesIds);
@@ -147,7 +142,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
             boolean shouldBlock,
             CountDownLatch actionStartedLatch
         ) {
-            super(actionName, threadPool, clusterService, transportService, CancellableNodesRequest::new, CancellableNodeRequest::new);
+            super(actionName, threadPool, clusterService, transportService, CancellableNodeRequest::new);
             this.shouldBlock = shouldBlock;
             this.actionStartedLatch = actionStartedLatch;
         }
@@ -144,7 +144,6 @@ public abstract class TaskManagerTestCase extends ESTestCase {
             ThreadPool threadPool,
             ClusterService clusterService,
             TransportService transportService,
-            Writeable.Reader<NodesRequest> request,
             Writeable.Reader<NodeRequest> nodeRequest
         ) {
             super(
@@ -195,19 +195,11 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin, NetworkPlugi
     }

     public static class NodesRequest extends BaseNodesRequest<NodesRequest> {
-        private String requestName;
+        private final String requestName;
         private boolean shouldStoreResult = false;
         private boolean shouldBlock = true;
         private boolean shouldFail = false;

-        NodesRequest(StreamInput in) throws IOException {
-            super(in);
-            requestName = in.readString();
-            shouldStoreResult = in.readBoolean();
-            shouldBlock = in.readBoolean();
-            shouldFail = in.readBoolean();
-        }
-
         NodesRequest(String requestName, String... nodesIds) {
             super(nodesIds);
             this.requestName = requestName;
@@ -109,11 +109,6 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
     public static class NodesRequest extends BaseNodesRequest<NodesRequest> {
         private final String requestName;

-        NodesRequest(StreamInput in) throws IOException {
-            super(in);
-            requestName = in.readString();
-        }
-
         public NodesRequest(String requestName, String... nodesIds) {
             super(nodesIds);
             this.requestName = requestName;
@@ -142,7 +137,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
     abstract class TestNodesAction extends AbstractTestNodesAction<NodesRequest, NodeRequest> {

         TestNodesAction(String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
-            super(actionName, threadPool, clusterService, transportService, NodesRequest::new, NodeRequest::new);
+            super(actionName, threadPool, clusterService, transportService, NodeRequest::new);
         }

         @Override
@@ -21,8 +21,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.RestUtils;
 import org.elasticsearch.rest.action.admin.cluster.RestClusterRerouteAction;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.rest.FakeRestRequest;
@@ -38,6 +38,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.function.Supplier;

+import static org.elasticsearch.action.support.master.AcknowledgedRequest.DEFAULT_ACK_TIMEOUT;
 import static org.elasticsearch.core.TimeValue.timeValueMillis;
 import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM;

@@ -80,7 +81,7 @@ public class ClusterRerouteRequestTests extends ESTestCase {
     }

     private ClusterRerouteRequest randomRequest() {
-        ClusterRerouteRequest request = new ClusterRerouteRequest();
+        ClusterRerouteRequest request = new ClusterRerouteRequest(randomTimeValue(), randomTimeValue());
         int commands = between(0, 10);
         for (int i = 0; i < commands; i++) {
             request.add(randomFrom(RANDOM_COMMAND_GENERATORS).get());
@@ -97,7 +98,7 @@ public class ClusterRerouteRequestTests extends ESTestCase {
         assertEquals(request, request);
         assertEquals(request.hashCode(), request.hashCode());

-        ClusterRerouteRequest copy = new ClusterRerouteRequest().add(
+        ClusterRerouteRequest copy = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).add(
             request.getCommands().commands().toArray(new AllocationCommand[0])
         );
         AcknowledgedRequest<ClusterRerouteRequest> clusterRerouteRequestAcknowledgedRequest = copy.dryRun(request.dryRun())
@@ -196,14 +197,14 @@ public class ClusterRerouteRequestTests extends ESTestCase {
             builder.field("dry_run", original.dryRun());
         }
         params.put("explain", Boolean.toString(original.explain()));
-        if (false == original.ackTimeout().equals(AcknowledgedRequest.DEFAULT_ACK_TIMEOUT) || randomBoolean()) {
-            params.put("timeout", original.ackTimeout().toString());
+        if (false == original.ackTimeout().equals(DEFAULT_ACK_TIMEOUT) || randomBoolean()) {
+            params.put("timeout", original.ackTimeout().getStringRep());
         }
         if (original.isRetryFailed() || randomBoolean()) {
             params.put("retry_failed", Boolean.toString(original.isRetryFailed()));
         }
-        if (false == original.masterNodeTimeout().equals(TimeValue.THIRTY_SECONDS) || randomBoolean()) {
-            params.put(REST_MASTER_TIMEOUT_PARAM, original.masterNodeTimeout().toString());
+        if (false == original.masterNodeTimeout().equals(RestUtils.REST_MASTER_TIMEOUT_DEFAULT) || randomBoolean()) {
+            params.put(REST_MASTER_TIMEOUT_PARAM, original.masterNodeTimeout().getStringRep());
         }
         if (original.getCommands() != null) {
             hasBody = true;
@@ -51,7 +51,7 @@ import static org.hamcrest.Matchers.not;
 public class ClusterRerouteTests extends ESAllocationTestCase {

     public void testSerializeRequest() throws IOException {
-        ClusterRerouteRequest req = new ClusterRerouteRequest();
+        ClusterRerouteRequest req = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
         req.setRetryFailed(randomBoolean());
         req.dryRun(randomBoolean());
         req.explain(randomBoolean());
@@ -86,7 +86,7 @@ public class ClusterRerouteTests extends ESAllocationTestCase {
         var responseRef = new AtomicReference<ClusterRerouteResponse>();
         var responseActionListener = ActionTestUtils.assertNoFailureListener(responseRef::set);

-        var request = new ClusterRerouteRequest().dryRun(true);
+        var request = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).dryRun(true);
         var task = new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask(
             logger,
             allocationService,
@@ -112,7 +112,7 @@ public class ClusterRerouteTests extends ESAllocationTestCase {
         );
         ClusterState clusterState = createInitialClusterState(allocationService);

-        var req = new ClusterRerouteRequest().dryRun(false);
+        var req = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).dryRun(false);
         var task = new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask(
             logger,
             allocationService,
@@ -323,11 +323,9 @@ public class TransportNodesActionTests extends ESTestCase {

     public DataNodesOnlyTransportNodesAction getDataNodesOnlyTransportNodesAction(TransportService transportService) {
         return new DataNodesOnlyTransportNodesAction(
-            THREAD_POOL,
             clusterService,
             transportService,
             new ActionFilters(Collections.emptySet()),
-            TestNodesRequest::new,
             TestNodeRequest::new,
             THREAD_POOL.executor(ThreadPool.Names.GENERIC)
         );
@@ -383,11 +381,9 @@ public class TransportNodesActionTests extends ESTestCase {
     private static class DataNodesOnlyTransportNodesAction extends TestTransportNodesAction {

         DataNodesOnlyTransportNodesAction(
-            ThreadPool threadPool,
             ClusterService clusterService,
             TransportService transportService,
             ActionFilters actionFilters,
-            Writeable.Reader<TestNodesRequest> request,
             Writeable.Reader<TestNodeRequest> nodeRequest,
             Executor nodeExecutor
         ) {
@@ -401,10 +397,6 @@ public class TransportNodesActionTests extends ESTestCase {
     }

     private static class TestNodesRequest extends BaseNodesRequest<TestNodesRequest> {
-        TestNodesRequest(StreamInput in) throws IOException {
-            super(in);
-        }
-
         TestNodesRequest(String... nodesIds) {
             super(nodesIds);
         }
@@ -11,6 +11,7 @@ package org.elasticsearch.client.internal;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
 import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
 import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction;
 import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction;
@@ -118,10 +119,11 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
             .cluster()
             .prepareCreateSnapshot("repo", "bck")
             .execute(new AssertingActionListener<>(TransportCreateSnapshotAction.TYPE.name(), client.threadPool()));
-        client.admin()
-            .cluster()
-            .prepareReroute()
-            .execute(new AssertingActionListener<>(TransportClusterRerouteAction.TYPE.name(), client.threadPool()));
+        client.execute(
+            TransportClusterRerouteAction.TYPE,
+            new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT),
+            new AssertingActionListener<>(TransportClusterRerouteAction.TYPE.name(), client.threadPool())
+        );

         // choosing arbitrary indices admin actions to test
         client.admin()
@@ -142,7 +142,7 @@
                 state,
                 state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING)
             );
-            state = cluster.reroute(state, new ClusterRerouteRequest());
+            state = cluster.reroute(state, new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT));
         }

         IndexShardRoutingTable preTable = state.routingTable().index("index").shard(0);
@@ -123,7 +123,8 @@
         for (int i = 0; i < randomIntBetween(4, 8); i++) {
             DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(createNode()).build();
             state = ClusterState.builder(state).nodes(newNodes).build();
-            state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after adding node
+            // always reroute after adding node
+            state = cluster.reroute(state, new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT));
         }

         // Log the node versions (for debugging if necessary)
@@ -441,7 +441,7 @@

         // randomly reroute
         if (rarely()) {
-            state = cluster.reroute(state, new ClusterRerouteRequest());
+            state = cluster.reroute(state, new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT));
         }

         // randomly start and fail allocated shards
Some files were not shown because too many files have changed in this diff.