Merge remote-tracking branch 'upstream/main' into lucene_snapshot_9_11

This commit is contained in:
Benjamin Trent 2024-06-12 08:05:36 -04:00
commit 08298dcd69
266 changed files with 5713 additions and 4260 deletions

View file

@ -26,6 +26,10 @@ develocity {
if (jenkinsUrl?.host?.endsWith('elastic.co') || jenkinsUrl?.host?.endsWith('elastic.dev') || System.getenv('BUILDKITE') == 'true') {
publishing.onlyIf { true }
server = 'https://gradle-enterprise.elastic.co'
} else {
publishing.onlyIf {
server.isPresent();
}
}

View file

@ -71,7 +71,6 @@ public abstract class RestrictedBuildApiService implements BuildService<Restrict
map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-die-with-dignity");
map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-error-query");
map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-latency-simulating-directory");
map.put(LegacyRestTestBasePlugin.class, ":test:external-modules:test-seek-tracking-directory");
map.put(LegacyRestTestBasePlugin.class, ":test:yaml-rest-runner");
map.put(LegacyRestTestBasePlugin.class, ":distribution:archives:integ-test-zip");
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:core");

View file

@ -36,6 +36,15 @@ ext.docsFileTree = fileTree(projectDir) {
}
}
tasks.named("yamlRestTest") {
if (BuildParams.isSnapshotBuild() == false) {
// LOOKUP is only available in snapshot builds
systemProperty 'tests.rest.blacklist', [
"reference/esql/processing-commands/lookup/esql-lookup-example"
].join(',')
}
}
/* List of files that have snippets that will not work until platinum tests can occur ... */
tasks.named("buildRestTests").configure {
getExpectedUnconvertedCandidates().addAll(

View file

@ -0,0 +1,6 @@
pr: 108421
summary: "[ES|QL] Support Named and Positional Parameters in `EsqlQueryRequest`"
area: ES|QL
type: enhancement
issues:
- 107029

View file

@ -0,0 +1,5 @@
pr: 109492
summary: Add hexstring support to byte painless scorers
area: Search
type: bug
issues: []

View file

@ -0,0 +1,6 @@
pr: 109613
summary: Consider `error_trace` supported by all endpoints
area: Infra/REST API
type: bug
issues:
- 109612

View file

@ -68,6 +68,52 @@ state must ever be reloaded from persisted state.
## Deprecations
## Backwards Compatibility
Major releases are mostly about breaking compatibility and dropping deprecated functionality.
Elasticsearch versions are composed of three pieces of information: the major version, the minor version, and the patch version,
in that order (major.minor.patch). Patch releases are typically bug fixes; minor releases contain improvements / new features;
and major releases essentially break compatibility and enable removal of deprecated functionality. As an example, each of 8.0.0,
8.3.0 and 8.3.1 specifies an exact release version. They all have the same major version (8) and the last two have the same minor
version (8.3). Multiversion compatibility within a cluster, or backwards compatibility with older version nodes, is
guaranteed only across the specific version combinations described below.
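Before getting into the specific guarantees, a quick sketch of the ordering that major.minor.patch imposes (a hypothetical `SemVer` record for illustration, not Elasticsearch's actual `Version` class):

```java
// Minimal sketch of major.minor.patch ordering; Elasticsearch's real
// org.elasticsearch.Version class is considerably richer than this.
record SemVer(int major, int minor, int patch) implements Comparable<SemVer> {
    static SemVer parse(String version) {
        String[] parts = version.split("\\.");
        return new SemVer(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]), Integer.parseInt(parts[2]));
    }

    @Override
    public int compareTo(SemVer other) {
        int byMajor = Integer.compare(major, other.major);
        if (byMajor != 0) return byMajor;
        int byMinor = Integer.compare(minor, other.minor);
        return byMinor != 0 ? byMinor : Integer.compare(patch, other.patch);
    }
}
```

Here `SemVer.parse("8.3.0")` and `SemVer.parse("8.3.1")` share the same major (8) and minor (8.3), and both sort after `SemVer.parse("8.0.0")`.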
### Transport Layer Backwards Compatibility
Elasticsearch nodes can communicate over the network with all node versions within the same major release. All versions within
one major version X are also compatible with the last minor version releases of the previous major version, i.e. (X-1).last.
More concretely, all 8.x.x version nodes can communicate with all 7.17.x version nodes.
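Expressed as a predicate, the rule might look like the following sketch, under the assumption that 7.17 was the last 7.x minor; the real check lives in Elasticsearch's version-handling code:

```java
// Sketch of the transport-layer rule above: nodes on the same major are always
// wire-compatible; across majors, only the previous major's last minor is.
class WireCompat {
    static boolean wireCompatible(int majorA, int minorA, int majorB, int minorB) {
        if (majorA == majorB) {
            return true;
        }
        int olderMajor = Math.min(majorA, majorB);
        int newerMajor = Math.max(majorA, majorB);
        int olderMinor = majorA < majorB ? minorA : minorB;
        return newerMajor == olderMajor + 1 && olderMinor == lastMinorOf(olderMajor);
    }

    static int lastMinorOf(int major) {
        if (major == 7) return 17; // 7.17 was the final 7.x minor series
        throw new IllegalArgumentException("last minor of major " + major + " not known in this sketch");
    }
}
```

So `wireCompatible(8, 3, 7, 17)` is true, while `wireCompatible(8, 3, 7, 16)` is false.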
### Index Format Backwards Compatibility
Index data format backwards compatibility is guaranteed with all versions of the previous major release. All 8.x.x version nodes,
for example, can read index data written by any 7.x.x version node. 9.x.x versions, however, will not be able to read 7.x.x format
data files.
Elasticsearch does not have an upgrade process to convert from older to newer index data formats. The user is expected to run
`reindex` on any remaining untouched data from a previous version upgrade before upgrading to the next version. There is a good
chance that older version index data will age out and be deleted before the user does the next upgrade, but `reindex` can be used
if that is not the case.
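As an illustration, here is a minimal `reindex` call using the low-level REST client (the same `Request` API used by `ApmIT` later in this commit); the index names are hypothetical:

```java
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

// Sketch: copy documents out of an old-format index before the next major
// upgrade. "books-v7" and "books-v8" are made-up index names.
class ReindexSketch {
    static void reindexBeforeUpgrade(RestClient client) throws Exception {
        Request reindex = new Request("POST", "/_reindex");
        reindex.setJsonEntity("""
            {
              "source": { "index": "books-v7" },
              "dest":   { "index": "books-v8" }
            }""");
        Response response = client.performRequest(reindex); // throws on non-2xx
        // once this completes, the old index can be deleted and the upgrade can proceed
    }
}
```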
### Snapshot Backwards Compatibility
Snapshots taken by a cluster of version X cannot be read by a cluster running older version nodes. However, snapshots taken by an
older version cluster can continue to be read from and written to by newer version clusters: this compatibility goes back many
major versions. If a newer version cluster writes to a snapshot repository containing snapshots from an older version, then it
will do so in a way that leaves the repository format (metadata and file layout) readable by those older versions.
Restoring indexes that have different and no longer supported data formats can be tricky: see the
[public snapshot compatibility docs][] for details.
[public snapshot compatibility docs]: https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html#snapshot-index-compatibility
### Upgrade
See the [public upgrade docs][] for the upgrade process.
[public upgrade docs]: https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html
## Plugins
(what warrants a plugin?)

View file

@ -10,7 +10,7 @@ The following specialized API is available in the Score context.
==== Static Methods
The following methods are directly callable without a class/instance qualifier. Note parameters denoted by a (*) are treated as read-only values.
* double cosineSimilarity(List *, String *)
* double cosineSimilarity(Object *, String *)
* double decayDateExp(String *, String *, String *, double *, ZonedDateTime)
* double decayDateGauss(String *, String *, String *, double *, ZonedDateTime)
* double decayDateLinear(String *, String *, String *, double *, ZonedDateTime)
@ -20,9 +20,9 @@ The following methods are directly callable without a class/instance qualifier.
* double decayNumericExp(double *, double *, double *, double *, double)
* double decayNumericGauss(double *, double *, double *, double *, double)
* double decayNumericLinear(double *, double *, double *, double *, double)
* double dotProduct(List *, String *)
* double l1norm(List *, String *)
* double l2norm(List *, String *)
* double dotProduct(Object *, String *)
* double l1norm(Object *, String *)
* double l2norm(Object *, String *)
* double randomScore(int *)
* double randomScore(int *, String *)
* double saturation(double, double)

View file

@ -15,9 +15,10 @@ This getting started is also available as an https://github.com/elastic/elastics
[[esql-getting-started-prerequisites]]
=== Prerequisites
To follow along with the queries in this guide, you'll need an {es} deployment with our sample data.
To follow along with the queries in this guide, you can either set up your own
deployment, or use Elastic's public {esql} demo environment.
include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-sample-data.asciidoc[tag=own-deployment]
include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc[]
[discrete]
[[esql-getting-started-running-queries]]
@ -268,8 +269,7 @@ Before you can use `ENRICH`, you first need to
<<esql-create-enrich-policy,create>> and <<esql-execute-enrich-policy,execute>>
an <<esql-enrich-policy,enrich policy>>.
include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc[tag=own-deployment]
include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc[]
After creating and executing a policy, you can use it with the `ENRICH`
command:

View file

@ -27,7 +27,7 @@ adding the other fields from the `table` to the output.
*Examples*
// tag::examples[]
[source,console]
[source,console,id=esql-lookup-example]
----
POST /_query?format=txt
{
@ -40,8 +40,8 @@ POST /_query?format=txt
""",
"tables": {
"era": {
"author:keyword": ["Frank Herbert", "Peter F. Hamilton", "Vernor Vinge", "Alastair Reynolds", "James S.A. Corey"],
"era:keyword" : [ "The New Wave", "Diamond", "Diamond", "Diamond", "Hadron"]
"author": {"keyword": ["Frank Herbert", "Peter F. Hamilton", "Vernor Vinge", "Alastair Reynolds", "James S.A. Corey"]},
"era": {"keyword": [ "The New Wave", "Diamond", "Diamond", "Diamond", "Hadron"]}
}
}
}

View file

@ -23,7 +23,9 @@ For more information about creating and updating the {es} keystore, see
==== General security settings
`xpack.security.enabled`::
(<<static-cluster-setting,Static>>)
Defaults to `true`, which enables {es} {security-features} on the node. +
Defaults to `true`, which enables {es} {security-features} on the node.
This setting must be enabled to use Elasticsearch's authentication,
authorization and audit features. +
+
--
If set to `false`, {security-features} are disabled, which is not recommended.

View file

@ -1,6 +1,6 @@
// tag::own-deployment[]
First, you'll need to ingest the sample data. In {kib}, open the main menu and select *Dev
First, ingest some sample data. In {kib}, open the main menu and select *Dev
Tools*. Run the following two requests:
[source,console]

File diff suppressed because one or more lines are too long

View file

@ -27,9 +27,9 @@ static_import {
double decayDateLinear(String, String, String, double, ZonedDateTime) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayDateLinear
double decayDateExp(String, String, String, double, ZonedDateTime) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayDateExp
double decayDateGauss(String, String, String, double, ZonedDateTime) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayDateGauss
double l1norm(org.elasticsearch.script.ScoreScript, List, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$L1Norm
double l2norm(org.elasticsearch.script.ScoreScript, List, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$L2Norm
double cosineSimilarity(org.elasticsearch.script.ScoreScript, List, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$CosineSimilarity
double dotProduct(org.elasticsearch.script.ScoreScript, List, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$DotProduct
double l1norm(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$L1Norm
double l2norm(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$L2Norm
double cosineSimilarity(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$CosineSimilarity
double dotProduct(org.elasticsearch.script.ScoreScript, Object, String) bound_to org.elasticsearch.script.VectorScoreScriptUtils$DotProduct
}
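Loosening these signatures from `List` to `Object` lets scripts pass either a list of numbers or a hex-encoded string for byte vectors. A rough sketch of the dispatch this implies (a hypothetical helper; the actual handling lives in `VectorScoreScriptUtils`):

```java
import java.util.List;

// Hypothetical sketch: accept a query vector as either a List of numbers or a
// hex string; the real implementation is in VectorScoreScriptUtils.
class ByteVectorSketch {
    static byte[] toByteVector(Object queryVector) {
        if (queryVector instanceof List<?> list) {
            byte[] bytes = new byte[list.size()];
            for (int i = 0; i < bytes.length; i++) {
                bytes[i] = ((Number) list.get(i)).byteValue();
            }
            return bytes;
        }
        if (queryVector instanceof String hex) {
            // e.g. "006ff30e84" decodes to the signed bytes [0, 111, -13, 14, -124]
            byte[] bytes = new byte[hex.length() / 2];
            for (int i = 0; i < bytes.length; i++) {
                bytes[i] = (byte) Integer.parseInt(hex.substring(2 * i, 2 * i + 2), 16);
            }
            return bytes;
        }
        throw new IllegalArgumentException("unsupported query vector type: " + queryVector.getClass());
    }
}
```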

View file

@ -77,7 +77,35 @@ setup:
- match: {hits.hits.2._id: "1"}
- match: {hits.hits.2._score: 1632.0}
---
"Dot Product hexidecimal":
- requires:
cluster_features: "gte_v8.14.1"
reason: "support for hexidecimal byte vectors added in 8.14"
- do:
headers:
Content-Type: application/json
search:
rest_total_hits_as_int: true
body:
query:
script_score:
query: {match_all: {} }
script:
source: "dotProduct(params.query_vector, 'vector')"
params:
query_vector: "006ff30e84"
- match: {hits.total: 3}
- match: {hits.hits.0._id: "2"}
- match: {hits.hits.0._score: 28732.0}
- match: {hits.hits.1._id: "3"}
- match: {hits.hits.1._score: 17439.0}
- match: {hits.hits.2._id: "1"}
- match: {hits.hits.2._score: 1632.0}
---
"Cosine Similarity":
- do:
@ -108,6 +136,39 @@ setup:
- gte: {hits.hits.2._score: 0.509}
- lte: {hits.hits.2._score: 0.512}
---
"Cosine Similarity hexidecimal":
- requires:
cluster_features: "gte_v8.14.1"
reason: "support for hexidecimal byte vectors added in 8.14"
- do:
headers:
Content-Type: application/json
search:
rest_total_hits_as_int: true
body:
query:
script_score:
query: {match_all: {} }
script:
source: "cosineSimilarity(params.query_vector, 'vector')"
params:
query_vector: "006ff30e84"
- match: {hits.total: 3}
- match: {hits.hits.0._id: "2"}
- gte: {hits.hits.0._score: 0.995}
- lte: {hits.hits.0._score: 0.998}
- match: {hits.hits.1._id: "3"}
- gte: {hits.hits.1._score: 0.829}
- lte: {hits.hits.1._score: 0.832}
- match: {hits.hits.2._id: "1"}
- gte: {hits.hits.2._score: 0.509}
- lte: {hits.hits.2._score: 0.512}
---
"Cosine similarity with indexed vector":
- do:

View file

@ -70,6 +70,35 @@ setup:
- gte: {hits.hits.2._score: 29.0}
---
"L1 norm hexidecimal":
- requires:
cluster_features: "gte_v8.14.1"
reason: "support for hexidecimal byte vectors added in 8.14"
- do:
headers:
Content-Type: application/json
search:
rest_total_hits_as_int: true
body:
query:
script_score:
query: {match_all: {} }
script:
source: "l1norm(params.query_vector, 'my_dense_vector')"
params:
query_vector: "006ff30e84"
- match: {hits.total: 3}
- match: {hits.hits.0._id: "1"}
- match: {hits.hits.0._score: 246.0}
- match: {hits.hits.1._id: "3"}
- match: {hits.hits.1._score: 117.0}
- match: {hits.hits.2._id: "2"}
- gte: {hits.hits.2._score: 29.0}
---
"L2 norm":
- do:
headers:
@ -95,6 +124,38 @@ setup:
- gte: {hits.hits.1._score: 94.407}
- lte: {hits.hits.1._score: 94.41}
- match: {hits.hits.2._id: "2"}
- gte: {hits.hits.2._score: 15.263}
- lte: {hits.hits.2._score: 15.266}
---
"L2 norm hexidecimal":
- requires:
cluster_features: "gte_v8.14.1"
reason: "support for hexidecimal byte vectors added in 8.14"
- do:
headers:
Content-Type: application/json
search:
rest_total_hits_as_int: true
body:
query:
script_score:
query: {match_all: {} }
script:
source: "l2norm(params.query_vector, 'my_dense_vector')"
params:
query_vector: "006ff30e84"
- match: {hits.total: 3}
- match: {hits.hits.0._id: "1"}
- gte: {hits.hits.0._score: 158.624}
- lte: {hits.hits.0._score: 158.627}
- match: {hits.hits.1._id: "3"}
- gte: {hits.hits.1._score: 94.407}
- lte: {hits.hits.1._score: 94.41}
- match: {hits.hits.2._id: "2"}
- gte: {hits.hits.2._score: 15.263}
- lte: {hits.hits.2._score: 15.266}

View file

@ -59,9 +59,6 @@ tests:
- class: org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAppendTests
method: testEvaluateBlockWithoutNulls {TestCase=<cartesian_shape>, <cartesian_shape>}
issue: https://github.com/elastic/elasticsearch/issues/109409
- class: "org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT"
issue: "https://github.com/elastic/elasticsearch/issues/109478"
method: "test {yaml=reference/esql/processing-commands/lookup/line_31}"
# Examples:
#

View file

@ -1,44 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
import org.elasticsearch.gradle.Architecture
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.internal.info.BuildParams
import static org.elasticsearch.gradle.internal.distribution.InternalElasticsearchDistributionTypes.DOCKER
apply plugin: 'elasticsearch.standalone-rest-test'
apply plugin: 'elasticsearch.test.fixtures'
apply plugin: 'elasticsearch.internal-distribution-download'
dockerCompose {
environment.put 'STACK_VERSION', BuildParams.snapshotBuild ? VersionProperties.elasticsearch : VersionProperties.elasticsearch + "-SNAPSHOT"
}
elasticsearch_distributions {
docker {
type = DOCKER
architecture = Architecture.current()
version = VersionProperties.getElasticsearch()
failIfUnavailable = false // This ensures we skip this testing if Docker is unavailable
}
}
tasks.named("preProcessFixture").configure {
dependsOn elasticsearch_distributions.matching { it.architecture == Architecture.current() }
}
tasks.register("integTest", Test) {
outputs.doNotCacheIf('Build cache is disabled for Docker tests') { true }
maxParallelForks = '1'
include '**/*IT.class'
}
tasks.named("check").configure {
dependsOn "integTest"
}

View file

@ -1,34 +0,0 @@
---
apm_server:
cluster: ['manage_ilm', 'manage_security', 'manage_api_key']
indices:
- names: ['apm-*', 'logs-apm*', 'metrics-apm*', 'traces-apm*']
privileges: ['write', 'create_index', 'manage', 'manage_ilm']
applications:
- application: 'apm'
privileges: ['sourcemap:write', 'event:write', 'config_agent:read']
resources: '*'
beats:
cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm', 'manage_security', 'manage_api_key']
indices:
- names: ['filebeat-*', 'shrink-filebeat-*']
privileges: ['all']
filebeat:
cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm']
indices:
- names: ['filebeat-*', 'shrink-filebeat-*']
privileges: ['all']
heartbeat:
cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm']
indices:
- names: ['heartbeat-*', 'shrink-heartbeat-*']
privileges: ['all']
metricbeat:
cluster: ['manage_index_templates', 'monitor', 'manage_ingest_pipelines', 'manage_ilm']
indices:
- names: ['metricbeat-*', 'shrink-metricbeat-*']
privileges: ['all']
opbeans:
indices:
- names: ['opbeans-*']
privileges: ['write', 'read']

View file

@ -1,2 +0,0 @@
elastic/fleet-server/elastic-package-fleet-server-token:{PBKDF2_STRETCH}10000$PNiVyY96dHwRfoDszBvYPAz+mSLbC+NhtPh63dblDZU=$dAY1tXX1U5rXB+2Lt7m0L2LUNSb1q5nRaIqPNZTBxb8=
elastic/kibana/elastic-package-kibana-token:{PBKDF2_STRETCH}10000$wIEFHIIIZ2ap0D0iQsyw0MfB7YuFA1bHnXAmlCoL4Gg=$YxvIJnasjLZyDQZpmFBiJHdR/CGXd5BnVm013Jty6p0=

View file

@ -1,9 +0,0 @@
admin:$2a$10$xiY0ZzOKmDDN1p3if4t4muUBwh2.bFHADoMRAWQgSClm4ZJ4132Y.
apm_server_user:$2a$10$iTy29qZaCSVn4FXlIjertuO8YfYVLCbvoUAJ3idaXfLRclg9GXdGG
apm_user_ro:$2a$10$hQfy2o2u33SapUClsx8NCuRMpQyHP9b2l4t3QqrBA.5xXN2S.nT4u
beats_user:$2a$10$LRpKi4/Q3Qo4oIbiu26rH.FNIL4aOH4aj2Kwi58FkMo1z9FgJONn2
filebeat_user:$2a$10$sFxIEX8tKyOYgsbJLbUhTup76ssvSD3L4T0H6Raaxg4ewuNr.lUFC
heartbeat_user:$2a$10$nKUGDr/V5ClfliglJhfy8.oEkjrDtklGQfhd9r9NoFqQeoNxr7uUK
kibana_system_user:$2a$10$nN6sRtQl2KX9Gn8kV/.NpOLSk6Jwn8TehEDnZ7aaAgzyl/dy5PYzW
metricbeat_user:$2a$10$5PyTd121U2ZXnFk9NyqxPuLxdptKbB8nK5egt6M5/4xrKUkk.GReG
opbeans_user:$2a$10$iTy29qZaCSVn4FXlIjertuO8YfYVLCbvoUAJ3idaXfLRclg9GXdGG

View file

@ -1,13 +0,0 @@
apm_server:apm_server_user
apm_system:apm_server_user
apm_user:apm_server_user,apm_user_ro
beats:beats_user
beats_system:beats_user,filebeat_user,heartbeat_user,metricbeat_user
filebeat:filebeat_user
heartbeat:heartbeat_user
ingest_admin:apm_server_user
kibana_system:kibana_system_user
kibana_user:apm_server_user,apm_user_ro,beats_user,filebeat_user,heartbeat_user,metricbeat_user,opbeans_user
metricbeat:metricbeat_user
opbeans:opbeans_user
superuser:admin

View file

@ -1,78 +0,0 @@
xpack.fleet.packages:
- name: system
version: latest
- name: elastic_agent
version: latest
- name: apm
version: latest
- name: fleet_server
version: latest
xpack.fleet.agentPolicies:
- name: Fleet Server + APM policy
id: fleet-server-apm-policy
description: Fleet server policy with APM and System logs and metrics enabled
namespace: default
is_default_fleet_server: true
is_managed: false
monitoring_enabled:
- logs
- metrics
package_policies:
- name: system-1
package:
name: system
- name: apm-1
package:
name: apm
inputs:
- type: apm
keep_enabled: true
vars:
- name: host
value: 0.0.0.0:8200
frozen: true
- name: url
value: "${ELASTIC_APM_SERVER_URL}"
frozen: true
- name: enable_rum
value: true
frozen: true
- name: read_timeout
value: 1m
frozen: true
- name: shutdown_timeout
value: 2m
frozen: true
- name: write_timeout
value: 1m
frozen: true
- name: rum_allow_headers
value:
- x-custom-header
frozen: true
- name: secret_token
value: "${ELASTIC_APM_SECRET_TOKEN}"
frozen: true
- name: tls_enabled
value: ${ELASTIC_APM_TLS}
frozen: true
- name: tls_certificate
value: /usr/share/apmserver/config/certs/tls.crt
frozen: true
- name: tls_key
value: /usr/share/apmserver/config/certs/tls.key
frozen: true
- name: Fleet Server
package:
name: fleet_server
inputs:
- type: fleet-server
keep_enabled: true
vars:
- name: host
value: 0.0.0.0
frozen: true
- name: port
value: 8220
frozen: true

View file

@ -1,154 +0,0 @@
version: "2.4"
networks:
default:
name: apm-integration-testing
services:
apmserver:
depends_on:
kibana:
condition: service_healthy
environment:
FLEET_ELASTICSEARCH_HOST: null
FLEET_SERVER_ELASTICSEARCH_INSECURE: "1"
FLEET_SERVER_ENABLE: "1"
FLEET_SERVER_HOST: 0.0.0.0
FLEET_SERVER_INSECURE_HTTP: "1"
FLEET_SERVER_POLICY_ID: fleet-server-apm-policy
FLEET_SERVER_PORT: "8220"
FLEET_SERVER_SERVICE_TOKEN: AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL2VsYXN0aWMtcGFja2FnZS1mbGVldC1zZXJ2ZXItdG9rZW46bmgtcFhoQzRRQ2FXbms2U0JySGlWQQ
KIBANA_FLEET_HOST: null
KIBANA_FLEET_SERVICE_TOKEN: AAEAAWVsYXN0aWMvZmxlZXQtc2VydmVyL2VsYXN0aWMtcGFja2FnZS1mbGVldC1zZXJ2ZXItdG9rZW46bmgtcFhoQzRRQ2FXbms2U0JySGlWQQ
KIBANA_FLEET_SETUP: "1"
healthcheck:
test: /bin/true
image: docker.elastic.co/beats/elastic-agent:${STACK_VERSION}
labels:
- co.elastic.apm.stack-version=${STACK_VERSION}
logging:
driver: json-file
options:
max-file: "5"
max-size: 2m
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ./scripts/tls/apmserver/cert.crt:/usr/share/apmserver/config/certs/tls.crt
- ./scripts/tls/apmserver/key.pem:/usr/share/apmserver/config/certs/tls.key
elasticsearch:
environment:
- action.destructive_requires_name=false
- bootstrap.memory_lock=true
- cluster.name=docker-cluster
- cluster.routing.allocation.disk.threshold_enabled=false
- discovery.type=single-node
- ES_JAVA_OPTS=-Xms1g -Xmx1g
- indices.id_field_data.enabled=true
- ingest.geoip.downloader.enabled=false
- path.repo=/usr/share/elasticsearch/data/backups
- xpack.license.self_generated.type=trial
- xpack.monitoring.collection.enabled=true
- xpack.security.authc.anonymous.roles=remote_monitoring_collector
- xpack.security.authc.api_key.enabled=true
- xpack.security.authc.realms.file.file1.order=0
- xpack.security.authc.realms.native.native1.order=1
- xpack.security.authc.token.enabled=true
- xpack.security.enabled=true
# APM specific settings. We don't configure `secret_key` because Kibana is configured with a blank key
- telemetry.tracing.enabled=true
- telemetry.agent.server_url=http://apmserver:8200
# Send traces to APM server aggressively
- telemetry.agent.metrics_interval=1s
# Record everything
- telemetry.agent.transaction_sample_rate=1
- telemetry.agent.log_level=debug
healthcheck:
interval: 20s
retries: 10
test: curl -s -k http://localhost:9200/_cluster/health | grep -vq '"status":"red"'
image: elasticsearch:test
labels:
- co.elastic.apm.stack-version=${STACK_VERSION}
- co.elastic.metrics/module=elasticsearch
- co.elastic.metrics/metricsets=node,node_stats
- co.elastic.metrics/hosts=http://$${data.host}:9200
logging:
driver: json-file
options:
max-file: "5"
max-size: 2m
ports:
# - 127.0.0.1:9200:9200
- "9200"
ulimits:
memlock:
hard: -1
soft: -1
volumes:
- ./config/elasticsearch/roles.yml:/usr/share/elasticsearch/config/roles.yml
- ./config/elasticsearch/users:/usr/share/elasticsearch/config/users
- ./config/elasticsearch/users_roles:/usr/share/elasticsearch/config/users_roles
- ./config/elasticsearch/service_tokens:/usr/share/elasticsearch/config/service_tokens
kibana:
depends_on:
elasticsearch:
condition: service_healthy
environment:
ELASTICSEARCH_HOSTS: http://elasticsearch:9200
ELASTICSEARCH_PASSWORD: changeme
ELASTICSEARCH_USERNAME: kibana_system_user
ELASTIC_APM_SECRET_TOKEN: ""
ELASTIC_APM_SERVER_URL: http://apmserver:8200
ELASTIC_APM_TLS: "false"
SERVER_HOST: 0.0.0.0
SERVER_NAME: kibana.example.org
STATUS_ALLOWANONYMOUS: "true"
TELEMETRY_ENABLED: "false"
XPACK_APM_SERVICEMAPENABLED: "true"
XPACK_ENCRYPTEDSAVEDOBJECTS_ENCRYPTIONKEY: fhjskloppd678ehkdfdlliverpoolfcr
XPACK_FLEET_AGENTS_ELASTICSEARCH_HOSTS: '["http://elasticsearch:9200"]'
# XPACK_FLEET_REGISTRYURL: https://epr-snapshot.elastic.co
XPACK_MONITORING_ENABLED: "true"
XPACK_REPORTING_ROLES_ENABLED: "false"
XPACK_SECURITY_ENCRYPTIONKEY: fhjskloppd678ehkdfdlliverpoolfcr
XPACK_SECURITY_LOGINASSISTANCEMESSAGE: Login&#32;details:&#32;`admin/changeme`.&#32;Further&#32;details&#32;[here](https://github.com/elastic/apm-integration-testing#logging-in).
XPACK_SECURITY_SESSION_IDLETIMEOUT: 1M
XPACK_SECURITY_SESSION_LIFESPAN: 3M
XPACK_XPACK_MAIN_TELEMETRY_ENABLED: "false"
healthcheck:
interval: 10s
retries: 30
start_period: 10s
test: curl -s -k http://kibana:5601/api/status | grep -q 'All services are available'
image: docker.elastic.co/kibana/kibana:${STACK_VERSION}
labels:
- co.elastic.apm.stack-version=${STACK_VERSION}
logging:
driver: json-file
options:
max-file: "5"
max-size: 2m
# ports:
# - 127.0.0.1:5601:5601
volumes:
- ./config/kibana/kibana-8.yml:/usr/share/kibana/config/kibana.yml
# Rather than mess around with threads in the test, just run `curl` in a
# loop to generate traces with a known path
tracegenerator:
depends_on:
apmserver:
condition: service_healthy
elasticsearch:
condition: service_healthy
kibana:
condition: service_healthy
# Official curl image
image: curlimages/curl
command: /bin/sh -c "while true; do curl -s -k -u admin:changeme http://elasticsearch:9200/_nodes/stats >/dev/null ; sleep 3; done"
volumes:
esdata:
driver: local

View file

@ -1,27 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEpjCCAo4CCQDR9oXvJbopHjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDDAph
cG0tc2VydmVyMB4XDTE5MTExOTE1MjE0NVoXDTI5MTExNjE1MjE0NVowFTETMBEG
A1UEAwwKYXBtLXNlcnZlcjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
ANduj3tyeBIHj0Bf5aKMRImhRbkAaQ2p6T0WsHKlicd1P4/D5l783+vVsbwprRqR
qXAUsUWcUSYJXBX1qtC2MtKqi4xYUTAyQV5dgrMoCV+vtZY31SK4kolumd1vVMh+
po+IwueLvLMFK1tQGIXlJblSDYVauIt5rp79IIhWOY/YpcQy9RaxykljTYTbPjLW
m3T92bow1nLh5GL3ThJEAkLO+hkJv9716+YRWYtPcojiGzpLjFgF50MoP4Lilm9U
r2tBnqpvb2PwE1kkly8DDBtcg+HM4tgGsbdWo2Pgp82ARV4DL+JlNJ+SVQZAmTbc
3LMwxnUJtuKMeh2rwb9HOyuONXfF1PiEzyDhAlabyS6toAGy1mlMAop1ClO1wV5O
Ayy47TeD6ziNyMKB7/XHdW4rb16K6j6EV27Bg2ZK6Vrfkwm3aRbpztfVRMX+HMUp
ktH+V2OwJoP7l7lzw/q8yMdopG57zRJa1dx8NWP/UKi8Ej+87DYyWJODiNHD7PM7
9vfd47lNcWxw+p7ntEpnn6EeW2r7SlmfhtdIxL2DiTiKAq9Ktyi9cFnGnDfSDJST
T1G1vIDdG33Vt2Y5+wqzCGbYyMsAOaMdXZSeniXXFR4GX7iz+AGoKojBbmoo9VqP
mvbudNU+ysha4IJvTfOczJZgstxCXG+MXbEXFSgysImFAgMBAAEwDQYJKoZIhvcN
AQELBQADggIBAFh2YxRT6PaAXDq38rm25I91fCP9PzVPDuIkn9wl85e7avuh6FZi
R0nQG6+lB1i8XSm9UMl9+ISjE+EQqry6KB6mDsakGOsDuEUdZiw3sGJIUWQkQArB
ym5DqxKpeZBeVHBxnrEbQBV8s0j8uxd7X1E0ImfMKbKfNr/B5qPRXkREvydLWYvq
8yMcUPu1MiZFUgAGr9Py39kW3lbRPWZii/2bN8AB9h6gAhq5TiennfgJZsRiuSta
w/TmOcAuz4e/KPIzfvL/YCWbLyJ2vrIQeOc4N7jZfqMmLKgYCRyjI7+amfuyKPBW
J4psfJ0ssHdTxAUK65vghJ2s6FLvU3HoxzetZsJp5kj6CKYaFYkB4NkkYnlY8MP/
T68oOmdYwwwrcBmDtZwoppRb5zhev5k3aykgZ/B/vqVJE9oIPkp/7wqEP1WqSiUe
AgyQBu8UN4ho2Rf6nZezZ4cjW/0WyhGOHQBFmwPI2MBGsQxF2PF4lKkJtaywIEm7
4UsEQYK7Hf2J2OccWGvfo5HZ5tsSbuOGAf0bfHfaBQBsvzWet+TO6XX9VrWjnAKl
bH+mInmnd9v2oABFl9Djv/Cw+lEAxxkCTW+DcwdEFJREPab5xhQDEpQQ/Ef0ihvg
/ZtJQeoOYfrLN6K726QmoRWxvqxLyWK3gztcO1svHqr/cMt3ooLJEaqU
-----END CERTIFICATE-----

View file

@ -1,52 +0,0 @@
-----BEGIN PRIVATE KEY-----
MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQDXbo97cngSB49A
X+WijESJoUW5AGkNqek9FrBypYnHdT+Pw+Ze/N/r1bG8Ka0akalwFLFFnFEmCVwV
9arQtjLSqouMWFEwMkFeXYKzKAlfr7WWN9UiuJKJbpndb1TIfqaPiMLni7yzBStb
UBiF5SW5Ug2FWriLea6e/SCIVjmP2KXEMvUWscpJY02E2z4y1pt0/dm6MNZy4eRi
904SRAJCzvoZCb/e9evmEVmLT3KI4hs6S4xYBedDKD+C4pZvVK9rQZ6qb29j8BNZ
JJcvAwwbXIPhzOLYBrG3VqNj4KfNgEVeAy/iZTSfklUGQJk23NyzMMZ1CbbijHod
q8G/RzsrjjV3xdT4hM8g4QJWm8kuraABstZpTAKKdQpTtcFeTgMsuO03g+s4jcjC
ge/1x3VuK29eiuo+hFduwYNmSula35MJt2kW6c7X1UTF/hzFKZLR/ldjsCaD+5e5
c8P6vMjHaKRue80SWtXcfDVj/1CovBI/vOw2MliTg4jRw+zzO/b33eO5TXFscPqe
57RKZ5+hHltq+0pZn4bXSMS9g4k4igKvSrcovXBZxpw30gyUk09RtbyA3Rt91bdm
OfsKswhm2MjLADmjHV2Unp4l1xUeBl+4s/gBqCqIwW5qKPVaj5r27nTVPsrIWuCC
b03znMyWYLLcQlxvjF2xFxUoMrCJhQIDAQABAoICAQCfClIGsoUN2mLZBXLDw4W9
jT+pyjHEEpHLtXphyO+kPlzER71Elq7AriveW24d1TcfNUeBulr2F6bR12FZX4i5
mYoX/AND73Xusl4Q4Re6ej82PNWuIlCcAPi6Trxqn4VbJX2t7q1KBCDz8neIMZjd
7UNqFYV0Akr1uK1RuUYZebk21N+29139O8A4upp6cZCml9kq6W8HtNgkb6pFNcvt
gluELHxnn2mdmWVfwTEu+K1dJfTf7svB+m6Ys6qXWg9+wRzfehDj2JKQFsE9xaQk
dvItulIlZRvB28YXr/xxa6bKNtQc8NYej6sRSJNTu017RCDeumM3cLmeOfR4v59f
tkMWnFcA3ykmsaK2FiQyX+MoWvs5vdT7/yNIfz3a4MErcWg8z3FDbffKfbhgsb+2
z4Ub6fIRKZykW2ajN7t0378bMmJ3rPT66QF40aNNeWasF3EHcwekDPpsHIBJoY4G
9aG6uTUmRkC+NGeP9HroxkvDo2NbXn8XGOEJS64rwsME3CsUi1A5ZY0XLTxYptH6
X2TfC5oTmnsYB/wWqo26bTJc0bwDOueQWYap0aVtv3f/0tzueKepCbxdeG4ikA0U
2t3F+OUmoCZ5D0p+6zLvrTUPhPCFEynp+vGUvmbwozYi0NWzFyFqlvqRG1KLIVLG
ZRyTMYuZ/cWkv1SJYbEcaQKCAQEA/9HaJg2YACv7rx6/FesE/81u16OYTaahHngW
4M+5rT0+fNKYH/fYkwavQ/Gr6FSTls7F+8K9DVwoGLZRQ3t6epCXqGqX0uaY+iSH
O8eezXVnHzUaVE4KlwJY9xZ+K1iIf5zUb5hpaQI0jKS/igcxFAsutWiyenrz8eQp
MAycZmzkQMLbUsa1t6y0VaEaC4YMHyQ9ag2eMfqbG27plFQbYxllHXowGMFXPheY
xACwo5V5tJUgRP+HlrI4rf0vadMgVIKxVSUiqIzGREIkYrTAshFjkpHR5/R8s/kH
Xm8q2gdoJltBFJzA2B8MHXVi7mYDBlUmBoRKhzkl/TSray9j7wKCAQEA15VsNQZu
cZluboz/R4EDbEm1po2UBcNNiu/fgJ8BDUkLzJESIITY41fgvBbTun1fiuGeE+El
0o1w4hQhIiV1KAB44w69fJR0VELfMZiIcd8kd0sDgPPVrd1MzzKPZ9yg4mbEkCCO
V/EoTi8Ut27sMcl8059qm1qq7I5pzHwSziNa087m+5VdfmvJZJVipudngZ3QmRgU
KKcBhgFFSkncYezoq2XQfRcqkk0sORxDvsMmRInyHZh0l9zv46ihgTvErlCHtizV
V4HNO4OPz7FxUZ04iWSGZs4snu1cW2j+lbKuOkADveBYVmCcdZ3R0SH+A5skL0zG
tm6z0TNP/kFlywKCAQEA+lTdFu2od0qTADujG4yemL7rn2J8EEhlU86J/LXo6UiM
FFNz/5xltwIMkf00jqXswt9WR9W5cBBlQEFwZgu3v6YscebU6NE0k1sZZnshv8YK
AjTRrfusSzdF3YyKLFp3QAE0tHs9cz9wMsyojiYZdZa3v1dTh503h9YQI+/DQEuA
VIsZWfgPLEx5L231cZ9bz0GEQ3pN+nRUQdUYB0kCf8gC9YRy+lZ/y8gFeo9+SqVj
sj1XlY1DnkiKRGAEfJbYBTra0woCz1LqVTMwLdLY2adAe9XrxQKu4OJovpUkJrSm
yxnzJnt6DkLbdRxAki8K+LBsBGaCE67tqMhYkguOywKCAQAslEl77YiJFSEw2xcu
wg7jJZrahgxF5Mz0HgYporek96Xo91a4QsBWwqVGP7IoriRDo8P8eGJJ19Wv6lmv
pe9EBlT5HuMwD8K+adWde907Ltlrkad30vQsr8ZiUiI1Z/oc1wNuikzlAolDIZk3
FUjiQrf9SsnQtj8CC7D1B/MbjVQK2I4LGCftLHzIv9tWiCNvOiMYhVIl1eMKwtiB
NCTOWx8B0lv6gf/boPm0FZQsrk4LfjsCw7PYc2dnvEcpYiKZqS1nDn5PShgWZm4m
lJrKNairQI5KU/gGJS8j9+ItMnW0tegQK4QY2IGCENCCXnUYacxhu46byuiEKggw
m3VhAoIBAQCQa90StsZHqZ+J83do3kpvD+O5nURPnckznC2WJgraW49k5vltnJTT
zkFTqHMLfmYwAz1o15sPCqlkMD+fEUzg6Hpzxm7dOUppkf5KFbD7AnsYU9U8LamJ
HaET7Dq5TpjG7uoaHZZjs7cCHcWu2E8nIezyAtZ+rbTg/qW7bYMAlJTkerznGuDU
v0hNzCr/81o5rbX0UhetcmKVOprUSWzfrw5ElLhAtzM7zivbZSnsOny8pC33FtQ5
iQbVcNGUjfFCM95ZipxxN9z0FwxpJ1paCPGYA86u2olWl/VnVPqEj7WYzO8H5W2q
aXpWH6HVf6B10pQrWWwUAAHyqYS5bZkQ
-----END PRIVATE KEY-----

View file

@ -1,210 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.telemetry.apm;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.core.CheckedRunnable;
import org.elasticsearch.test.rest.ESRestTestCase;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.not;
/**
* Tests around Elasticsearch's tracing support using APM.
*/
public class ApmIT extends ESRestTestCase {
private static final String DATA_STREAM = "traces-apm-default";
/**
* Check that if we send HTTP traffic to Elasticsearch, then traces are captured in APM server. The traces are generated in
* a separate Docker container, which continually fetches `/_nodes/stats`. We check for the following:
* <ul>
* <li>A transaction for the REST API call
* <li>A span for the task started by the REST call
* <li>A child span started by the above span
* </ul>
* <p>This proves that the hierarchy of spans is being correctly captured.
*/
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/90308")
public void testCapturesTracesForHttpTraffic() throws Exception {
checkTracesDataStream();
assertTracesExist();
}
private void checkTracesDataStream() throws Exception {
assertBusy(() -> {
final Response response = performRequestTolerantly(new Request("GET", "/_data_stream/" + DATA_STREAM));
assertOK(response);
}, 1, TimeUnit.MINUTES);
}
private void assertTracesExist() throws Exception {
// First look for a transaction for the REST calls that we make via the `tracegenerator` Docker container
final AtomicReference<String> transactionId = new AtomicReference<>();
assertBusy(() -> {
final Request tracesSearchRequest = new Request("GET", "/" + DATA_STREAM + "/_search");
tracesSearchRequest.setJsonEntity("""
{
"query": {
"match": { "transaction.name": "GET /_nodes/stats" }
}
}""");
final Response tracesSearchResponse = performRequestTolerantly(tracesSearchRequest);
assertOK(tracesSearchResponse);
final List<Map<String, Object>> documents = getDocuments(tracesSearchResponse);
assertThat(documents, not(empty()));
final Map<String, Object> tx = documents.get(0);
check(tx, "http.request.method", "GET");
check(tx, "http.response.status_code", 200);
check(tx, "labels.es_cluster_name", "docker-cluster");
check(tx, "labels.http_request_headers_authorization", "[REDACTED]");
check(tx, "span.kind", "SERVER");
check(tx, "transaction.result", "HTTP 2xx");
check(tx, "url.path", "/_nodes/stats");
final String txId = pluck(tx, "transaction.id");
transactionId.set(txId);
}, 1, TimeUnit.MINUTES);
// Then look for the task that the REST call starts
final AtomicReference<String> monitorNodeStatsSpanId = new AtomicReference<>();
assertBusy(() -> {
final List<Map<String, Object>> documents = searchByParentId(transactionId.get());
assertThat(documents, not(empty()));
final Map<String, Object> spansByName = documents.stream().collect(Collectors.toMap(d -> pluck(d, "span.name"), d -> d));
assertThat(spansByName, hasKey("cluster:monitor/nodes/stats"));
@SuppressWarnings("unchecked")
final Map<String, Object> span = (Map<String, Object>) spansByName.get("cluster:monitor/nodes/stats");
check(span, "span.kind", "INTERNAL");
final String spanId = pluck(span, "span.id");
monitorNodeStatsSpanId.set(spanId);
}, 1, TimeUnit.MINUTES);
// Finally look for the child task that the task above started
assertBusy(() -> {
final List<Map<String, Object>> documents = searchByParentId(monitorNodeStatsSpanId.get());
assertThat(documents, not(empty()));
final Map<String, Object> spansByName = documents.stream().collect(Collectors.toMap(d -> pluck(d, "span.name"), d -> d));
assertThat(spansByName, hasKey("cluster:monitor/nodes/stats[n]"));
}, 1, TimeUnit.MINUTES);
}
@SuppressWarnings("unchecked")
private <T> T pluck(Map<String, Object> map, String path) {
String[] parts = path.split("\\.");
Object result = map;
for (String part : parts) {
result = ((Map<String, ?>) result).get(part);
}
return (T) result;
}
private List<Map<String, Object>> searchByParentId(String parentId) throws IOException {
final Request searchRequest = new Request("GET", "/" + DATA_STREAM + "/_search");
searchRequest.setJsonEntity("""
{
"query": {
"match": { "parent.id": "%s" }
}
}""".formatted(parentId));
final Response response = performRequestTolerantly(searchRequest);
assertOK(response);
return getDocuments(response);
}
/**
* We don't need to clean up the cluster, particularly as we have Kibana and APM server using ES as well as our test, so declare
* that we need to preserve the cluster in order to prevent the usual cleanup logic from running (and inevitably failing).
*/
@Override
protected boolean preserveClusterUponCompletion() {
return true;
}
/**
* Turns exceptions into assertion failures so that {@link #assertBusy(CheckedRunnable)} can still retry.
*/
private Response performRequestTolerantly(Request request) {
try {
return client().performRequest(request);
} catch (Exception e) {
throw new AssertionError(e);
}
}
/**
* Customizes the client settings to use the same username / password that is configured in Docker.
*/
@Override
protected Settings restClientSettings() {
String token = basicAuthHeaderValue("admin", new SecureString("changeme".toCharArray()));
return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build();
}
/**
* Constructs the correct cluster address by looking up the dynamic port that Elasticsearch is exposed on.
*/
@Override
protected String getTestRestCluster() {
return "localhost:" + getProperty("test.fixtures.elasticsearch.tcp.9200");
}
@SuppressWarnings("unchecked")
private List<Map<String, Object>> getDocuments(Response response) throws IOException {
final Map<String, Object> stringObjectMap = ESRestTestCase.entityAsMap(response);
return (List<Map<String, Object>>) XContentMapValues.extractValue("hits.hits._source", stringObjectMap);
}
private String getProperty(String key) {
String value = System.getProperty(key);
if (value == null) {
throw new IllegalStateException(
"Could not find system properties from test.fixtures. "
+ "This test expects to run with the elasticsearch.test.fixtures Gradle plugin"
);
}
return value;
}
private <T> void check(Map<String, Object> doc, String path, T expected) {
assertThat(pluck(doc, path), equalTo(expected));
}
}

View file

@ -50,8 +50,9 @@ public class UpgradeWithOldIndexSettingsIT extends ParameterizedFullClusterResta
public void testMapperDynamicIndexSetting() throws IOException {
assumeTrue(
"Setting deprecated in 6.x, but remained in 7.x and is no longer defined in 8.x",
getOldClusterTestVersion().before("8.0.0")
"Setting deprecated in 6.x, but was disallowed/removed incorrectly in some 7.x versions and can only be set safely in 7.17.22. "
+ "Setting can't be used in 8.x ",
getOldClusterTestVersion().before("8.0.0") && getOldClusterTestVersion().after("7.17.21")
);
String indexName = "my-index";
if (isRunningAgainstOldCluster()) {

View file

@ -91,9 +91,13 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
.startObject("properties")
.startObject("embedding")
.field("type", "dense_vector")
.field("index", "true")
.field("dims", 4)
.field("similarity", "cosine")
.startObject("index_options")
.field("type", "hnsw")
.field("m", "16")
.field("ef_construction", "100")
.endObject()
.endObject()
.endObject()
@ -109,7 +113,7 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
int expectedCount = 10;
assertCount("test_index", expectedCount);
assertCount(indexName, expectedCount);
if (isUpgradedCluster() && clusterSupportsDenseVectorTypeUpdate()) {
Request updateMapping = new Request("PUT", "/" + indexName + "/_mapping");
@ -118,9 +122,13 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
.startObject("properties")
.startObject("embedding")
.field("type", "dense_vector")
.field("index", "true")
.field("dims", 4)
.field("similarity", "cosine")
.startObject("index_options")
.field("type", "int8_hnsw")
.field("m", "16")
.field("ef_construction", "100")
.endObject()
.endObject()
.endObject()
@ -132,7 +140,7 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
index.setJsonEntity(BULK2);
assertOK(client().performRequest(index));
expectedCount = 20;
assertCount("test_index", expectedCount);
assertCount(indexName, expectedCount);
}
}
}
@ -152,7 +160,7 @@ public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
Map<?, ?> response = entityAsMap(client().performRequest(new Request("GET", "_nodes")));
Map<?, ?> nodes = (Map<?, ?>) response.get("nodes");
Predicate<Map<?, ?>> nodeSupportsBulkApi = n -> Version.fromString(n.get("version").toString()).onOrAfter(Version.V_8_14_0);
Predicate<Map<?, ?>> nodeSupportsBulkApi = n -> Version.fromString(n.get("version").toString()).onOrAfter(Version.V_8_15_0);
return nodes.values().stream().map(o -> (Map<?, ?>) o).allMatch(nodeSupportsBulkApi);
}
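For reference, the XContent builder in this test corresponds to a mapping along these lines (field values copied from the test; Elasticsearch coerces the quoted numbers and booleans):

```json
{
  "properties": {
    "embedding": {
      "type": "dense_vector",
      "index": "true",
      "dims": 4,
      "similarity": "cosine",
      "index_options": {
        "type": "hnsw",
        "m": "16",
        "ef_construction": "100"
      }
    }
  }
}
```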

View file

@ -106,8 +106,9 @@ public class UpgradeWithOldIndexSettingsIT extends AbstractRollingUpgradeTestCas
public void testMapperDynamicIndexSetting() throws IOException {
assumeTrue(
"Setting deprecated in 6.x, but remained in 7.x and is no longer defined in 8.x",
getOldClusterTestVersion().before("8.0.0")
"Setting deprecated in 6.x, but was disallowed/removed incorrectly in some 7.x versions and can only be set safely in 7.17.22. "
+ "Setting can't be used in 8.x ",
getOldClusterTestVersion().before("8.0.0") && getOldClusterTestVersion().after("7.17.21")
);
String indexName = "my-index";
if (isOldCluster()) {

View file

@ -473,6 +473,11 @@ setup:
---
"standard retriever collapse":
- skip:
reason: "https://github.com/elastic/elasticsearch/issues/109476"
known_issues:
- cluster_feature: "gte_v8.13.0"
fixed_by: "gte_v8.14.0"
- do:
search:
index: animals

View file

@ -164,8 +164,8 @@ setup:
---
"Dynamic dimensions for hex-encoded string":
- requires:
cluster_features: "gte_v8.15.0"
reason: 'hex encoding for byte vectors fixed in 8.15'
cluster_features: "gte_v8.14.1"
reason: 'hex encoding for byte vectors fixed in 8.14.1'
- do:
indices.create:

View file

@ -1,4 +1,9 @@
setup:
- skip:
reason: "https://github.com/elastic/elasticsearch/issues/109476"
known_issues:
- cluster_feature: "gte_v8.13.0"
fixed_by: "gte_v8.14.0"
- do:
indices.create:
index: test
@ -85,7 +90,6 @@ setup:
---
"field collapsing and from":
- do:
search:
rest_total_hits_as_int: true

View file

@ -1,4 +1,9 @@
setup:
- skip:
reason: "https://github.com/elastic/elasticsearch/issues/109476"
known_issues:
- cluster_feature: "gte_v8.13.0"
fixed_by: "gte_v8.14.0"
- requires:
cluster_features: ["gte_v8.10.0"]
reason: Collapse with max score was fixed in 8.10.0

View file

@ -1,4 +1,9 @@
setup:
- skip:
reason: "https://github.com/elastic/elasticsearch/issues/109476"
known_issues:
- cluster_feature: "gte_v8.13.0"
fixed_by: "gte_v8.14.0"
- requires:
cluster_features: "gte_v8.15.0"
reason: Collapse with rescore added in 8.15.0

View file

@ -1,3 +1,9 @@
setup:
- skip:
reason: "https://github.com/elastic/elasticsearch/issues/109476"
known_issues:
- cluster_feature: "gte_v8.13.0"
fixed_by: "gte_v8.14.0"
---
"two levels fields collapsing":

View file

@ -13,7 +13,8 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedSetSelector;
import org.apache.lucene.search.SortedSetSortField;
import org.apache.lucene.util.Constants;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
@ -396,7 +397,12 @@ public class ShrinkIndexIT extends ESIntegTestCase {
refreshClusterInfo();
// kick off a retry and wait until it's done!
ClusterRerouteResponse clusterRerouteResponse = clusterAdmin().prepareReroute().setRetryFailed(true).get();
final var clusterRerouteResponse = safeGet(
client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).setRetryFailed(true)
)
);
long expectedShardSize = clusterRerouteResponse.getState().routingTable().index("target").shard(0).shard(0).getExpectedShardSize();
// the allocator supports summing the expected shard size over the source index shards
assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0);

View file

@ -11,6 +11,7 @@ package org.elasticsearch.cluster;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.UnavailableShardsException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
@ -106,7 +107,7 @@ public class SimpleDataNodesIT extends ESIntegTestCase {
internalCluster().startNode();
internalCluster().startNode();
clusterAdmin().prepareReroute().setRetryFailed(true).get();
ClusterRerouteUtils.rerouteRetryFailed(client());
}
}
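These tests now go through `ClusterRerouteUtils` instead of the removed `prepareReroute()` builder. A sketch of what that helper plausibly wraps, inferred from the call sites in this commit (the class and method bodies below are assumptions, not the actual utility):

```java
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
import org.elasticsearch.client.internal.ElasticsearchClient;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
import org.elasticsearch.core.TimeValue;

// Hypothetical reconstruction of the ClusterRerouteUtils test helper, based on
// the reroute(client), reroute(client, command) and rerouteRetryFailed(client)
// call sites in this diff; the real class may differ.
public final class ClusterRerouteUtilsSketch {
    private static final TimeValue TIMEOUT = TimeValue.timeValueSeconds(30);

    private ClusterRerouteUtilsSketch() {}

    public static void reroute(ElasticsearchClient client, AllocationCommand... commands) {
        ClusterRerouteRequest request = new ClusterRerouteRequest(TIMEOUT, TIMEOUT);
        for (AllocationCommand command : commands) {
            request.add(command); // explicit allocation commands, if any
        }
        client.execute(TransportClusterRerouteAction.TYPE, request).actionGet();
    }

    public static void rerouteRetryFailed(ElasticsearchClient client) {
        client.execute(
            TransportClusterRerouteAction.TYPE,
            new ClusterRerouteRequest(TIMEOUT, TIMEOUT).setRetryFailed(true)
        ).actionGet();
    }
}
```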

View file

@ -11,6 +11,7 @@ package org.elasticsearch.cluster.allocation;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata.State;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
@ -184,7 +185,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
.setWaitForNodes("3")
.get();
assertThat(health.isTimedOut(), equalTo(false));
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
health = clusterAdmin().prepareHealth()
.setIndices("test")
.setWaitForEvents(Priority.LANGUID)
@ -210,7 +211,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
.setWaitForNodes("4")
.get();
assertThat(health.isTimedOut(), equalTo(false));
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
health = clusterAdmin().prepareHealth()
.setIndices("test")
.setWaitForEvents(Priority.LANGUID)

View file

@ -11,7 +11,9 @@ package org.elasticsearch.cluster.allocation;
import org.apache.logging.log4j.Level;
import org.apache.lucene.tests.util.LuceneTestCase;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
@ -99,12 +101,14 @@ public class ClusterRerouteIT extends ESIntegTestCase {
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));
logger.info("--> explicitly allocate shard 1, *under dry_run*");
state = clusterAdmin().prepareReroute()
.setExplain(randomBoolean())
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.setDryRun(true)
.get()
.getState();
state = safeGet(
client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.dryRun(true)
)
).getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(
state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@ -116,11 +120,13 @@ public class ClusterRerouteIT extends ESIntegTestCase {
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));
logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
state = clusterAdmin().prepareReroute()
.setExplain(randomBoolean())
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.get()
.getState();
state = safeGet(
client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
)
).getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(
state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@ -143,11 +149,13 @@ public class ClusterRerouteIT extends ESIntegTestCase {
);
logger.info("--> move shard 1 primary from node1 to node2");
state = clusterAdmin().prepareReroute()
.setExplain(randomBoolean())
.add(new MoveAllocationCommand("test", 0, node_1, node_2))
.get()
.getState();
state = safeGet(
client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
.add(new MoveAllocationCommand("test", 0, node_1, node_2))
)
).getState();
assertThat(
state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@ -250,11 +258,13 @@ public class ClusterRerouteIT extends ESIntegTestCase {
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));
logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
state = clusterAdmin().prepareReroute()
.setExplain(randomBoolean())
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.get()
.getState();
state = safeGet(
client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
)
).getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(
state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@ -295,17 +305,19 @@ public class ClusterRerouteIT extends ESIntegTestCase {
internalCluster().startNode(commonSettings);
// wait a bit for the cluster to realize that the shard is not there...
// TODO can we get around this? the cluster is RED, so what do we wait for?
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
assertThat(
clusterAdmin().prepareHealth().setIndices("test").setWaitForNodes("2").get().getStatus(),
equalTo(ClusterHealthStatus.RED)
);
logger.info("--> explicitly allocate primary");
state = clusterAdmin().prepareReroute()
.setExplain(randomBoolean())
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
.get()
.getState();
state = safeGet(
client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
.add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true))
)
).getState();
assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
assertThat(
state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
@ -350,7 +362,12 @@ public class ClusterRerouteIT extends ESIntegTestCase {
logger.info("--> try to move the shard from node1 to node2");
MoveAllocationCommand cmd = new MoveAllocationCommand("test", 0, node_1, node_2);
ClusterRerouteResponse resp = clusterAdmin().prepareReroute().add(cmd).setExplain(true).get();
ClusterRerouteResponse resp = safeGet(
client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).add(cmd).explain(true)
)
);
RoutingExplanations e = resp.getExplanations();
assertThat(e.explanations().size(), equalTo(1));
RerouteExplanation explanation = e.explanations().get(0);
@ -398,11 +415,14 @@ public class ClusterRerouteIT extends ESIntegTestCase {
);
AllocationCommand dryRunAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
ClusterRerouteResponse dryRunResponse = clusterAdmin().prepareReroute()
.setExplain(randomBoolean())
.setDryRun(true)
.add(dryRunAllocation)
.get();
ClusterRerouteResponse dryRunResponse = safeGet(
client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).explain(randomBoolean())
.dryRun(true)
.add(dryRunAllocation)
)
);
// during a dry run, messages exist but are not logged or exposed
assertThat(dryRunResponse.getExplanations().getYesDecisionMessages(), hasSize(1));
@ -431,11 +451,16 @@ public class ClusterRerouteIT extends ESIntegTestCase {
AllocationCommand yesDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeName1, true);
AllocationCommand noDecisionAllocation = new AllocateEmptyPrimaryAllocationCommand("noexist", 1, nodeName2, true);
ClusterRerouteResponse response = clusterAdmin().prepareReroute()
.setExplain(true) // so we get a NO decision back rather than an exception
.add(yesDecisionAllocation)
.add(noDecisionAllocation)
.get();
ClusterRerouteResponse response = safeGet(
client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
// set explain(true) so we get a NO decision back rather than an exception
.explain(true)
.add(yesDecisionAllocation)
.add(noDecisionAllocation)
)
);
assertThat(response.getExplanations().getYesDecisionMessages(), hasSize(1));
assertThat(response.getExplanations().getYesDecisionMessages().get(0), containsString("allocated an empty primary"));
@ -482,9 +507,9 @@ public class ClusterRerouteIT extends ESIntegTestCase {
)) {
try {
enableIndexBlock("test-blocks", blockSetting);
assertAcked(
clusterAdmin().prepareReroute()
.add(new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))
ClusterRerouteUtils.reroute(
client(),
new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))
);
ClusterHealthResponse healthResponse = clusterAdmin().prepareHealth()
@ -502,8 +527,11 @@ public class ClusterRerouteIT extends ESIntegTestCase {
try {
setClusterReadOnly(true);
assertBlocked(
clusterAdmin().prepareReroute()
.add(new MoveAllocationCommand("test-blocks", 1, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))
null,
ClusterRerouteUtils.expectRerouteFailure(
client(),
new MoveAllocationCommand("test-blocks", 1, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))
)
);
} finally {
setClusterReadOnly(false);

View file

@ -8,6 +8,7 @@
package org.elasticsearch.cluster.allocation;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.AutoExpandReplicas;
@ -160,7 +161,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
}
logger.info("--> remove index from the first node");
updateIndexSettings(Settings.builder().put("index.routing.allocation.exclude._name", node_0), "test");
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
ensureGreen("test");
logger.info("--> verify all shards are allocated on node_1 now");
@ -175,7 +176,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
logger.info("--> disable allocation filtering ");
updateIndexSettings(Settings.builder().put("index.routing.allocation.exclude._name", ""), "test");
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
ensureGreen("test");
logger.info("--> verify that there are shards allocated on both nodes now");

View file

@ -10,6 +10,7 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanationUtils;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.cluster.ClusterState;
@ -106,7 +107,7 @@ public class AllocationIdIT extends ESIntegTestCase {
checkNoValidShardCopy(indexName, shardId);
// allocate stale primary
client(node1).admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true)).get();
ClusterRerouteUtils.reroute(client(node1), new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true));
// allocation fails due to corruption marker
assertBusy(() -> {
@ -127,7 +128,7 @@ public class AllocationIdIT extends ESIntegTestCase {
checkNoValidShardCopy(indexName, shardId);
// no valid shard copy is there; have to invoke AllocateStalePrimary again
clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true)).get();
ClusterRerouteUtils.reroute(client(), new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true));
ensureYellow(indexName);

View file

@ -9,7 +9,9 @@
package org.elasticsearch.cluster.routing;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
@ -156,12 +158,13 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
logger.info("--> check that old primary shard does not get promoted to primary again");
// kick reroute and wait for all shard states to be fetched
client(master).admin().cluster().prepareReroute().get();
ClusterRerouteUtils.reroute(client(master));
assertBusy(
() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetches(), equalTo(0))
);
// kick reroute a second time and check that all shards are unassigned
assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2));
ClusterRerouteUtils.reroute(client(master));
assertThat(client(master).admin().cluster().prepareState().get().getState().getRoutingNodes().unassigned().size(), equalTo(2));
return inSyncDataPathSettings;
}
@ -207,11 +210,16 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
);
logger.info("--> force allocation of stale copy to node that does not have shard copy");
Throwable iae = expectThrows(
IllegalArgumentException.class,
clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true))
assertEquals(
"No data for shard [0] of index [test] found on any node",
asInstanceOf(
IllegalArgumentException.class,
ClusterRerouteUtils.expectRerouteFailure(
client(),
new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)
)
).getMessage()
);
assertThat(iae.getMessage(), equalTo("No data for shard [0] of index [test] found on any node"));
logger.info("--> wait until shard is failed and becomes unassigned again");
assertTrue(
@ -252,16 +260,16 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
TransportIndicesShardStoresAction.TYPE,
new IndicesShardStoresRequest(idxName)
).get().getStoreStatuses().get(idxName);
ClusterRerouteRequestBuilder rerouteBuilder = clusterAdmin().prepareReroute();
final var rerouteRequest = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
for (Map.Entry<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : storeStatuses.entrySet()) {
int shardId = shardStoreStatuses.getKey();
IndicesShardStoresResponse.StoreStatus storeStatus = randomFrom(shardStoreStatuses.getValue());
logger.info("--> adding allocation command for shard {}", shardId);
// force allocation based on node id
if (useStaleReplica) {
rerouteBuilder.add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, storeStatus.getNode().getId(), true));
rerouteRequest.add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, storeStatus.getNode().getId(), true));
} else {
rerouteBuilder.add(new AllocateEmptyPrimaryAllocationCommand(idxName, shardId, storeStatus.getNode().getId(), true));
rerouteRequest.add(new AllocateEmptyPrimaryAllocationCommand(idxName, shardId, storeStatus.getNode().getId(), true));
}
}
@ -280,7 +288,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master);
clusterService.addListener(clusterStateListener);
rerouteBuilder.get();
assertAcked(safeGet(client().execute(TransportClusterRerouteAction.TYPE, rerouteRequest)));
assertTrue(clusterStateChangeLatch.await(30, TimeUnit.SECONDS));
clusterService.removeListener(clusterStateListener);
@ -341,13 +349,16 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
.forEach(status -> nodeNames.remove(status.getNode().getName()));
assertThat(nodeNames, hasSize(1));
final String nodeWithoutData = nodeNames.get(0);
Throwable iae = expectThrows(
IllegalArgumentException.class,
clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true))
);
assertThat(
iae.getMessage(),
equalTo("No data for shard [" + shardId + "] of index [" + idxName + "] found on node [" + nodeWithoutData + ']')
assertEquals(
"No data for shard [" + shardId + "] of index [" + idxName + "] found on node [" + nodeWithoutData + ']',
asInstanceOf(
IllegalArgumentException.class,
ClusterRerouteUtils.expectRerouteFailure(
client(),
new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)
)
).getMessage()
);
}
@ -359,22 +370,29 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
ensureGreen();
final String nodeWithoutData = randomFrom(dataNodes);
final int shardId = 0;
IllegalArgumentException iae = expectThrows(
IllegalArgumentException.class,
clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true))
assertEquals(
"[allocate_stale_primary] primary [" + idxName + "][" + shardId + "] is already assigned",
asInstanceOf(
IllegalArgumentException.class,
ClusterRerouteUtils.expectRerouteFailure(
client(),
new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)
)
).getMessage()
);
assertThat(iae.getMessage(), equalTo("[allocate_stale_primary] primary [" + idxName + "][" + shardId + "] is already assigned"));
}
public void testForceStaleReplicaToBePromotedForMissingIndex() {
internalCluster().startMasterOnlyNode(Settings.EMPTY);
final String dataNode = internalCluster().startDataOnlyNode();
final String idxName = "test";
IndexNotFoundException ex = expectThrows(
IndexNotFoundException.class,
clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, 0, dataNode, true))
assertEquals(
idxName,
asInstanceOf(
IndexNotFoundException.class,
ClusterRerouteUtils.expectRerouteFailure(client(), new AllocateStalePrimaryAllocationCommand(idxName, 0, dataNode, true))
).getIndex().getName()
);
assertThat(ex.getIndex().getName(), equalTo(idxName));
}
public void testForcePrimaryShardIfAllocationDecidersSayNoAfterIndexCreation() throws ExecutionException, InterruptedException {
@ -386,7 +404,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
assertThat(clusterAdmin().prepareState().get().getState().getRoutingTable().shardRoutingTable("test", 0).assignedShards(), empty());
clusterAdmin().prepareReroute().add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true)).get();
ClusterRerouteUtils.reroute(client(), new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true));
ensureGreen("test");
}

View file

@ -11,6 +11,7 @@ package org.elasticsearch.cluster.routing;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction;
import org.elasticsearch.action.search.ClosePointInTimeRequest;
@ -422,7 +423,7 @@ public class ShardRoutingRoleIT extends ESIntegTestCase {
updateIndexSettings(Settings.builder().put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name", "not-a-node"), "test");
AllocationCommand cancelPrimaryCommand;
while ((cancelPrimaryCommand = getCancelPrimaryCommand()) != null) {
clusterAdmin().prepareReroute().add(cancelPrimaryCommand).get();
ClusterRerouteUtils.reroute(client(), cancelPrimaryCommand);
}
} finally {
masterClusterService.removeListener(routingTableWatcher);

View file

@ -8,6 +8,7 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
@ -314,7 +315,7 @@ public class DiskThresholdDeciderIT extends DiskUsageIntegTestCase {
.values()
.stream()
.allMatch(e -> e.freeBytes() > WATERMARK_BYTES)) {
assertAcked(clusterAdmin().prepareReroute());
ClusterRerouteUtils.reroute(client());
}
assertFalse(

View file

@ -7,6 +7,7 @@
*/
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.settings.Settings;
@ -50,7 +51,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
.put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE),
"test"
);
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
ensureGreen();
assertAllShardsOnNodes("test", firstNode);
assertAllShardsOnNodes("test_1", firstNode);
@ -65,7 +66,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
"test"
);
logger.info("--> balance index [test]");
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
ensureGreen("test");
Set<String> test = assertAllShardsOnNodes("test", firstNode, secondNode);
assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2));
@ -80,7 +81,7 @@ public class UpdateShardAllocationSettingsIT extends ESIntegTestCase {
)
);
logger.info("--> balance index [test_1]");
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
ensureGreen("test_1");
Set<String> test_1 = assertAllShardsOnNodes("test_1", firstNode, secondNode);
assertThat("index: [test_1] expected to be rebalanced on both nodes", test_1.size(), equalTo(2));

View file

@ -13,6 +13,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.support.PlainActionFuture;
@ -233,7 +234,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
// is the super-connected node and recovery source and target are on opposite sides of the bridge
if (disruptionScheme instanceof NetworkDisruption networkDisruption
&& networkDisruption.getDisruptedLinks() instanceof Bridge) {
assertBusy(() -> assertAcked(clusterAdmin().prepareReroute().setRetryFailed(true)));
assertBusy(() -> ClusterRerouteUtils.rerouteRetryFailed(client()));
}
ensureGreen("test");

View file

@ -8,6 +8,7 @@
package org.elasticsearch.discovery;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
@ -116,7 +117,7 @@ public class MasterDisruptionIT extends AbstractDisruptionTestCase {
logger.info("issue a reroute");
// trigger a reroute now, instead of waiting for the background reroute of RerouteService
assertAcked(clusterAdmin().prepareReroute());
ClusterRerouteUtils.reroute(client());
// and wait for it to finish and for the cluster to stabilize
ensureGreen("test");

View file

@ -20,6 +20,7 @@ import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.NativeFSLockFactory;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
@ -219,7 +220,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
);
});
clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, nodeId, true)).get();
ClusterRerouteUtils.reroute(client(), new AllocateStalePrimaryAllocationCommand(indexName, 0, nodeId, true));
assertBusy(() -> {
final var explanation = getClusterAllocationExplanation(client(), indexName, 0, true);
@ -373,7 +374,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
);
});
clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, primaryNodeId, true)).get();
ClusterRerouteUtils.reroute(client(), new AllocateStalePrimaryAllocationCommand(indexName, 0, primaryNodeId, true));
assertBusy(() -> {
final var explanation = getClusterAllocationExplanation(client(), indexName, 0, true);

View file

@ -19,6 +19,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
@ -284,7 +285,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
* we corrupted the primary shard - now let's make sure we never recover from it successfully
*/
setReplicaCount(1, "test");
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
boolean didClusterTurnRed = waitUntil(() -> {
ClusterHealthStatus test = clusterAdmin().health(new ClusterHealthRequest("test")).actionGet().getStatus();
@ -368,7 +369,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put("index.routing.allocation.include._name", primariesNode.getName() + "," + unluckyNode.getName()),
"test"
);
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
hasCorrupted.await();
corrupt.set(false);
ensureGreen();
@ -493,7 +494,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put("index.routing.allocation.exclude._name", unluckyNode.getName()),
"test"
);
clusterAdmin().prepareReroute().setRetryFailed(true).get();
ClusterRerouteUtils.rerouteRetryFailed(client());
ensureGreen("test");
assertThatAllShards("test", shard -> {
assertThat(shard.primaryShard().currentNodeId(), not(equalTo(unluckyNode.getId())));

View file

@ -10,6 +10,7 @@ package org.elasticsearch.indexlifecycle;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
@ -79,7 +80,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
final String node2 = getLocalNodeId(server_2);
// explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
clusterHealth = clusterAdmin().health(
new ClusterHealthRequest(new String[] {}).waitForGreenStatus().waitForNodes("2").waitForNoRelocatingShards(true)
@ -120,7 +121,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
final String node3 = getLocalNodeId(server_3);
// explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
clusterHealth = clusterAdmin().prepareHealth()
.setWaitForGreenStatus()
@ -174,7 +175,7 @@ public class IndexLifecycleActionIT extends ESIntegTestCase {
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
clusterHealth = clusterAdmin().prepareHealth()
.setWaitForGreenStatus()

View file

@ -8,6 +8,7 @@
package org.elasticsearch.indices;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
@ -115,7 +116,7 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase {
throw new RuntimeException("FAIL");
}
});
clusterAdmin().prepareReroute().add(new MoveAllocationCommand("index1", 0, node1, node2)).get();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("index1", 0, node1, node2));
ensureGreen("index1");
var state = clusterAdmin().prepareState().get().getState();

View file

@ -11,6 +11,7 @@ package org.elasticsearch.indices.cluster;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.LogEvent;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
@ -28,8 +29,6 @@ import org.elasticsearch.test.junit.annotations.TestLogging;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
public class ShardLockFailureIT extends ESIntegTestCase {
@TestLogging(reason = "checking DEBUG logs from ICSS", value = "org.elasticsearch.indices.cluster.IndicesClusterStateService:DEBUG")
@ -165,7 +164,7 @@ public class ShardLockFailureIT extends ESIntegTestCase {
assertEquals(1, clusterHealthResponse.getUnassignedShards());
}
assertAcked(clusterAdmin().prepareReroute().setRetryFailed(true));
ClusterRerouteUtils.rerouteRetryFailed(client());
ensureGreen(indexName);
}
}

View file

@ -11,6 +11,7 @@ package org.elasticsearch.indices.recovery;
import org.apache.logging.log4j.Level;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -63,9 +64,7 @@ public class IndexPrimaryRelocationIT extends ESIntegTestCase {
relocationTarget = randomFrom(dataNodes);
}
logger.info("--> [iteration {}] relocating from {} to {} ", i, relocationSource.getName(), relocationTarget.getName());
clusterAdmin().prepareReroute()
.add(new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId()))
.get();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId()));
ClusterHealthResponse clusterHealthResponse = clusterAdmin().prepareHealth()
.setTimeout(TimeValue.timeValueSeconds(60))
.setWaitForEvents(Priority.LANGUID)

View file

@ -28,6 +28,7 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.action.admin.indices.recovery.RecoveryRequest;
@ -281,7 +282,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
*/
public void startShardRecovery(String sourceNode, String targetNode) throws Exception {
logger.info("--> updating cluster settings with moving shard from node `{}` to node `{}`", sourceNode, targetNode);
clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, sourceNode, targetNode)).get().getState();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand(INDEX_NAME, 0, sourceNode, targetNode));
logger.info("--> requesting shard recovery");
indicesAdmin().prepareRecoveries(INDEX_NAME).get();
@ -553,7 +554,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
throttleRecovery10Seconds(shardSize);
logger.info("--> move shard from: {} to: {}", nodeA, nodeB);
clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeB)).get().getState();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeB));
logger.info("--> waiting for recovery to start both on source and target");
final Index index = resolveIndex(INDEX_NAME);
@ -639,7 +640,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
throttleRecovery10Seconds(shardSize);
logger.info("--> move replica shard from: {} to: {}", nodeA, nodeC);
clusterAdmin().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeC)).get().getState();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand(INDEX_NAME, 0, nodeA, nodeC));
response = indicesAdmin().prepareRecoveries(INDEX_NAME).get();
recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
@ -1643,7 +1644,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
internalCluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
final String nodeWithoutData = internalCluster().startDataOnlyNode();
assertAcked(clusterAdmin().prepareReroute().add(new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeWithoutData, true)));
ClusterRerouteUtils.reroute(client(), new AllocateEmptyPrimaryAllocationCommand(indexName, 0, nodeWithoutData, true));
internalCluster().startDataOnlyNode(randomNodeDataPathSettings);
ensureGreen();
for (ShardStats shardStats : indicesAdmin().prepareStats(indexName).get().getIndex(indexName).getShards()) {
@ -1712,7 +1713,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
);
internalCluster().startNode();
internalCluster().startNode();
clusterAdmin().prepareReroute().setRetryFailed(true).get();
ClusterRerouteUtils.rerouteRetryFailed(client());
assertAcked(indicesAdmin().prepareDelete("test")); // cancel recoveries
assertBusy(() -> {
for (PeerRecoverySourceService recoveryService : internalCluster().getDataNodeInstances(PeerRecoverySourceService.class)) {

View file

@ -7,13 +7,13 @@
*/
package org.elasticsearch.indices.state;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.ConcurrentRebalanceAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
@ -125,7 +125,7 @@ public class CloseWhileRelocatingShardsIT extends ESIntegTestCase {
final CountDownLatch release = new CountDownLatch(indices.length);
// relocate one shard for every index to be closed
final AllocationCommands commands = new AllocationCommands();
final var commands = new ArrayList<AllocationCommand>();
for (final String index : indices) {
final NumShards numShards = getNumShards(index);
final int shardId = numShards.numPrimaries == 1 ? 0 : randomIntBetween(0, numShards.numPrimaries - 1);
@ -146,8 +146,7 @@ public class CloseWhileRelocatingShardsIT extends ESIntegTestCase {
}
// Build the list of shards for which recoveries will be blocked
final Set<ShardId> blockedShards = commands.commands()
.stream()
final Set<ShardId> blockedShards = commands.stream()
.map(c -> (MoveAllocationCommand) c)
.map(c -> new ShardId(clusterService.state().metadata().index(c.index()).getIndex(), c.shardId()))
.collect(Collectors.toSet());
@ -185,7 +184,7 @@ public class CloseWhileRelocatingShardsIT extends ESIntegTestCase {
}
}
assertAcked(clusterAdmin().reroute(new ClusterRerouteRequest().commands(commands)).get());
ClusterRerouteUtils.reroute(client(), commands.toArray(AllocationCommand[]::new));
// start index closing threads
final List<Thread> threads = new ArrayList<>();

View file

@ -11,6 +11,7 @@ package org.elasticsearch.indices.store;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
@ -128,7 +129,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
logger.info("--> stopping disruption");
disruption.stopDisrupting();
} else {
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_3)).get();
ClusterRerouteUtils.reroute(internalCluster().client(), new MoveAllocationCommand("test", 0, node_1, node_3));
}
clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
@ -172,7 +173,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
}
}
});
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(index, shard, nodeFrom, nodeTo)).get();
ClusterRerouteUtils.reroute(internalCluster().client(), new MoveAllocationCommand(index, shard, nodeFrom, nodeTo));
logger.info("--> waiting for relocation to start");
beginRelocationLatch.await();
logger.info("--> starting disruption");
@ -223,7 +224,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
});
logger.info("--> move shard from {} to {}, and wait for relocation to finish", node_1, node_2);
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).get();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node_1, node_2));
shardActiveRequestSent.await();
ClusterHealthResponse clusterHealth = clusterAdmin().prepareHealth().setWaitForNoRelocatingShards(true).get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));

View file

@ -13,7 +13,10 @@ import org.apache.lucene.tests.util.English;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.support.WriteRequest;
@ -144,7 +147,7 @@ public class RelocationIT extends ESIntegTestCase {
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
logger.info("--> relocate the shard from node1 to node2");
clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node_1, node_2)).get();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node_1, node_2));
clusterHealthResponse = clusterAdmin().prepareHealth()
.setWaitForEvents(Priority.LANGUID)
@ -207,7 +210,7 @@ public class RelocationIT extends ESIntegTestCase {
logger.debug("--> Allow indexer to index [{}] documents", numDocs);
indexer.continueIndexing(numDocs);
logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode])).get();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode]));
if (rarely()) {
logger.debug("--> flushing");
indicesAdmin().prepareFlush().get();
@ -334,7 +337,7 @@ public class RelocationIT extends ESIntegTestCase {
logger.info("--> START relocate the shard from {} to {}", nodes[fromNode], nodes[toNode]);
clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode])).get();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, nodes[fromNode], nodes[toNode]));
logger.debug("--> index [{}] documents", builders1.size());
indexRandom(false, true, builders1);
@ -555,7 +558,7 @@ public class RelocationIT extends ESIntegTestCase {
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
logger.info("--> relocate the shard from node1 to node2");
clusterAdmin().prepareReroute().add(new MoveAllocationCommand("test", 0, node1, node2)).get();
ClusterRerouteUtils.reroute(client(), new MoveAllocationCommand("test", 0, node1, node2));
clusterHealthResponse = clusterAdmin().prepareHealth()
.setWaitForEvents(Priority.LANGUID)
@ -606,9 +609,10 @@ public class RelocationIT extends ESIntegTestCase {
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
logger.info("--> relocate the shard from node1 to node2");
ActionFuture<ClusterRerouteResponse> relocationListener = clusterAdmin().prepareReroute()
.add(new MoveAllocationCommand("test", 0, node1, node2))
.execute();
ActionFuture<ClusterRerouteResponse> relocationListener = client().execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).add(new MoveAllocationCommand("test", 0, node1, node2))
);
logger.info("--> index 100 docs while relocating");
for (int i = 20; i < 120; i++) {
pendingIndexResponses.add(
@ -618,7 +622,7 @@ public class RelocationIT extends ESIntegTestCase {
.execute()
);
}
relocationListener.actionGet();
safeGet(relocationListener);
clusterHealthResponse = clusterAdmin().prepareHealth()
.setWaitForEvents(Priority.LANGUID)
.setWaitForNoRelocatingShards(true)

View file

@ -10,6 +10,7 @@ package org.elasticsearch.search.basic;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.common.Priority;
@ -118,7 +119,7 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase {
threads[j].start();
}
allowNodes("test", between(1, 3));
clusterAdmin().prepareReroute().get();
ClusterRerouteUtils.reroute(client());
stop.set(true);
for (int j = 0; j < threads.length; j++) {
threads[j].join();

View file

@ -13,6 +13,7 @@ import org.apache.http.entity.StringEntity;
import org.apache.logging.log4j.Level;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesFailure;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
@ -549,18 +550,14 @@ public class FieldCapabilitiesIT extends ESIntegTestCase {
if (targetNodes.isEmpty()) {
continue;
}
safeGet(
clusterAdmin().prepareReroute()
.add(
new MoveAllocationCommand(
shardId.getIndexName(),
shardId.id(),
indicesService.clusterService().localNode().getId(),
randomFrom(targetNodes)
)
)
.execute()
ClusterRerouteUtils.reroute(
client(),
new MoveAllocationCommand(
shardId.getIndexName(),
shardId.id(),
indicesService.clusterService().localNode().getId(),
randomFrom(targetNodes)
)
);
}
}

View file

@ -12,6 +12,7 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteUtils;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
@ -597,7 +598,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
Runnable fixupAction = () -> {
// remove the shard allocation filtering settings and use the Reroute API to retry the failed shards
updateIndexSettings(Settings.builder().putNull("index.routing.allocation.include._name"), indexName);
assertAcked(clusterAdmin().prepareReroute().setRetryFailed(true));
ClusterRerouteUtils.rerouteRetryFailed(client());
};
unrestorableUseCase(

View file

@ -188,6 +188,7 @@ public class TransportVersions {
public static final TransportVersion RANK_DOC_IN_SHARD_FETCH_REQUEST = def(8_679_00_0);
public static final TransportVersion SECURITY_SETTINGS_REQUEST_TIMEOUTS = def(8_680_00_0);
public static final TransportVersion QUERY_RULE_CRUD_API_PUT = def(8_681_00_0);
public static final TransportVersion DROP_UNUSED_NODES_REQUESTS = def(8_682_00_0);
/*
* STOP! READ THIS FIRST! No, really,

View file

@ -11,6 +11,7 @@ package org.elasticsearch.action.admin.cluster.node.info;
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.UpdateForV9;
import java.io.IOException;
import java.util.Set;
@ -30,6 +31,7 @@ public final class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
* @param in A stream input object.
* @throws IOException if the stream cannot be deserialized.
*/
@UpdateForV9 // this constructor is unused in v9
public NodesInfoRequest(StreamInput in) throws IOException {
super(in);
nodesInfoMetrics = new NodesInfoMetrics(in);
@ -111,6 +113,7 @@ public final class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
return this;
}
@UpdateForV9 // this method can just call localOnly() in v9
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);

View file

@ -13,6 +13,7 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
@ -36,9 +37,9 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
nodesStatsRequestParameters = new NodesStatsRequestParameters();
}
@UpdateForV9 // this constructor is unused in v9
public NodesStatsRequest(StreamInput in) throws IOException {
super(in);
nodesStatsRequestParameters = new NodesStatsRequestParameters(in);
}
@ -178,6 +179,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
nodesStatsRequestParameters.setIncludeShardsStats(includeShardsStats);
}
@UpdateForV9 // this method can just call localOnly() in v9
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);

View file

@ -13,6 +13,7 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.TimeValue;
import java.io.IOException;
import java.util.Objects;
@ -34,8 +35,8 @@ public class ClusterRerouteRequest extends AcknowledgedRequest<ClusterRerouteReq
retryFailed = in.readBoolean();
}
public ClusterRerouteRequest() {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT, DEFAULT_ACK_TIMEOUT);
public ClusterRerouteRequest(TimeValue masterNodeTimeout, TimeValue ackTimeout) {
super(masterNodeTimeout, ackTimeout);
}
/**

View file

@ -8,19 +8,31 @@
package org.elasticsearch.action.admin.cluster.reroute;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.client.internal.ElasticsearchClient;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommand;
/**
* Builder for a cluster reroute request
*
* @deprecated just build the request directly
*/
@Deprecated(forRemoval = true) // temporary compatibility shim
public class ClusterRerouteRequestBuilder extends AcknowledgedRequestBuilder<
ClusterRerouteRequest,
ClusterRerouteResponse,
ClusterRerouteRequestBuilder> {
public ClusterRerouteRequestBuilder(ElasticsearchClient client) {
super(client, TransportClusterRerouteAction.TYPE, new ClusterRerouteRequest());
super(
client,
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(
MasterNodeRequest.TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT,
AcknowledgedRequest.DEFAULT_ACK_TIMEOUT
)
);
}
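As the deprecation note above suggests, callers can now build the request directly and run the action themselves. A minimal sketch, following the pattern used elsewhere in this diff (index and node names are hypothetical):

ClusterRerouteRequest request = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
    .add(new MoveAllocationCommand("test", 0, "node-1", "node-2"));
client().execute(TransportClusterRerouteAction.TYPE, request).actionGet();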
/**

View file

@ -11,6 +11,7 @@ package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.action.support.nodes.BaseNodesRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
@ -23,6 +24,7 @@ import java.util.Map;
*/
public class ClusterStatsRequest extends BaseNodesRequest<ClusterStatsRequest> {
@UpdateForV9 // this constructor is unused in v9
public ClusterStatsRequest(StreamInput in) throws IOException {
super(in);
}
@ -40,6 +42,7 @@ public class ClusterStatsRequest extends BaseNodesRequest<ClusterStatsRequest> {
return new CancellableTask(id, type, action, "", parentTaskId, headers);
}
@UpdateForV9 // this method can just call localOnly() in v9
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);

View file

@ -9,6 +9,7 @@
package org.elasticsearch.action.admin.cluster.stats;
import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.FailedNodeException;
@ -32,6 +33,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.CancellableSingleObjectCache;
import org.elasticsearch.common.util.concurrent.ListenableFuture;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.UpdateForV9;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.seqno.RetentionLeaseStats;
@ -167,7 +169,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<
@Override
protected ClusterStatsNodeRequest newNodeRequest(ClusterStatsRequest request) {
return new ClusterStatsNodeRequest(request);
return new ClusterStatsNodeRequest();
}
@Override
@ -251,18 +253,16 @@ public class TransportClusterStatsAction extends TransportNodesAction<
);
}
@UpdateForV9 // this can be replaced with TransportRequest.Empty in v9
public static class ClusterStatsNodeRequest extends TransportRequest {
// TODO don't wrap the whole top-level request, it contains heavy and irrelevant DiscoveryNode things; see #100878
ClusterStatsRequest request;
ClusterStatsNodeRequest() {}
public ClusterStatsNodeRequest(StreamInput in) throws IOException {
super(in);
request = new ClusterStatsRequest(in);
}
ClusterStatsNodeRequest(ClusterStatsRequest request) {
this.request = request;
if (in.getTransportVersion().before(TransportVersions.DROP_UNUSED_NODES_REQUESTS)) {
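// nodes on older transport versions still send the (unused) wrapped top-level request; read and discard it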
new ClusterStatsRequest(in);
}
}
@Override
@ -273,7 +273,9 @@ public class TransportClusterStatsAction extends TransportNodesAction<
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
request.writeTo(out);
if (out.getTransportVersion().before(TransportVersions.DROP_UNUSED_NODES_REQUESTS)) {
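// keep writing a placeholder request so that nodes on older transport versions can still deserialize this message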
new ClusterStatsRequest().writeTo(out);
}
}
}

View file

@ -9,6 +9,7 @@ package org.elasticsearch.action.search;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.query.NestedQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.SearchPhaseResult;
@ -152,7 +153,7 @@ final class DfsQueryPhase extends SearchPhase {
scoreDocs.sort(Comparator.comparingInt(scoreDoc -> scoreDoc.doc));
String nestedPath = dfsKnnResults.getNestedPath();
QueryBuilder query = new KnnScoreDocQueryBuilder(
scoreDocs.toArray(new ScoreDoc[0]),
scoreDocs.toArray(Lucene.EMPTY_SCORE_DOCS),
source.knnSearch().get(i).getField(),
source.knnSearch().get(i).getQueryVector()
).boost(source.knnSearch().get(i).boost()).queryName(source.knnSearch().get(i).queryName());

View file

@ -24,6 +24,7 @@ import org.apache.lucene.search.TotalHits.Relation;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.io.stream.DelayableWriteable;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
import org.elasticsearch.common.util.Maps;
import org.elasticsearch.common.util.concurrent.AtomicArray;
@ -66,7 +67,6 @@ import java.util.function.Supplier;
import static org.elasticsearch.search.SearchService.DEFAULT_SIZE;
public final class SearchPhaseController {
private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
private final BiFunction<
Supplier<Boolean>,
@ -195,7 +195,7 @@ public final class SearchPhaseController {
return SortedTopDocs.EMPTY;
}
final TopDocs mergedTopDocs = mergeTopDocs(topDocs, size, ignoreFrom ? 0 : from);
final ScoreDoc[] mergedScoreDocs = mergedTopDocs == null ? EMPTY_DOCS : mergedTopDocs.scoreDocs;
final ScoreDoc[] mergedScoreDocs = mergedTopDocs == null ? Lucene.EMPTY_SCORE_DOCS : mergedTopDocs.scoreDocs;
ScoreDoc[] scoreDocs = mergedScoreDocs;
int numSuggestDocs = 0;
if (reducedCompletionSuggestions.isEmpty() == false) {
@ -907,6 +907,6 @@ public final class SearchPhaseController {
Object[] collapseValues,
int numberOfCompletionsSuggestions
) {
public static final SortedTopDocs EMPTY = new SortedTopDocs(EMPTY_DOCS, false, null, null, null, 0);
public static final SortedTopDocs EMPTY = new SortedTopDocs(Lucene.EMPTY_SCORE_DOCS, false, null, null, null, 0);
}
}

View file

@ -8,7 +8,6 @@
package org.elasticsearch.action.search;
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.OriginalIndices;
@ -18,6 +17,7 @@ import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.ChunkedToXContentHelper;
import org.elasticsearch.common.xcontent.ChunkedToXContentObject;
@ -1154,7 +1154,7 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO
// public for tests
public static SearchResponse empty(Supplier<Long> tookInMillisSupplier, Clusters clusters) {
return new SearchResponse(
SearchHits.empty(new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN),
SearchHits.empty(Lucene.TOTAL_HITS_EQUAL_TO_ZERO, Float.NaN),
InternalAggregations.EMPTY,
null,
false,

View file

@ -18,6 +18,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.search.SearchPhaseController.TopDocsStats;
import org.elasticsearch.action.search.SearchResponse.Clusters;
import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.index.shard.ShardId;
@ -177,7 +178,7 @@ public final class SearchResponseMerger implements Releasable {
final TotalHits totalHits;
if (searchHits.getTotalHits() == null) {
// in case we didn't track total hits, we get null from each cluster, but we need to set an exact (EQUAL_TO) total of 0 on the TopDocs
totalHits = new TotalHits(0, TotalHits.Relation.EQUAL_TO);
totalHits = Lucene.TOTAL_HITS_EQUAL_TO_ZERO;
assert trackTotalHits == null || trackTotalHits == false;
trackTotalHits = false;
} else {

View file

@ -39,6 +39,14 @@ public abstract class BaseNodesRequest<Request extends BaseNodesRequest<Request>
private TimeValue timeout;
/**
* @deprecated {@link BaseNodesRequest} derivatives are quite heavyweight and should never need sending over the wire. Do not include
* the full top-level request directly in the node-level requests. Instead, copy the needed fields over to a dedicated node-level
* request.
*
* @see <a href="https://github.com/elastic/elasticsearch/issues/100878">#100878</a>
*/
@Deprecated(forRemoval = true)
protected BaseNodesRequest(StreamInput in) throws IOException {
// A bare `BaseNodesRequest` is never sent over the wire, but several implementations send the full top-level request to each node
// (wrapped up in another request). They shouldn't, but until we fix that we must keep this. See #100878.
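A minimal sketch of the dedicated node-level request the deprecation note asks for (class and field names are hypothetical, shown only to illustrate the pattern):

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.transport.TransportRequest;

import java.io.IOException;

public class ExampleNodeRequest extends TransportRequest {
    private final boolean includeShardsStats; // only the field the node-level action actually needs

    ExampleNodeRequest(boolean includeShardsStats) {
        this.includeShardsStats = includeShardsStats;
    }

    ExampleNodeRequest(StreamInput in) throws IOException {
        super(in);
        this.includeShardsStats = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeBoolean(includeShardsStats);
    }
}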

View file

@ -203,14 +203,26 @@ public class ClusterAdminClient implements ElasticsearchClient {
return new ClusterUpdateSettingsRequestBuilder(this);
}
/**
* @deprecated use {@code ClusterRerouteUtils} in tests, or just run the action directly
*/
@Deprecated(forRemoval = true) // temporary compatibility shim
public ActionFuture<ClusterRerouteResponse> reroute(final ClusterRerouteRequest request) {
return execute(TransportClusterRerouteAction.TYPE, request);
}
/**
* @deprecated use {@code ClusterRerouteUtils} in tests, or just run the action directly
*/
@Deprecated(forRemoval = true) // temporary compatibility shim
public void reroute(final ClusterRerouteRequest request, final ActionListener<ClusterRerouteResponse> listener) {
execute(TransportClusterRerouteAction.TYPE, request, listener);
}
/**
* @deprecated use {@code ClusterRerouteUtils} in tests, or just run the action directly
*/
@Deprecated(forRemoval = true) // temporary compatibility shim
public ClusterRerouteRequestBuilder prepareReroute() {
return new ClusterRerouteRequestBuilder(this);
}

View file

@ -100,13 +100,6 @@ public record AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabl
public int getDesiredNumberOfReplicas(IndexMetadata indexMetadata, RoutingAllocation allocation) {
assert enabled : "should only be called when enabled";
// Make sure in stateless auto-expand indices always have 1 replica to ensure all shard roles are always present
if (Objects.equals(
indexMetadata.getSettings().get(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey()),
"stateless"
)) {
return 1;
}
int numMatchingDataNodes = 0;
for (DiscoveryNode discoveryNode : allocation.nodes().getDataNodes().values()) {
Decision decision = allocation.deciders().shouldAutoExpandToNode(indexMetadata, discoveryNode, allocation);
@ -150,9 +143,22 @@ public record AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabl
for (final IndexMetadata indexMetadata : metadata) {
if (indexMetadata.getState() == IndexMetadata.State.OPEN || isIndexVerifiedBeforeClosed(indexMetadata)) {
AutoExpandReplicas autoExpandReplicas = indexMetadata.getAutoExpandReplicas();
// Make sure auto-expand is applied only when configured, and entirely disabled in stateless
if (autoExpandReplicas.enabled() == false) {
continue;
}
// Special case for stateless indices: auto-expand is disabled, unless number_of_replicas has been set
// manually to 0 via index settings, which needs to be converted to 1.
if (Objects.equals(
indexMetadata.getSettings().get(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey()),
"stateless"
)) {
if (indexMetadata.getNumberOfReplicas() == 0) {
nrReplicasChanged.computeIfAbsent(1, ArrayList::new).add(indexMetadata.getIndex().getName());
} else {
continue;
}
}
if (allocation == null) {
allocation = allocationSupplier.get();
}

View file

@ -9,7 +9,6 @@
package org.elasticsearch.common.bytes;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.BytesRefIterator;
import org.apache.lucene.util.RamUsageEstimator;
@ -172,18 +171,33 @@ public final class CompositeBytesReference extends AbstractBytesReference {
@Override
public BytesRef toBytesRef() {
BytesRefBuilder builder = new BytesRefBuilder();
builder.grow(length());
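// flatten every fragment into one array: fast path via System.arraycopy when a fragment exposes its backing array, iterator fall-back otherwise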
final byte[] result = new byte[length];
int offset = 0;
for (BytesReference reference : references) {
if (reference.hasArray()) {
int len = reference.length();
System.arraycopy(reference.array(), reference.arrayOffset(), result, offset, len);
offset += len;
} else {
offset = copyViaIterator(reference, result, offset);
}
}
assert offset == result.length;
return new BytesRef(result);
}
private static int copyViaIterator(BytesReference reference, byte[] result, int offset) {
BytesRef spare;
BytesRefIterator iterator = iterator();
BytesRefIterator iterator = reference.iterator();
try {
while ((spare = iterator.next()) != null) {
builder.append(spare);
System.arraycopy(spare.bytes, spare.offset, result, offset, spare.length);
offset += spare.length;
}
} catch (IOException ex) {
throw new AssertionError("won't happen", ex); // this is really an error since we don't do IO in our BytesReference implementations
}
return builder.toBytesRef();
return offset;
}
@Override

View file

@ -101,7 +101,10 @@ public class Lucene {
public static final ScoreDoc[] EMPTY_SCORE_DOCS = new ScoreDoc[0];
public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), EMPTY_SCORE_DOCS);
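// shared TotalHits instances: an exact count of zero, and a zero lower bound for responses that do not track total hits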
public static final TotalHits TOTAL_HITS_EQUAL_TO_ZERO = new TotalHits(0, TotalHits.Relation.EQUAL_TO);
public static final TotalHits TOTAL_HITS_GREATER_OR_EQUAL_TO_ZERO = new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
public static final TopDocs EMPTY_TOP_DOCS = new TopDocs(TOTAL_HITS_EQUAL_TO_ZERO, EMPTY_SCORE_DOCS);
private Lucene() {}

View file

@ -135,6 +135,15 @@ public final class Sets {
return union;
}
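/**
 * The union of one or more sets. The resulting set contains every element that is present in any of the input sets.
 * No input is mutated by this operation; an entirely new set is returned.
 */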
@SafeVarargs
public static <T> Set<T> union(Set<T> first, Set<T>... others) {
Set<T> union = new HashSet<>(first);
for (Set<T> other : others) {
union.addAll(other);
}
return union;
}
/**
* The intersection of two sets. Namely, the resulting set contains all the elements that are in both sets.
* Neither input is mutated by this operation, an entirely new set is returned.

View file

@ -36,6 +36,7 @@ import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.grouping.GroupSelector;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.Maps;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.mapper.MappedFieldType;
@ -202,7 +203,7 @@ public class SinglePassGroupingCollector<T> extends SimpleCollector {
if (groupMap.size() <= groupOffset) {
TotalHits totalHits = new TotalHits(totalHitCount, TotalHits.Relation.EQUAL_TO);
return new TopFieldGroups(groupField, totalHits, new ScoreDoc[0], groupSort.getSort(), new Object[0]);
return new TopFieldGroups(groupField, totalHits, Lucene.EMPTY_SCORE_DOCS, groupSort.getSort(), new Object[0]);
}
if (orderedGroups == null) {

View file

@ -16,6 +16,7 @@ import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.util.PriorityQueue;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.util.CollectionUtils;
import java.util.ArrayList;
@ -225,7 +226,7 @@ public final class TopFieldGroups extends TopFieldDocs {
queue.pop();
}
}
hits = hitList.toArray(new ScoreDoc[0]);
hits = hitList.toArray(Lucene.EMPTY_SCORE_DOCS);
values = groupList.toArray(new Object[0]);
}
TotalHits totalHits = new TotalHits(totalHitCount, totalHitsRelation);

View file

@ -83,7 +83,7 @@ public abstract class BaseRestHandler implements RestHandler {
// check if the query has any parameters that are not in the supported set (if declared)
Set<String> supported = allSupportedParameters();
if (supported != null) {
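// response-rendering parameters such as error_trace are accepted by every handler, in addition to its declared set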
var allSupported = Sets.union(ALWAYS_SUPPORTED, supported);
var allSupported = Sets.union(RestResponse.RESPONSE_PARAMS, ALWAYS_SUPPORTED, supported);
if (allSupported.containsAll(request.params().keySet()) == false) {
Set<String> unsupported = Sets.difference(request.params().keySet(), allSupported);
throw new IllegalArgumentException(unrecognized(request, unsupported, allSupported, "parameter"));
@ -97,6 +97,7 @@ public abstract class BaseRestHandler implements RestHandler {
// use a sorted set so the unconsumed parameters appear in a reliable sorted order
final SortedSet<String> unconsumedParams = request.unconsumedParams()
.stream()
.filter(p -> RestResponse.RESPONSE_PARAMS.contains(p) == false)
.filter(p -> responseParams(request.getRestApiVersion()).contains(p) == false)
.collect(Collectors.toCollection(TreeSet::new));

View file

@ -37,6 +37,7 @@ import static org.elasticsearch.rest.RestController.ELASTIC_PRODUCT_HTTP_HEADER;
public final class RestResponse implements Releasable {
public static final String TEXT_CONTENT_TYPE = "text/plain; charset=UTF-8";
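// parameters that only affect how the response is rendered and are therefore supported by every endpoint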
public static final Set<String> RESPONSE_PARAMS = Set.of("error_trace");
static final String STATUS = "status";

View file

@ -9,6 +9,7 @@
package org.elasticsearch.rest.action.admin.cluster;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
@ -86,7 +87,11 @@ public class RestClusterRerouteAction extends BaseRestHandler {
if (metric == null) {
request.params().put("metric", DEFAULT_METRICS);
}
return channel -> client.admin().cluster().reroute(clusterRerouteRequest, new RestRefCountedChunkedToXContentListener<>(channel));
return channel -> client.execute(
TransportClusterRerouteAction.TYPE,
clusterRerouteRequest,
new RestRefCountedChunkedToXContentListener<>(channel)
);
}
@Override
@ -95,12 +100,10 @@ public class RestClusterRerouteAction extends BaseRestHandler {
}
public static ClusterRerouteRequest createRequest(RestRequest request) throws IOException {
ClusterRerouteRequest clusterRerouteRequest = new ClusterRerouteRequest();
final var clusterRerouteRequest = new ClusterRerouteRequest(getMasterNodeTimeout(request), getAckTimeout(request));
clusterRerouteRequest.dryRun(request.paramAsBoolean("dry_run", clusterRerouteRequest.dryRun()));
clusterRerouteRequest.explain(request.paramAsBoolean("explain", clusterRerouteRequest.explain()));
clusterRerouteRequest.ackTimeout(getAckTimeout(request));
clusterRerouteRequest.setRetryFailed(request.paramAsBoolean("retry_failed", clusterRerouteRequest.isRetryFailed()));
clusterRerouteRequest.masterNodeTimeout(getMasterNodeTimeout(request));
request.applyContentParser(parser -> PARSER.parse(parser, clusterRerouteRequest, null));
return clusterRerouteRequest;
}

View file

@ -13,6 +13,7 @@ import org.elasticsearch.script.field.vectors.DenseVector;
import org.elasticsearch.script.field.vectors.DenseVectorDocValuesField;
import java.io.IOException;
import java.util.HexFormat;
import java.util.List;
public class VectorScoreScriptUtils {
@ -65,6 +66,23 @@ public class VectorScoreScriptUtils {
this.qvMagnitude = (float) Math.sqrt(queryMagnitude);
field.getElementType().checkVectorBounds(validateValues);
}
/**
* Constructs a dense vector function used for byte-sized vectors.
*
* @param scoreScript The script in which this function was referenced.
* @param field The vector field.
* @param queryVector The query vector.
*/
public ByteDenseVectorFunction(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
super(scoreScript, field);
this.queryVector = queryVector;
float queryMagnitude = 0.0f;
for (byte value : queryVector) {
queryMagnitude += value * value;
}
this.qvMagnitude = (float) Math.sqrt(queryMagnitude);
}
}
public static class FloatDenseVectorFunction extends DenseVectorFunction {
@ -116,6 +134,10 @@ public class VectorScoreScriptUtils {
super(scoreScript, field, queryVector);
}
public ByteL1Norm(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
super(scoreScript, field, queryVector);
}
public double l1norm() {
setNextVector();
return field.get().l1Norm(queryVector);
@ -138,11 +160,25 @@ public class VectorScoreScriptUtils {
private final L1NormInterface function;
public L1Norm(ScoreScript scoreScript, List<Number> queryVector, String fieldName) {
@SuppressWarnings("unchecked")
public L1Norm(ScoreScript scoreScript, Object queryVector, String fieldName) {
DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName);
function = switch (field.getElementType()) {
case BYTE -> new ByteL1Norm(scoreScript, field, queryVector);
case FLOAT -> new FloatL1Norm(scoreScript, field, queryVector);
case BYTE -> {
if (queryVector instanceof List) {
yield new ByteL1Norm(scoreScript, field, (List<Number>) queryVector);
} else if (queryVector instanceof String s) {
byte[] parsedQueryVector = HexFormat.of().parseHex(s);
yield new ByteL1Norm(scoreScript, field, parsedQueryVector);
}
throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
}
case FLOAT -> {
if (queryVector instanceof List) {
yield new FloatL1Norm(scoreScript, field, (List<Number>) queryVector);
}
throw new IllegalArgumentException("Unsupported input object for float vectors: " + queryVector.getClass().getName());
}
};
}
@ -162,6 +198,10 @@ public class VectorScoreScriptUtils {
super(scoreScript, field, queryVector);
}
public ByteL2Norm(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
super(scoreScript, field, queryVector);
}
public double l2norm() {
setNextVector();
return field.get().l2Norm(queryVector);
@ -184,11 +224,25 @@ public class VectorScoreScriptUtils {
private final L2NormInterface function;
public L2Norm(ScoreScript scoreScript, List<Number> queryVector, String fieldName) {
@SuppressWarnings("unchecked")
public L2Norm(ScoreScript scoreScript, Object queryVector, String fieldName) {
DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName);
function = switch (field.getElementType()) {
case BYTE -> new ByteL2Norm(scoreScript, field, queryVector);
case FLOAT -> new FloatL2Norm(scoreScript, field, queryVector);
case BYTE -> {
if (queryVector instanceof List) {
yield new ByteL2Norm(scoreScript, field, (List<Number>) queryVector);
} else if (queryVector instanceof String s) {
byte[] parsedQueryVector = HexFormat.of().parseHex(s);
yield new ByteL2Norm(scoreScript, field, parsedQueryVector);
}
throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
}
case FLOAT -> {
if (queryVector instanceof List) {
yield new FloatL2Norm(scoreScript, field, (List<Number>) queryVector);
}
throw new IllegalArgumentException("Unsupported input object for float vectors: " + queryVector.getClass().getName());
}
};
}
@ -208,6 +262,10 @@ public class VectorScoreScriptUtils {
super(scoreScript, field, queryVector);
}
public ByteDotProduct(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
super(scoreScript, field, queryVector);
}
public double dotProduct() {
setNextVector();
return field.get().dotProduct(queryVector);
@ -230,11 +288,25 @@ public class VectorScoreScriptUtils {
private final DotProductInterface function;
public DotProduct(ScoreScript scoreScript, List<Number> queryVector, String fieldName) {
@SuppressWarnings("unchecked")
public DotProduct(ScoreScript scoreScript, Object queryVector, String fieldName) {
DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName);
function = switch (field.getElementType()) {
case BYTE -> new ByteDotProduct(scoreScript, field, queryVector);
case FLOAT -> new FloatDotProduct(scoreScript, field, queryVector);
case BYTE -> {
if (queryVector instanceof List) {
yield new ByteDotProduct(scoreScript, field, (List<Number>) queryVector);
} else if (queryVector instanceof String s) {
byte[] parsedQueryVector = HexFormat.of().parseHex(s);
yield new ByteDotProduct(scoreScript, field, parsedQueryVector);
}
throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
}
case FLOAT -> {
if (queryVector instanceof List) {
yield new FloatDotProduct(scoreScript, field, (List<Number>) queryVector);
}
throw new IllegalArgumentException("Unsupported input object for float vectors: " + queryVector.getClass().getName());
}
};
}
@ -254,6 +326,10 @@ public class VectorScoreScriptUtils {
super(scoreScript, field, queryVector);
}
public ByteCosineSimilarity(ScoreScript scoreScript, DenseVectorDocValuesField field, byte[] queryVector) {
super(scoreScript, field, queryVector);
}
public double cosineSimilarity() {
setNextVector();
return field.get().cosineSimilarity(queryVector, qvMagnitude);
@ -276,11 +352,25 @@ public class VectorScoreScriptUtils {
private final CosineSimilarityInterface function;
public CosineSimilarity(ScoreScript scoreScript, List<Number> queryVector, String fieldName) {
@SuppressWarnings("unchecked")
public CosineSimilarity(ScoreScript scoreScript, Object queryVector, String fieldName) {
DenseVectorDocValuesField field = (DenseVectorDocValuesField) scoreScript.field(fieldName);
function = switch (field.getElementType()) {
case BYTE -> new ByteCosineSimilarity(scoreScript, field, queryVector);
case FLOAT -> new FloatCosineSimilarity(scoreScript, field, queryVector);
case BYTE -> {
if (queryVector instanceof List) {
yield new ByteCosineSimilarity(scoreScript, field, (List<Number>) queryVector);
} else if (queryVector instanceof String s) {
byte[] parsedQueryVector = HexFormat.of().parseHex(s);
yield new ByteCosineSimilarity(scoreScript, field, parsedQueryVector);
}
throw new IllegalArgumentException("Unsupported input object for byte vectors: " + queryVector.getClass().getName());
}
case FLOAT -> {
if (queryVector instanceof List) {
yield new FloatCosineSimilarity(scoreScript, field, (List<Number>) queryVector);
}
throw new IllegalArgumentException("Unsupported input object for float vectors: " + queryVector.getClass().getName());
}
};
}
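
All four scorers above (l1norm, l2norm, dotProduct, cosineSimilarity) gain the same byte-vector branch: the query vector may be passed either as a list of numbers or as a hex-encoded string. A minimal sketch of the decoding step, with an assumed input value:

import java.util.Arrays;
import java.util.HexFormat;

class HexVectorSketch {
    public static void main(String[] args) {
        // Each pair of hex digits becomes one signed byte, so "0a1b2c"
        // decodes to the byte vector [10, 27, 44].
        byte[] vector = HexFormat.of().parseHex("0a1b2c");
        System.out.println(Arrays.toString(vector)); // [10, 27, 44]
    }
}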

View file

@ -34,7 +34,7 @@ import java.util.Objects;
public final class SearchHits implements Writeable, ChunkedToXContent, RefCounted, Iterable<SearchHit> {
public static final SearchHit[] EMPTY = new SearchHit[0];
public static final SearchHits EMPTY_WITH_TOTAL_HITS = SearchHits.empty(new TotalHits(0, Relation.EQUAL_TO), 0);
public static final SearchHits EMPTY_WITH_TOTAL_HITS = SearchHits.empty(Lucene.TOTAL_HITS_EQUAL_TO_ZERO, 0);
public static final SearchHits EMPTY_WITHOUT_TOTAL_HITS = SearchHits.empty(null, 0);
private final SearchHit[] hits;
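
This hunk and several below swap freshly allocated empty TotalHits/TopDocs instances for shared constants. A sketch of the assumed definitions (names taken from the diff; the exact declarations in org.elasticsearch.common.lucene.Lucene may differ):

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;

class LuceneConstantsSketch {
    static final TotalHits TOTAL_HITS_EQUAL_TO_ZERO =
        new TotalHits(0, TotalHits.Relation.EQUAL_TO);
    static final TotalHits TOTAL_HITS_GREATER_OR_EQUAL_TO_ZERO =
        new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
    // Reused wherever a query produces no hits, instead of a new allocation.
    static final TopDocs EMPTY_TOP_DOCS =
        new TopDocs(TOTAL_HITS_EQUAL_TO_ZERO, new ScoreDoc[0]);
}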

View file

@ -20,7 +20,6 @@ import org.apache.lucene.search.TopDocsCollector;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.search.MaxScoreCollector;
import org.elasticsearch.common.lucene.Lucene;
@ -233,11 +232,7 @@ class TopHitsAggregator extends MetricsAggregator {
public InternalTopHits buildEmptyAggregation() {
TopDocs topDocs;
if (subSearchContext.sort() != null) {
topDocs = new TopFieldDocs(
new TotalHits(0, TotalHits.Relation.EQUAL_TO),
new FieldDoc[0],
subSearchContext.sort().sort.getSort()
);
topDocs = new TopFieldDocs(Lucene.TOTAL_HITS_EQUAL_TO_ZERO, new FieldDoc[0], subSearchContext.sort().sort.getSort());
} else {
topDocs = Lucene.EMPTY_TOP_DOCS;
}

View file

@ -49,7 +49,6 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;
import java.util.PriorityQueue;
@ -255,13 +254,11 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
// Make a copy so we can sort:
List<LeafReaderContext> sortedLeaves = new ArrayList<>(leaves);
// Sort by maxDoc, descending:
final Comparator<LeafReaderContext> leafComparator = Comparator.comparingInt(l -> l.reader().maxDoc());
sortedLeaves.sort(leafComparator.reversed());
sortedLeaves.sort((c1, c2) -> Integer.compare(c2.reader().maxDoc(), c1.reader().maxDoc()));
// we add the groups on a priority queue, so we can add orphan leaves to the smallest group
final Comparator<List<LeafReaderContext>> groupComparator = Comparator.comparingInt(
l -> l.stream().mapToInt(lr -> lr.reader().maxDoc()).sum()
final PriorityQueue<List<LeafReaderContext>> queue = new PriorityQueue<>(
(c1, c2) -> Integer.compare(sumMaxDocValues(c1), sumMaxDocValues(c2))
);
final PriorityQueue<List<LeafReaderContext>> queue = new PriorityQueue<>(groupComparator);
long docSum = 0;
List<LeafReaderContext> group = new ArrayList<>();
for (LeafReaderContext ctx : sortedLeaves) {
@ -297,6 +294,14 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
return slices;
}
private static int sumMaxDocValues(List<LeafReaderContext> l) {
int sum = 0;
for (LeafReaderContext lr : l) {
sum += lr.reader().maxDoc();
}
return sum;
}
@Override
public <C extends Collector, T> T search(Query query, CollectorManager<C, T> collectorManager) throws IOException {
final C firstCollector = collectorManager.newCollector();
@ -337,7 +342,7 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
throw new IllegalStateException("CollectorManager does not always produce collectors with the same score mode");
}
}
final List<Callable<C>> listTasks = new ArrayList<>();
final List<Callable<C>> listTasks = new ArrayList<>(leafSlices.length);
for (int i = 0; i < leafSlices.length; ++i) {
final LeafReaderContext[] leaves = leafSlices[i].leaves;
final C collector = collectors.get(i);
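
The grouping strategy itself is unchanged: leaves are sorted by maxDoc descending, groups are cut on a running docSum threshold, and each orphan leaf joins the group with the smallest document total; the rewrite just replaces Comparator/stream allocations with plain loops on this hot path. A standalone sketch of the greedy part, simplified to omit the threshold cut:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

class GreedyGroupingSketch {
    // Distribute sizes across `groups` buckets, always topping up the
    // bucket with the smallest running sum.
    static List<List<Integer>> group(List<Integer> sizes, int groups) {
        List<Integer> sorted = new ArrayList<>(sizes);
        sorted.sort(Comparator.reverseOrder()); // largest first
        PriorityQueue<List<Integer>> queue = new PriorityQueue<>(
            Comparator.comparingInt(g -> g.stream().mapToInt(Integer::intValue).sum())
        );
        for (int i = 0; i < groups; i++) {
            queue.add(new ArrayList<>());
        }
        for (int size : sorted) {
            List<Integer> smallest = queue.poll(); // poll, mutate, re-add
            smallest.add(size);
            queue.add(smallest);
        }
        return new ArrayList<>(queue);
    }
}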

View file

@ -22,7 +22,6 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.Weight;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
@ -76,11 +75,7 @@ public class QueryPhase {
searchContext.size(0);
QueryPhase.executeQuery(searchContext);
} else {
searchContext.queryResult()
.topDocs(
new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN),
new DocValueFormat[0]
);
searchContext.queryResult().topDocs(new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN), new DocValueFormat[0]);
}
List<TopDocs> rrfRankResults = new ArrayList<>();
@ -124,11 +119,7 @@ public class QueryPhase {
static void executeQuery(SearchContext searchContext) throws QueryPhaseExecutionException {
if (searchContext.hasOnlySuggest()) {
SuggestPhase.execute(searchContext);
searchContext.queryResult()
.topDocs(
new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN),
new DocValueFormat[0]
);
searchContext.queryResult().topDocs(new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN), new DocValueFormat[0]);
return;
}

View file

@ -400,7 +400,7 @@ abstract class QueryPhaseCollectorManager implements CollectorManager<Collector,
} else if (trackTotalHitsUpTo == SearchContext.TRACK_TOTAL_HITS_DISABLED) {
// don't compute hit counts via the collector
hitCountThreshold = 1;
shortcutTotalHits = new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO);
shortcutTotalHits = Lucene.TOTAL_HITS_GREATER_OR_EQUAL_TO_ZERO;
} else {
// implicit total hit counts are valid only when there is no filter collector in the chain
final int hitCount = hasFilterCollector ? -1 : shortcutTotalHitCount(reader, query);

View file

@ -100,12 +100,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
}
public static class CancellableNodesRequest extends BaseNodesRequest<CancellableNodesRequest> {
private String requestName;
private CancellableNodesRequest(StreamInput in) throws IOException {
super(in);
requestName = in.readString();
}
private final String requestName;
public CancellableNodesRequest(String requestName, String... nodesIds) {
super(nodesIds);
@ -147,7 +142,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
boolean shouldBlock,
CountDownLatch actionStartedLatch
) {
super(actionName, threadPool, clusterService, transportService, CancellableNodesRequest::new, CancellableNodeRequest::new);
super(actionName, threadPool, clusterService, transportService, CancellableNodeRequest::new);
this.shouldBlock = shouldBlock;
this.actionStartedLatch = actionStartedLatch;
}
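
The same simplification recurs in the test classes below: the nodes-level request is built and consumed locally, so its StreamInput constructor disappears and requestName can become final; only the per-node request still crosses the wire. A minimal sketch of the assumed split:

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.transport.TransportRequest;

// Sketch only: the per-node request keeps full wire serialization, while
// the nodes-level request (not shown) needs plain constructors only.
class NodeRequestSketch extends TransportRequest {
    final String requestName;

    NodeRequestSketch(String requestName) {
        this.requestName = requestName;
    }

    NodeRequestSketch(StreamInput in) throws IOException {
        super(in);
        requestName = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(requestName);
    }
}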

View file

@ -144,7 +144,6 @@ public abstract class TaskManagerTestCase extends ESTestCase {
ThreadPool threadPool,
ClusterService clusterService,
TransportService transportService,
Writeable.Reader<NodesRequest> request,
Writeable.Reader<NodeRequest> nodeRequest
) {
super(

View file

@ -195,19 +195,11 @@ public class TestTaskPlugin extends Plugin implements ActionPlugin, NetworkPlugi
}
public static class NodesRequest extends BaseNodesRequest<NodesRequest> {
private String requestName;
private final String requestName;
private boolean shouldStoreResult = false;
private boolean shouldBlock = true;
private boolean shouldFail = false;
NodesRequest(StreamInput in) throws IOException {
super(in);
requestName = in.readString();
shouldStoreResult = in.readBoolean();
shouldBlock = in.readBoolean();
shouldFail = in.readBoolean();
}
NodesRequest(String requestName, String... nodesIds) {
super(nodesIds);
this.requestName = requestName;

View file

@ -109,11 +109,6 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
public static class NodesRequest extends BaseNodesRequest<NodesRequest> {
private final String requestName;
NodesRequest(StreamInput in) throws IOException {
super(in);
requestName = in.readString();
}
public NodesRequest(String requestName, String... nodesIds) {
super(nodesIds);
this.requestName = requestName;
@ -142,7 +137,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
abstract class TestNodesAction extends AbstractTestNodesAction<NodesRequest, NodeRequest> {
TestNodesAction(String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService) {
super(actionName, threadPool, clusterService, transportService, NodesRequest::new, NodeRequest::new);
super(actionName, threadPool, clusterService, transportService, NodeRequest::new);
}
@Override

View file

@ -21,8 +21,8 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestUtils;
import org.elasticsearch.rest.action.admin.cluster.RestClusterRerouteAction;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
@ -38,6 +38,7 @@ import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
import static org.elasticsearch.action.support.master.AcknowledgedRequest.DEFAULT_ACK_TIMEOUT;
import static org.elasticsearch.core.TimeValue.timeValueMillis;
import static org.elasticsearch.rest.RestUtils.REST_MASTER_TIMEOUT_PARAM;
@ -80,7 +81,7 @@ public class ClusterRerouteRequestTests extends ESTestCase {
}
private ClusterRerouteRequest randomRequest() {
ClusterRerouteRequest request = new ClusterRerouteRequest();
ClusterRerouteRequest request = new ClusterRerouteRequest(randomTimeValue(), randomTimeValue());
int commands = between(0, 10);
for (int i = 0; i < commands; i++) {
request.add(randomFrom(RANDOM_COMMAND_GENERATORS).get());
@ -97,7 +98,7 @@ public class ClusterRerouteRequestTests extends ESTestCase {
assertEquals(request, request);
assertEquals(request.hashCode(), request.hashCode());
ClusterRerouteRequest copy = new ClusterRerouteRequest().add(
ClusterRerouteRequest copy = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).add(
request.getCommands().commands().toArray(new AllocationCommand[0])
);
AcknowledgedRequest<ClusterRerouteRequest> clusterRerouteRequestAcknowledgedRequest = copy.dryRun(request.dryRun())
@ -196,14 +197,14 @@ public class ClusterRerouteRequestTests extends ESTestCase {
builder.field("dry_run", original.dryRun());
}
params.put("explain", Boolean.toString(original.explain()));
if (false == original.ackTimeout().equals(AcknowledgedRequest.DEFAULT_ACK_TIMEOUT) || randomBoolean()) {
params.put("timeout", original.ackTimeout().toString());
if (false == original.ackTimeout().equals(DEFAULT_ACK_TIMEOUT) || randomBoolean()) {
params.put("timeout", original.ackTimeout().getStringRep());
}
if (original.isRetryFailed() || randomBoolean()) {
params.put("retry_failed", Boolean.toString(original.isRetryFailed()));
}
if (false == original.masterNodeTimeout().equals(TimeValue.THIRTY_SECONDS) || randomBoolean()) {
params.put(REST_MASTER_TIMEOUT_PARAM, original.masterNodeTimeout().toString());
if (false == original.masterNodeTimeout().equals(RestUtils.REST_MASTER_TIMEOUT_DEFAULT) || randomBoolean()) {
params.put(REST_MASTER_TIMEOUT_PARAM, original.masterNodeTimeout().getStringRep());
}
if (original.getCommands() != null) {
hasBody = true;
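
The toString() to getStringRep() switch matters for round-tripping randomized timeouts through REST parameters: toString() may render a coarser, human-friendly form, while getStringRep() preserves the stored duration and unit. A sketch, with output values assumed from TimeValue's behavior:

import org.elasticsearch.core.TimeValue;

class TimeValueRepSketch {
    public static void main(String[] args) {
        TimeValue t = TimeValue.timeValueMillis(90_000);
        System.out.println(t);                // "1.5m" - readable but lossy
        System.out.println(t.getStringRep()); // "90000ms" - parses back exactly
    }
}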

View file

@ -51,7 +51,7 @@ import static org.hamcrest.Matchers.not;
public class ClusterRerouteTests extends ESAllocationTestCase {
public void testSerializeRequest() throws IOException {
ClusterRerouteRequest req = new ClusterRerouteRequest();
ClusterRerouteRequest req = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT);
req.setRetryFailed(randomBoolean());
req.dryRun(randomBoolean());
req.explain(randomBoolean());
@ -86,7 +86,7 @@ public class ClusterRerouteTests extends ESAllocationTestCase {
var responseRef = new AtomicReference<ClusterRerouteResponse>();
var responseActionListener = ActionTestUtils.assertNoFailureListener(responseRef::set);
var request = new ClusterRerouteRequest().dryRun(true);
var request = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).dryRun(true);
var task = new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask(
logger,
allocationService,
@ -112,7 +112,7 @@ public class ClusterRerouteTests extends ESAllocationTestCase {
);
ClusterState clusterState = createInitialClusterState(allocationService);
var req = new ClusterRerouteRequest().dryRun(false);
var req = new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT).dryRun(false);
var task = new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask(
logger,
allocationService,

View file

@ -323,11 +323,9 @@ public class TransportNodesActionTests extends ESTestCase {
public DataNodesOnlyTransportNodesAction getDataNodesOnlyTransportNodesAction(TransportService transportService) {
return new DataNodesOnlyTransportNodesAction(
THREAD_POOL,
clusterService,
transportService,
new ActionFilters(Collections.emptySet()),
TestNodesRequest::new,
TestNodeRequest::new,
THREAD_POOL.executor(ThreadPool.Names.GENERIC)
);
@ -383,11 +381,9 @@ public class TransportNodesActionTests extends ESTestCase {
private static class DataNodesOnlyTransportNodesAction extends TestTransportNodesAction {
DataNodesOnlyTransportNodesAction(
ThreadPool threadPool,
ClusterService clusterService,
TransportService transportService,
ActionFilters actionFilters,
Writeable.Reader<TestNodesRequest> request,
Writeable.Reader<TestNodeRequest> nodeRequest,
Executor nodeExecutor
) {
@ -401,10 +397,6 @@ public class TransportNodesActionTests extends ESTestCase {
}
private static class TestNodesRequest extends BaseNodesRequest<TestNodesRequest> {
TestNodesRequest(StreamInput in) throws IOException {
super(in);
}
TestNodesRequest(String... nodesIds) {
super(nodesIds);
}

View file

@ -11,6 +11,7 @@ package org.elasticsearch.client.internal;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
import org.elasticsearch.action.admin.cluster.snapshots.create.TransportCreateSnapshotAction;
import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction;
@ -118,10 +119,11 @@ public abstract class AbstractClientHeadersTestCase extends ESTestCase {
.cluster()
.prepareCreateSnapshot("repo", "bck")
.execute(new AssertingActionListener<>(TransportCreateSnapshotAction.TYPE.name(), client.threadPool()));
client.admin()
.cluster()
.prepareReroute()
.execute(new AssertingActionListener<>(TransportClusterRerouteAction.TYPE.name(), client.threadPool()));
client.execute(
TransportClusterRerouteAction.TYPE,
new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT),
new AssertingActionListener<>(TransportClusterRerouteAction.TYPE.name(), client.threadPool())
);
// choosing arbitrary indices admin actions to test
client.admin()

View file

@ -142,7 +142,7 @@ public class AutoExpandReplicasTests extends ESTestCase {
state,
state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING)
);
state = cluster.reroute(state, new ClusterRerouteRequest());
state = cluster.reroute(state, new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT));
}
IndexShardRoutingTable preTable = state.routingTable().index("index").shard(0);

View file

@ -123,7 +123,8 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase {
for (int i = 0; i < randomIntBetween(4, 8); i++) {
DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(createNode()).build();
state = ClusterState.builder(state).nodes(newNodes).build();
state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after adding node
// always reroute after adding node
state = cluster.reroute(state, new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT));
}
// Log the node versions (for debugging if necessary)

View file

@ -441,7 +441,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice
// randomly reroute
if (rarely()) {
state = cluster.reroute(state, new ClusterRerouteRequest());
state = cluster.reroute(state, new ClusterRerouteRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT));
}
// randomly start and fail allocated shards

Some files were not shown because too many files have changed in this diff.