Merge main into multi-project

commit 6495dcbb40
Niels Bauman 2025-01-24 15:47:40 +10:00
287 changed files with 9134 additions and 2149 deletions


@ -1,11 +1,22 @@
config:
allow-labels: test-release
steps:
- label: release-tests
command: .buildkite/scripts/release-tests.sh
timeout_in_minutes: 300
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
diskSizeGb: 350
machineType: custom-32-98304
- group: release-tests
steps:
- label: "{{matrix.CHECK_TASK}} / release-tests"
key: "packaging-tests-unix"
command: .buildkite/scripts/release-tests.sh {{matrix.CHECK_TASK}}
timeout_in_minutes: 120
matrix:
setup:
CHECK_TASK:
- checkPart1
- checkPart2
- checkPart3
- checkPart4
- checkPart5
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
diskSizeGb: 350
machineType: custom-32-98304


@ -20,4 +20,4 @@ curl --fail -o "${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/m
curl --fail -o "${ML_IVY_REPO}/maven/org/elasticsearch/ml/ml-cpp/${ES_VERSION}/ml-cpp-${ES_VERSION}.zip" https://artifacts-snapshot.elastic.co/ml-cpp/${ES_VERSION}-SNAPSHOT/downloads/ml-cpp/ml-cpp-${ES_VERSION}-SNAPSHOT.zip
.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dbuild.snapshot=false -Dbuild.ml_cpp.repo=file://${ML_IVY_REPO} \
-Dtests.jvm.argline=-Dbuild.snapshot=false -Dlicense.key=${WORKSPACE}/x-pack/license-tools/src/test/resources/public.key -Dbuild.id=deadbeef assemble functionalTests
-Dtests.jvm.argline=-Dbuild.snapshot=false -Dlicense.key=${WORKSPACE}/x-pack/license-tools/src/test/resources/public.key -Dbuild.id=deadbeef ${@:-functionalTests}

.gitattributes

@ -11,6 +11,7 @@ x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/*.interp li
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseLexer*.java linguist-generated=true
x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlBaseParser*.java linguist-generated=true
x-pack/plugin/esql/src/main/generated/** linguist-generated=true
x-pack/plugin/esql/src/main/generated-src/** linguist-generated=true
# ESQL functions docs are autogenerated. More information at `docs/reference/esql/functions/README.md`
docs/reference/esql/functions/*/** linguist-generated=true


@ -126,9 +126,12 @@ exit
Grab the async profiler from https://github.com/jvm-profiling-tools/async-profiler
and run `prof async` like so:
```
gradlew -p benchmarks/ run --args 'LongKeyedBucketOrdsBenchmark.multiBucket -prof "async:libPath=/home/nik9000/Downloads/tmp/async-profiler-1.8.3-linux-x64/build/libasyncProfiler.so;dir=/tmp/prof;output=flamegraph"'
gradlew -p benchmarks/ run --args 'LongKeyedBucketOrdsBenchmark.multiBucket -prof "async:libPath=/home/nik9000/Downloads/async-profiler-3.0-29ee888-linux-x64/lib/libasyncProfiler.so;dir=/tmp/prof;output=flamegraph"'
```
Note: As of January 2025 the latest release of async-profiler doesn't work
with our JDK, but the nightly build is fine.
If you are on a Mac, the OS will warn you that the shared library was downloaded
from the internet. You'll need to go to System Settings and allow it to run.
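As an alternative to clicking through System Settings, you can clear macOS's
quarantine flag from the command line. A minimal sketch, assuming the library
was extracted under `~/Downloads` (adjust the path to your download):
```sh
# Remove the quarantine attribute so macOS lets the profiler library load.
xattr -d com.apple.quarantine ~/Downloads/async-profiler/lib/libasyncProfiler.so
```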


@ -38,6 +38,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Case;
import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc;
import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs;
import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin;
import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce;
import org.elasticsearch.xpack.esql.expression.function.scalar.string.RLike;
import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add;
import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.Equals;
@ -96,6 +97,9 @@ public class EvalBenchmark {
"add_double",
"case_1_eager",
"case_1_lazy",
"coalesce_2_noop",
"coalesce_2_eager",
"coalesce_2_lazy",
"date_trunc",
"equal_to_const",
"long_equal_to_long",
@ -142,8 +146,34 @@ public class EvalBenchmark {
lhs = new Add(Source.EMPTY, lhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
rhs = new Add(Source.EMPTY, rhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
}
yield EvalMapper.toEvaluator(FOLD_CONTEXT, new Case(Source.EMPTY, condition, List.of(lhs, rhs)), layout(f1, f2))
.get(driverContext);
EvalOperator.ExpressionEvaluator evaluator = EvalMapper.toEvaluator(
FOLD_CONTEXT,
new Case(Source.EMPTY, condition, List.of(lhs, rhs)),
layout(f1, f2)
).get(driverContext);
String desc = operation.endsWith("lazy") ? "CaseLazyEvaluator" : "CaseEagerEvaluator";
if (evaluator.toString().contains(desc) == false) {
throw new IllegalArgumentException("Evaluator was [" + evaluator + "] but expected one containing [" + desc + "]");
}
yield evaluator;
}
case "coalesce_2_noop", "coalesce_2_eager", "coalesce_2_lazy" -> {
FieldAttribute f1 = longField();
FieldAttribute f2 = longField();
Expression lhs = f1;
if (operation.endsWith("lazy")) {
lhs = new Add(Source.EMPTY, lhs, new Literal(Source.EMPTY, 1L, DataType.LONG));
}
EvalOperator.ExpressionEvaluator evaluator = EvalMapper.toEvaluator(
FOLD_CONTEXT,
new Coalesce(Source.EMPTY, lhs, List.of(f2)),
layout(f1, f2)
).get(driverContext);
String desc = operation.endsWith("lazy") ? "CoalesceLazyEvaluator" : "CoalesceEagerEvaluator";
if (evaluator.toString().contains(desc) == false) {
throw new IllegalArgumentException("Evaluator was [" + evaluator + "] but expected one containing [" + desc + "]");
}
yield evaluator;
}
case "date_trunc" -> {
FieldAttribute timestamp = new FieldAttribute(
@ -260,6 +290,38 @@ public class EvalBenchmark {
}
}
}
case "coalesce_2_noop" -> {
LongVector f1 = actual.<LongBlock>getBlock(0).asVector();
LongVector result = actual.<LongBlock>getBlock(2).asVector();
for (int i = 0; i < BLOCK_LENGTH; i++) {
long expected = f1.getLong(i);
if (result.getLong(i) != expected) {
throw new AssertionError("[" + operation + "] expected [" + expected + "] but was [" + result.getLong(i) + "]");
}
}
}
case "coalesce_2_eager" -> {
LongBlock f1 = actual.<LongBlock>getBlock(0);
LongVector f2 = actual.<LongBlock>getBlock(1).asVector();
LongVector result = actual.<LongBlock>getBlock(2).asVector();
for (int i = 0; i < BLOCK_LENGTH; i++) {
long expected = i % 5 == 0 ? f2.getLong(i) : f1.getLong(f1.getFirstValueIndex(i));
if (result.getLong(i) != expected) {
throw new AssertionError("[" + operation + "] expected [" + expected + "] but was [" + result.getLong(i) + "]");
}
}
}
case "coalesce_2_lazy" -> {
LongBlock f1 = actual.<LongBlock>getBlock(0);
LongVector f2 = actual.<LongBlock>getBlock(1).asVector();
LongVector result = actual.<LongBlock>getBlock(2).asVector();
for (int i = 0; i < BLOCK_LENGTH; i++) {
long expected = i % 5 == 0 ? f2.getLong(i) : f1.getLong(f1.getFirstValueIndex(i)) + 1;
if (result.getLong(i) != expected) {
throw new AssertionError("[" + operation + "] expected [" + expected + "] but was [" + result.getLong(i) + "]");
}
}
}
case "date_trunc" -> {
LongVector v = actual.<LongBlock>getBlock(1).asVector();
long oneDay = TimeValue.timeValueHours(24).millis();
@ -304,7 +366,7 @@ public class EvalBenchmark {
}
}
}
default -> throw new UnsupportedOperationException();
default -> throw new UnsupportedOperationException(operation);
}
}
@ -324,7 +386,7 @@ public class EvalBenchmark {
}
yield new Page(builder.build());
}
case "case_1_eager", "case_1_lazy" -> {
case "case_1_eager", "case_1_lazy", "coalesce_2_noop" -> {
var f1 = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
var f2 = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
for (int i = 0; i < BLOCK_LENGTH; i++) {
@ -333,6 +395,19 @@ public class EvalBenchmark {
}
yield new Page(f1.build(), f2.build());
}
case "coalesce_2_eager", "coalesce_2_lazy" -> {
var f1 = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
var f2 = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
for (int i = 0; i < BLOCK_LENGTH; i++) {
if (i % 5 == 0) {
f1.appendNull();
} else {
f1.appendLong(i);
}
f2.appendLong(-i);
}
yield new Page(f1.build(), f2.build());
}
case "long_equal_to_long" -> {
var lhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
var rhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);


@ -293,22 +293,57 @@ allprojects {
}
}
ext.withReleaseBuild = { Closure config ->
if(buildParams.snapshotBuild == false) {
config.call()
}
}
plugins.withId('lifecycle-base') {
if (project.path.startsWith(":x-pack:")) {
if (project.path.contains("security") || project.path.contains(":ml")) {
tasks.register('checkPart4') { dependsOn 'check' }
tasks.register('checkPart4') {
dependsOn 'check'
withReleaseBuild {
dependsOn 'assemble'
}
}
} else if (project.path == ":x-pack:plugin" || project.path.contains("ql") || project.path.contains("smoke-test")) {
tasks.register('checkPart3') { dependsOn 'check' }
tasks.register('checkPart3') {
dependsOn 'check'
withReleaseBuild {
dependsOn 'assemble'
}
}
} else if (project.path.contains("multi-node")) {
tasks.register('checkPart5') { dependsOn 'check' }
tasks.register('checkPart5') {
dependsOn 'check'
withReleaseBuild {
dependsOn 'assemble'
}
}
} else {
tasks.register('checkPart2') { dependsOn 'check' }
tasks.register('checkPart2') {
dependsOn 'check'
withReleaseBuild {
dependsOn 'assemble'
}
}
}
} else {
tasks.register('checkPart1') { dependsOn 'check' }
tasks.register('checkPart1') {
dependsOn 'check'
withReleaseBuild {
dependsOn 'assemble'
}
}
}
tasks.register('functionalTests') {
dependsOn 'check'
withReleaseBuild {
dependsOn 'assemble'
}
}
tasks.register('functionalTests') { dependsOn 'check' }
}
/*


@ -0,0 +1,5 @@
pr: 118122
summary: "ES|QL: Partial result on demand for async queries"
area: ES|QL
type: enhancement
issues: []


@ -0,0 +1,7 @@
pr: 120256
summary: Improve memory aspects of enrich cache
area: Ingest Node
type: enhancement
issues:
- 96050
- 120021


@ -0,0 +1,5 @@
pr: 120405
summary: Automatically rollover legacy ml indices
area: Machine Learning
type: upgrade
issues: []


@ -0,0 +1,6 @@
pr: 120629
summary: Report Deprecated Indices That Are Flagged To Ignore Migration Reindex As
A Warning
area: Data streams
type: enhancement
issues: []


@ -0,0 +1,6 @@
pr: 120645
summary: Esql Support date nanos on date diff function
area: ES|QL
type: enhancement
issues:
- 109999


@ -0,0 +1,5 @@
pr: 120722
summary: Migrate stream to core error parsing
area: Machine Learning
type: enhancement
issues: []


@ -210,6 +210,7 @@ Which returns:
{
"is_running": false,
"took": 42, <1>
"is_partial": false, <7>
"columns" : [
{
"name" : "COUNT(http.response.status_code)",
@ -275,8 +276,9 @@ Which returns:
<2> This section of counters shows all possible cluster search states and how many cluster
searches are currently in that state. The clusters can have one of the following statuses: *running*,
*successful* (searches on all shards were successful), *skipped* (the search
failed on a cluster marked with `skip_unavailable`=`true`) or *failed* (the search
failed on a cluster marked with `skip_unavailable`=`false`).
failed on a cluster marked with `skip_unavailable`=`true`), *failed* (the search
failed on a cluster marked with `skip_unavailable`=`false`) or *partial* (the search was
<<esql-async-query-stop-api, interrupted>> before finishing).
<3> The `_clusters/details` section shows metadata about the search on each cluster.
<4> If you included indices from the local cluster you sent the request to in your {ccs},
it is identified as "(local)".
@ -285,6 +287,8 @@ which clusters have slower response times than others.
<6> The shard details for the search on that cluster, including a count of shards that were
skipped due to the can-match phase results. Shards are skipped when they cannot have any matching data
and therefore are not included in the full ES|QL query.
<7> The `is_partial` field is set to `true` if the search has partial results for any reason,
for example if it was interrupted before finishing using the <<esql-async-query-stop-api,async query stop API>>.
The cross-cluster metadata can be used to determine whether any data came back from a cluster.
@ -314,6 +318,7 @@ Which returns:
{
"is_running": false,
"took": 55,
"is_partial": false,
"columns": [
... // not shown
],


@ -17,6 +17,7 @@ overview of {esql} and related tutorials, see <<esql>>.
* <<esql-async-query-api>>
* <<esql-async-query-get-api>>
* <<esql-async-query-delete-api>>
* <<esql-async-query-stop-api>>
include::esql-query-api.asciidoc[]
@ -26,3 +27,5 @@ include::esql-async-query-api.asciidoc[]
include::esql-async-query-get-api.asciidoc[]
include::esql-async-query-delete-api.asciidoc[]
include::esql-async-query-stop-api.asciidoc[]


@ -170,3 +170,10 @@ API>> to get the current status and available results for the query.
(Boolean)
If `true`, the query request is still executing.
--
`is_partial`::
+
--
(Boolean)
If `true`, the query has partial results - for example, as a result of using the <<esql-async-query-stop-api, async query stop API>>.
--
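For example, a response for a query that was stopped early might begin like
this (a sketch; the exact columns and values depend on the query):

[source,console-result]
----
{
  "is_running": false,
  "is_partial": true,
  "columns": [ ... ], // not shown
  "values": [ ... ]   // not shown
}
----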


@ -0,0 +1,49 @@
[[esql-async-query-stop-api]]
=== {esql} async query stop API
++++
<titleabbrev>{esql} async query stop API</titleabbrev>
++++
.New API reference
[sidebar]
--
For the most up-to-date API details, refer to {api-es}/group/endpoint-esql[ES|QL APIs].
--
The <<esql,{esql}>> async query stop API is used to manually stop an async query. Once the stop command is issued,
the query stops processing new data and returns the results that have already been processed. Note that due to the pipelined
nature of {esql} queries, the stop operation is not immediate and may take time to return results.
The results are returned in <<esql-query-api-response-body,the same format>> as the
<<esql-async-query-get-api,{esql} async query get API>>.
If the query has finished by the time the stop command is issued, the results are returned immediately.
If the query processing has not finished by that time, the response will have the `is_partial`
field set to `true`.
[source,console]
----
POST /query/async/FkpMRkJGS1gzVDRlM3g4ZzMyRGlLbkEaTXlJZHdNT09TU2VTZVBoNDM3cFZMUToxMDM=/stop
----
// TEST[skip: no access to query ID]
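If the query was still running when stopped, the response contains the rows
processed so far and flags the result as partial. A sketch of such a response
(the field values are illustrative):

[source,console-result]
----
{
  "is_running": false,
  "took": 48,
  "is_partial": true,
  "columns": [
    {"name": "COUNT(*)", "type": "long"}
  ],
  "values": [[4521]]
}
----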
[[esql-async-query-stop-api-request]]
==== {api-request-title}
`POST /_query/async/<query_id>/stop`
[[esql-async-query-stop-api-prereqs]]
==== {api-prereq-title}
* If the {es} {security-features} are enabled, only the authenticated user that submitted the original query request
can stop the query.
[[esql-async-query-stop-api-path-params]]
==== {api-path-parms-title}
`<query_id>`::
(Required, string)
Identifier for the query to stop.
+
A query ID is provided in the <<esql-async-query-api,{esql} async query API>>'s
response for a query that does not complete in the awaited time.


@ -193,6 +193,7 @@ Which returns:
----
{
"took": 28,
"is_partial": false,
"columns": [
{"name": "author", "type": "text"},
{"name": "name", "type": "text"},


@ -34,8 +34,3 @@ more, refer to {security-guide}/rules-ui-create.html#create-esql-rule[Create an
Use the Elastic AI Assistant to build {esql} queries, or answer questions about
the {esql} query language. To learn more, refer to
{security-guide}/security-assistant.html[AI Assistant].
NOTE: For AI Assistant to answer questions about {esql} and write {esql}
queries, you need to
{security-guide}/security-assistant.html#set-up-ai-assistant[enable knowledge
base].


@ -28,6 +28,102 @@
"variadic" : false,
"returnType" : "integer"
},
{
"params" : [
{
"name" : "unit",
"type" : "keyword",
"optional" : false,
"description" : "Time difference unit"
},
{
"name" : "startTimestamp",
"type" : "date",
"optional" : false,
"description" : "A string representing a start timestamp"
},
{
"name" : "endTimestamp",
"type" : "date_nanos",
"optional" : false,
"description" : "A string representing an end timestamp"
}
],
"variadic" : false,
"returnType" : "integer"
},
{
"params" : [
{
"name" : "unit",
"type" : "keyword",
"optional" : false,
"description" : "Time difference unit"
},
{
"name" : "startTimestamp",
"type" : "date_nanos",
"optional" : false,
"description" : "A string representing a start timestamp"
},
{
"name" : "endTimestamp",
"type" : "date",
"optional" : false,
"description" : "A string representing an end timestamp"
}
],
"variadic" : false,
"returnType" : "integer"
},
{
"params" : [
{
"name" : "unit",
"type" : "keyword",
"optional" : false,
"description" : "Time difference unit"
},
{
"name" : "startTimestamp",
"type" : "date_nanos",
"optional" : false,
"description" : "A string representing a start timestamp"
},
{
"name" : "endTimestamp",
"type" : "date_nanos",
"optional" : false,
"description" : "A string representing an end timestamp"
}
],
"variadic" : false,
"returnType" : "integer"
},
{
"params" : [
{
"name" : "unit",
"type" : "text",
"optional" : false,
"description" : "Time difference unit"
},
{
"name" : "startTimestamp",
"type" : "date",
"optional" : false,
"description" : "A string representing a start timestamp"
},
{
"name" : "endTimestamp",
"type" : "date",
"optional" : false,
"description" : "A string representing an end timestamp"
}
],
"variadic" : false,
"returnType" : "integer"
},
{
"params" : [
{
@ -42,6 +138,30 @@
"optional" : false,
"description" : "A string representing a start timestamp"
},
{
"name" : "endTimestamp",
"type" : "date_nanos",
"optional" : false,
"description" : "A string representing an end timestamp"
}
],
"variadic" : false,
"returnType" : "integer"
},
{
"params" : [
{
"name" : "unit",
"type" : "text",
"optional" : false,
"description" : "Time difference unit"
},
{
"name" : "startTimestamp",
"type" : "date_nanos",
"optional" : false,
"description" : "A string representing a start timestamp"
},
{
"name" : "endTimestamp",
"type" : "date",
@ -51,6 +171,30 @@
],
"variadic" : false,
"returnType" : "integer"
},
{
"params" : [
{
"name" : "unit",
"type" : "text",
"optional" : false,
"description" : "Time difference unit"
},
{
"name" : "startTimestamp",
"type" : "date_nanos",
"optional" : false,
"description" : "A string representing a start timestamp"
},
{
"name" : "endTimestamp",
"type" : "date_nanos",
"optional" : false,
"description" : "A string representing an end timestamp"
}
],
"variadic" : false,
"returnType" : "integer"
}
],
"examples" : [


@ -0,0 +1,263 @@
{
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "operator",
"operator" : "NOT IN",
"name" : "not_in",
"description" : "The `NOT IN` operator allows testing whether a field or expression does *not* equal any element in a list of literals, fields or expressions.",
"signatures" : [
{
"params" : [
{
"name" : "field",
"type" : "boolean",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "boolean",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "cartesian_point",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "cartesian_point",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "cartesian_shape",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "cartesian_shape",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "double",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "double",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "geo_point",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "geo_point",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "geo_shape",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "geo_shape",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "integer",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "integer",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "ip",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "ip",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "keyword",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "keyword",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "keyword",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "text",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "long",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "long",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "text",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "keyword",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "text",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "text",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "field",
"type" : "version",
"optional" : false,
"description" : "An expression."
},
{
"name" : "inlist",
"type" : "version",
"optional" : false,
"description" : "A list of items."
}
],
"variadic" : true,
"returnType" : "boolean"
}
],
"preview" : false,
"snapshot_only" : false
}


@ -0,0 +1,47 @@
{
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "operator",
"operator" : "NOT LIKE",
"name" : "not_like",
"description" : "Use `NOT LIKE` to filter data based on string patterns using wildcards. `NOT LIKE`\nusually acts on a field placed on the left-hand side of the operator, but it can\nalso act on a constant (literal) expression. The right-hand side of the operator\nrepresents the pattern.\n\nThe following wildcard characters are supported:\n\n* `*` matches zero or more characters.\n* `?` matches one character.",
"signatures" : [
{
"params" : [
{
"name" : "str",
"type" : "keyword",
"optional" : false,
"description" : "A literal expression."
},
{
"name" : "pattern",
"type" : "keyword",
"optional" : false,
"description" : "Pattern."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "str",
"type" : "text",
"optional" : false,
"description" : "A literal expression."
},
{
"name" : "pattern",
"type" : "keyword",
"optional" : false,
"description" : "Pattern."
}
],
"variadic" : true,
"returnType" : "boolean"
}
],
"preview" : false,
"snapshot_only" : false
}


@ -0,0 +1,47 @@
{
"comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do no edit it. See ../README.md for how to regenerate it.",
"type" : "operator",
"operator" : "NOT RLIKE",
"name" : "not_rlike",
"description" : "Use `NOT RLIKE` to filter data based on string patterns using using\n<<regexp-syntax,regular expressions>>. `NOT RLIKE` usually acts on a field placed on\nthe left-hand side of the operator, but it can also act on a constant (literal)\nexpression. The right-hand side of the operator represents the pattern.",
"signatures" : [
{
"params" : [
{
"name" : "str",
"type" : "keyword",
"optional" : false,
"description" : "A literal value."
},
{
"name" : "pattern",
"type" : "keyword",
"optional" : false,
"description" : "A regular expression."
}
],
"variadic" : true,
"returnType" : "boolean"
},
{
"params" : [
{
"name" : "str",
"type" : "text",
"optional" : false,
"description" : "A literal value."
},
{
"name" : "pattern",
"type" : "keyword",
"optional" : false,
"description" : "A regular expression."
}
],
"variadic" : true,
"returnType" : "boolean"
}
],
"preview" : false,
"snapshot_only" : false
}


@ -0,0 +1,7 @@
<!--
This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
-->
### NOT_IN
The `NOT IN` operator allows testing whether a field or expression does *not* equal any element in a list of literals, fields or expressions.
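A minimal ES|QL sketch (the index and field names are illustrative):

```esql
FROM employees // hypothetical index
| WHERE emp_no NOT IN (10001, 10002, 10003)
| KEEP emp_no, first_name
```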


@ -0,0 +1,15 @@
<!--
This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
-->
### NOT_LIKE
Use `NOT LIKE` to filter data based on string patterns using wildcards. `NOT LIKE`
usually acts on a field placed on the left-hand side of the operator, but it can
also act on a constant (literal) expression. The right-hand side of the operator
represents the pattern.
The following wildcard characters are supported:
* `*` matches zero or more characters.
* `?` matches one character.
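A minimal ES|QL sketch (the index and field names are illustrative):

```esql
FROM employees // hypothetical index
| WHERE first_name NOT LIKE "Geor*"
| KEEP first_name, last_name
```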


@ -0,0 +1,10 @@
<!--
This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
-->
### NOT_RLIKE
Use `NOT RLIKE` to filter data based on string patterns using
<<regexp-syntax,regular expressions>>. `NOT RLIKE` usually acts on a field placed on
the left-hand side of the operator, but it can also act on a constant (literal)
expression. The right-hand side of the operator represents the pattern.
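A minimal ES|QL sketch (the index and field names are illustrative):

```esql
FROM employees // hypothetical index
| WHERE first_name NOT RLIKE "Geo.*"
| KEEP first_name, last_name
```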


@ -6,5 +6,11 @@
|===
unit | startTimestamp | endTimestamp | result
keyword | date | date | integer
keyword | date | date_nanos | integer
keyword | date_nanos | date | integer
keyword | date_nanos | date_nanos | integer
text | date | date | integer
text | date | date_nanos | integer
text | date_nanos | date | integer
text | date_nanos | date_nanos | integer
|===
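For example, a `date` value can now be diffed directly against a `date_nanos`
value; a sketch (the field names are illustrative):

[source,esql]
----
FROM sample_data
// assume @timestamp is a date field and event.ingested is date_nanos
| EVAL diff_ms = DATE_DIFF("milliseconds", @timestamp, event.ingested)
----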


@ -0,0 +1,22 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Supported types*
[%header.monospaced.styled,format=dsv,separator=|]
|===
field | inlist | result
boolean | boolean | boolean
cartesian_point | cartesian_point | boolean
cartesian_shape | cartesian_shape | boolean
double | double | boolean
geo_point | geo_point | boolean
geo_shape | geo_shape | boolean
integer | integer | boolean
ip | ip | boolean
keyword | keyword | boolean
keyword | text | boolean
long | long | boolean
text | keyword | boolean
text | text | boolean
version | version | boolean
|===


@ -0,0 +1,10 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Supported types*
[%header.monospaced.styled,format=dsv,separator=|]
|===
str | pattern | result
keyword | keyword | boolean
text | keyword | boolean
|===


@ -0,0 +1,10 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Supported types*
[%header.monospaced.styled,format=dsv,separator=|]
|===
str | pattern | result
keyword | keyword | boolean
text | keyword | boolean
|===


@ -27,6 +27,7 @@ Multivalued fields come back as a JSON array:
----
{
"took": 28,
"is_partial": false,
"columns": [
{ "name": "a", "type": "long"},
{ "name": "b", "type": "long"}
@ -78,6 +79,7 @@ And {esql} sees that removal:
----
{
"took": 28,
"is_partial": false,
"columns": [
{ "name": "a", "type": "long"},
{ "name": "b", "type": "keyword"}
@ -122,6 +124,7 @@ And {esql} also sees that:
----
{
"took": 28,
"is_partial": false,
"columns": [
{ "name": "a", "type": "long"},
{ "name": "b", "type": "long"}
@ -165,6 +168,7 @@ POST /_query
----
{
"took": 28,
"is_partial": false,
"columns": [
{ "name": "a", "type": "long"},
{ "name": "b", "type": "keyword"}
@ -198,6 +202,7 @@ POST /_query
----
{
"took": 28,
"is_partial": false,
"columns": [
{ "name": "a", "type": "long"},
],
@ -241,6 +246,7 @@ POST /_query
----
{
"took": 28,
"is_partial": false,
"columns": [
{ "name": "a", "type": "long"},
{ "name": "b", "type": "long"},
@ -278,6 +284,7 @@ POST /_query
----
{
"took": 28,
"is_partial": false,
"columns": [
{ "name": "a", "type": "long"},
{ "name": "b", "type": "long"},


@ -34,9 +34,13 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo
The chat completion {infer} API enables real-time responses for chat completion tasks by delivering answers incrementally, reducing response times during computation.
It only works with the `chat_completion` task type for `openai` and `elastic` {infer} services.
[NOTE]
====
The `chat_completion` task type is only available within the _unified API and only supports streaming.
* The `chat_completion` task type is only available within the _unified API and only supports streaming.
* The Chat completion {infer} API and the Stream {infer} API differ in their response structure and capabilities.
The Chat completion {infer} API provides more comprehensive customization options through more fields and function calling support.
If you use the `openai` service or the `elastic` service, use the Chat completion {infer} API.
====
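For example, a chat completion request might look like this (a sketch,
assuming an inference endpoint named `openai-chat` with the `chat_completion`
task type already exists):

[source,console]
----
POST _inference/chat_completion/openai-chat/_stream
{
  "messages": [
    {
      "role": "user",
      "content": "Say hello"
    }
  ]
}
----
// TEST[skip: requires an existing inference endpoint]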
[discrete]


@ -40,6 +40,10 @@ However, if you do not plan to use the {infer} APIs to use these models or if yo
The stream {infer} API enables real-time responses for completion tasks by delivering answers incrementally, reducing response times during computation.
It only works with the `completion` and `chat_completion` task types.
The Chat completion {infer} API and the Stream {infer} API differ in their response structure and capabilities.
The Chat completion {infer} API provides more comprehensive customization options through more fields and function calling support.
If you use the `openai` service or the `elastic` service, use the Chat completion {infer} API.
[NOTE]
====
include::inference-shared.asciidoc[tag=chat-completion-docs]


@ -290,7 +290,7 @@ with the number and sizes of arrays present in source of each document, naturall
[[synthetic-source-fields-native-list]]
===== Field types that support synthetic source with no storage overhead
The following field types support synthetic source using data from <<doc-values,`doc_values`>> or
<stored-fields, stored fields>>, and require no additional storage space to construct the `_source` field.
<<stored-fields, stored fields>>, and require no additional storage space to construct the `_source` field.
NOTE: If you enable the <<ignore-malformed,`ignore_malformed`>> or <<ignore-above,`ignore_above`>> settings, then
additional storage is required to store ignored field values for these types.
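For reference, synthetic source is enabled through the `_source` mapping; a
minimal sketch:

[source,console]
----
PUT idx
{
  "mappings": {
    "_source": {
      "mode": "synthetic"
    }
  }
}
----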


@ -12,6 +12,6 @@ apply plugin: 'elasticsearch.internal-java-rest-test'
apply plugin: 'elasticsearch.internal-test-artifact'
dependencies {
javaRestTestImplementation project(':libs:entitlement:qa:test-plugin')
clusterModules project(':libs:entitlement:qa:test-plugin')
javaRestTestImplementation project(':libs:entitlement:qa:entitlement-test-plugin')
clusterModules project(':libs:entitlement:qa:entitlement-test-plugin')
}


@ -14,7 +14,7 @@ apply plugin: 'elasticsearch.build'
apply plugin: 'elasticsearch.mrjar'
esplugin {
name = 'test-plugin'
name = 'entitlement-test-plugin'
description = 'A test plugin that invokes methods checked by entitlements'
classname = 'org.elasticsearch.entitlement.qa.test.EntitlementTestPlugin'
}


@ -28,7 +28,7 @@ public class EntitlementsAllowedIT extends ESRestTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, true, ALLOWED_ENTITLEMENTS))
.module("entitlement-test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, true, ALLOWED_ENTITLEMENTS))
.systemProperty("es.entitlements.enabled", "true")
.setting("xpack.security.enabled", "false")
.build();


@ -28,7 +28,7 @@ public class EntitlementsAllowedNonModularIT extends ESRestTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, false, ALLOWED_ENTITLEMENTS))
.module("entitlement-test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, false, ALLOWED_ENTITLEMENTS))
.systemProperty("es.entitlements.enabled", "true")
.setting("xpack.security.enabled", "false")
.build();


@ -26,7 +26,7 @@ public class EntitlementsDeniedIT extends ESRestTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, true, null))
.module("entitlement-test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, true, null))
.systemProperty("es.entitlements.enabled", "true")
.setting("xpack.security.enabled", "false")
// Logs in libs/entitlement/qa/build/test-results/javaRestTest/TEST-org.elasticsearch.entitlement.qa.EntitlementsDeniedIT.xml


@ -26,7 +26,7 @@ public class EntitlementsDeniedNonModularIT extends ESRestTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.module("test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, false, null))
.module("entitlement-test-plugin", spec -> EntitlementsUtil.setupEntitlements(spec, false, null))
.systemProperty("es.entitlements.enabled", "true")
.setting("xpack.security.enabled", "false")
// Logs in libs/entitlement/qa/build/test-results/javaRestTest/TEST-org.elasticsearch.entitlement.qa.EntitlementsDeniedIT.xml


@ -37,7 +37,6 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.DataStreamAlias;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.index.Index;
@ -341,13 +340,13 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
assertThat(rolloverResponse.getNewIndex(), equalTo(DataStream.getDefaultBackingIndexName("ds", 3)));
}
public void testFailureStoreSnapshotAndRestore() throws Exception {
public void testFailureStoreSnapshotAndRestore() {
String dataStreamName = "with-fs";
CreateSnapshotResponse createSnapshotResponse = client.admin()
.cluster()
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
.setWaitForCompletion(true)
.setIndices(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.ALL_APPLICABLE))
.setIndices(dataStreamName)
.setIncludeGlobalState(false)
.get();
@ -398,6 +397,49 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
}
}
public void testSelectorsNotAllowedInSnapshotAndRestore() {
String dataStreamName = "with-fs";
try {
client.admin()
.cluster()
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
.setWaitForCompletion(true)
.setIndices(dataStreamName + "::" + randomFrom(IndexComponentSelector.values()).getKey())
.setIncludeGlobalState(false)
.get();
fail("Should have failed because selectors are not allowed in snapshot creation");
} catch (IllegalArgumentException e) {
assertThat(
e.getMessage(),
containsString("Index component selectors are not supported in this context but found selector in expression")
);
}
CreateSnapshotResponse createSnapshotResponse = client.admin()
.cluster()
.prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
.setWaitForCompletion(true)
.setIndices("ds")
.setIncludeGlobalState(false)
.get();
RestStatus status = createSnapshotResponse.getSnapshotInfo().status();
assertEquals(RestStatus.OK, status);
try {
client.admin()
.cluster()
.prepareRestoreSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
.setWaitForCompletion(true)
.setIndices(dataStreamName + "::" + randomFrom(IndexComponentSelector.values()).getKey())
.get();
fail("Should have failed because selectors are not allowed in snapshot restore");
} catch (IllegalArgumentException e) {
assertThat(
e.getMessage(),
containsString("Index component selectors are not supported in this context but found selector in expression")
);
}
}
public void testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exception {
DocWriteResponse indexResponse = client.prepareIndex("other-ds")
.setOpType(DocWriteRequest.OpType.CREATE)
@ -1241,6 +1283,7 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
SnapshotInfo retrievedSnapshot = getSnapshot(REPO, SNAPSHOT);
assertThat(retrievedSnapshot.dataStreams(), contains(dataStreamName));
assertThat(retrievedSnapshot.indices(), containsInAnyOrder(fsBackingIndexName));
assertThat(retrievedSnapshot.indices(), not(containsInAnyOrder(fsFailureIndexName)));
assertAcked(
safeGet(client.execute(DeleteDataStreamAction.INSTANCE, new DeleteDataStreamAction.Request(TEST_REQUEST_TIMEOUT, "*")))


@ -16,7 +16,6 @@ import org.elasticsearch.action.datastreams.DataStreamsActionUtil;
import org.elasticsearch.action.datastreams.DataStreamsStatsAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.IndexComponentSelector;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -105,17 +104,6 @@ public class DataStreamsStatsTransportAction extends TransportBroadcastByNodeAct
return state.blocks().indicesBlockedException(projectResolver.getProjectId(), ClusterBlockLevel.METADATA_READ, concreteIndices);
}
@Override
protected String[] resolveConcreteIndexNames(ClusterState clusterState, DataStreamsStatsAction.Request request) {
return DataStreamsActionUtil.resolveConcreteIndexNamesWithSelector(
indexNameExpressionResolver,
projectResolver.getProjectMetadata(clusterState),
request.indices(),
IndexComponentSelector.ALL_APPLICABLE,
request.indicesOptions()
).toArray(String[]::new);
}
@Override
protected ShardsIterator shards(ClusterState clusterState, DataStreamsStatsAction.Request request, String[] concreteIndices) {
return clusterState.routingTable(projectResolver.getProjectId()).allShards(concreteIndices);


@ -184,8 +184,9 @@ public class ChildQuerySearchIT extends ParentChildTestCase {
assertNoFailuresAndResponse(prepareSearch("test").setQuery(idsQuery().addIds("c1")), response -> {
assertThat(response.getHits().getTotalHits().value(), equalTo(1L));
assertThat(response.getHits().getAt(0).getId(), equalTo("c1"));
assertThat(extractValue("join_field.name", response.getHits().getAt(0).getSourceAsMap()), equalTo("child"));
assertThat(extractValue("join_field.parent", response.getHits().getAt(0).getSourceAsMap()), equalTo("p1"));
Map<String, Object> source = response.getHits().getAt(0).getSourceAsMap();
assertThat(extractValue("join_field.name", source), equalTo("child"));
assertThat(extractValue("join_field.parent", source), equalTo("p1"));
});
@ -197,11 +198,13 @@ public class ChildQuerySearchIT extends ParentChildTestCase {
response -> {
assertThat(response.getHits().getTotalHits().value(), equalTo(2L));
assertThat(response.getHits().getAt(0).getId(), anyOf(equalTo("c1"), equalTo("c2")));
assertThat(extractValue("join_field.name", response.getHits().getAt(0).getSourceAsMap()), equalTo("child"));
assertThat(extractValue("join_field.parent", response.getHits().getAt(0).getSourceAsMap()), equalTo("p1"));
Map<String, Object> source0 = response.getHits().getAt(0).getSourceAsMap();
assertThat(extractValue("join_field.name", source0), equalTo("child"));
assertThat(extractValue("join_field.parent", source0), equalTo("p1"));
assertThat(response.getHits().getAt(1).getId(), anyOf(equalTo("c1"), equalTo("c2")));
assertThat(extractValue("join_field.name", response.getHits().getAt(1).getSourceAsMap()), equalTo("child"));
assertThat(extractValue("join_field.parent", response.getHits().getAt(1).getSourceAsMap()), equalTo("p1"));
Map<String, Object> source1 = response.getHits().getAt(1).getSourceAsMap();
assertThat(extractValue("join_field.name", source1), equalTo("child"));
assertThat(extractValue("join_field.parent", source1), equalTo("p1"));
}
);


@ -68,10 +68,11 @@ public abstract class AbstractFeatureMigrationIntegTest extends ESIntegTestCase
static final String INTERNAL_MANAGED_INDEX_NAME = ".int-man-old";
static final int INDEX_DOC_COUNT = 100; // arbitrarily chosen
static final int INTERNAL_MANAGED_FLAG_VALUE = 1;
public static final Version NEEDS_UPGRADE_VERSION = TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION.previousMajor();
public static final IndexVersion NEEDS_UPGRADE_INDEX_VERSION = IndexVersionUtils.getPreviousMajorVersion(
static final String FIELD_NAME = "some_field";
protected static final IndexVersion NEEDS_UPGRADE_INDEX_VERSION = IndexVersionUtils.getPreviousMajorVersion(
TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_INDEX_VERSION
);
protected static final int UPGRADED_TO_VERSION = TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION.major + 1;
static final SystemIndexDescriptor EXTERNAL_UNMANAGED = SystemIndexDescriptor.builder()
.setIndexPattern(".ext-unman-*")
@ -131,11 +132,6 @@ public abstract class AbstractFeatureMigrationIntegTest extends ESIntegTestCase
@Before
public void setup() {
assumeTrue(
"We can only create the test indices we need if they're in the previous major version",
NEEDS_UPGRADE_VERSION.onOrAfter(Version.CURRENT.previousMajor())
);
internalCluster().setBootstrapMasterNodeIndex(0);
masterName = internalCluster().startMasterOnlyNode();
masterAndDataNode = internalCluster().startNode();


@ -208,7 +208,7 @@ public class FeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
assertIndexHasCorrectProperties(
finalMetadata,
".int-man-old-reindexed-for-9",
".int-man-old-reindexed-for-" + UPGRADED_TO_VERSION,
INTERNAL_MANAGED_FLAG_VALUE,
true,
true,
@ -216,7 +216,7 @@ public class FeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
);
assertIndexHasCorrectProperties(
finalMetadata,
".int-unman-old-reindexed-for-9",
".int-unman-old-reindexed-for-" + UPGRADED_TO_VERSION,
INTERNAL_UNMANAGED_FLAG_VALUE,
false,
true,
@ -224,7 +224,7 @@ public class FeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
);
assertIndexHasCorrectProperties(
finalMetadata,
".ext-man-old-reindexed-for-9",
".ext-man-old-reindexed-for-" + UPGRADED_TO_VERSION,
EXTERNAL_MANAGED_FLAG_VALUE,
true,
false,
@ -232,7 +232,7 @@ public class FeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
);
assertIndexHasCorrectProperties(
finalMetadata,
".ext-unman-old-reindexed-for-9",
".ext-unman-old-reindexed-for-" + UPGRADED_TO_VERSION,
EXTERNAL_UNMANAGED_FLAG_VALUE,
false,
false,


@ -218,7 +218,7 @@ public class MultiFeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
// Finally, verify that all the indices exist and have the properties we expect.
assertIndexHasCorrectProperties(
finalMetadata,
".int-man-old-reindexed-for-9",
".int-man-old-reindexed-for-" + UPGRADED_TO_VERSION,
INTERNAL_MANAGED_FLAG_VALUE,
true,
true,
@ -226,7 +226,7 @@ public class MultiFeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
);
assertIndexHasCorrectProperties(
finalMetadata,
".int-unman-old-reindexed-for-9",
".int-unman-old-reindexed-for-" + UPGRADED_TO_VERSION,
INTERNAL_UNMANAGED_FLAG_VALUE,
false,
true,
@ -234,7 +234,7 @@ public class MultiFeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
);
assertIndexHasCorrectProperties(
finalMetadata,
".ext-man-old-reindexed-for-9",
".ext-man-old-reindexed-for-" + UPGRADED_TO_VERSION,
EXTERNAL_MANAGED_FLAG_VALUE,
true,
false,
@ -242,7 +242,7 @@ public class MultiFeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
);
assertIndexHasCorrectProperties(
finalMetadata,
".ext-unman-old-reindexed-for-9",
".ext-unman-old-reindexed-for-" + UPGRADED_TO_VERSION,
EXTERNAL_UNMANAGED_FLAG_VALUE,
false,
false,
@ -251,7 +251,7 @@ public class MultiFeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
assertIndexHasCorrectProperties(
finalMetadata,
".second-int-man-old-reindexed-for-9",
".second-int-man-old-reindexed-for-" + UPGRADED_TO_VERSION,
SECOND_FEATURE_IDX_FLAG_VALUE,
true,
true,


@ -233,12 +233,11 @@ tests:
- class: org.elasticsearch.xpack.inference.DefaultEndPointsIT
method: testMultipleInferencesTriggeringDownloadAndDeploy
issue: https://github.com/elastic/elasticsearch/issues/120668
- class: org.elasticsearch.entitlement.qa.EntitlementsAllowedIT
issue: https://github.com/elastic/elasticsearch/issues/120674
- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedNonModularIT
issue: https://github.com/elastic/elasticsearch/issues/120675
- class: org.elasticsearch.entitlement.qa.EntitlementsDeniedIT
issue: https://github.com/elastic/elasticsearch/issues/120676
- class: org.elasticsearch.xpack.security.authc.ldap.ADLdapUserSearchSessionFactoryTests
issue: https://github.com/elastic/elasticsearch/issues/119882
- class: org.elasticsearch.xpack.esql.action.CrossClusterAsyncEnrichStopIT
method: testEnrichAfterStop
issue: https://github.com/elastic/elasticsearch/issues/120757
# Examples:
#


@ -0,0 +1,31 @@
{
"esql.async_query_stop": {
"documentation": {
"url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/esql-async-query-stop-api.html",
"description": "Stops a previously submitted async query request given its ID and collects the results."
},
"stability": "stable",
"visibility": "public",
"headers": {
"accept": [
"application/json"
]
},
"url": {
"paths": [
{
"path": "/_query/async/{id}/stop",
"methods": [
"POST"
],
"parts": {
"id": {
"type": "string",
"description": "The async query ID"
}
}
}
]
}
}
}


@ -113,7 +113,7 @@ public class PersistentTaskCreationFailureIT extends ESIntegTestCase {
UUIDs.base64UUID(),
FailingCreationPersistentTaskExecutor.TASK_NAME,
new FailingCreationTaskParams(),
null,
TEST_REQUEST_TIMEOUT,
l.map(ignored -> null)
)
);


@ -50,7 +50,7 @@ public class PersistentTaskInitializationFailureIT extends ESIntegTestCase {
UUIDs.base64UUID(),
FailingInitializationPersistentTaskExecutor.TASK_NAME,
new FailingInitializationTaskParams(),
null,
TEST_REQUEST_TIMEOUT,
startPersistentTaskFuture
);
startPersistentTaskFuture.actionGet();


@ -45,7 +45,7 @@ public class PersistentTasksExecutorFullRestartIT extends ESIntegTestCase {
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
futures.add(future);
taskIds[i] = UUIDs.base64UUID();
service.sendStartRequest(taskIds[i], TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future);
service.sendStartRequest(taskIds[i], TestPersistentTasksExecutor.NAME, new TestParams("Blah"), TEST_REQUEST_TIMEOUT, future);
}
for (int i = 0; i < numberOfTasks; i++) {


@ -69,7 +69,13 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
public void testPersistentActionFailure() throws Exception {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future);
persistentTasksService.sendStartRequest(
UUIDs.base64UUID(),
TestPersistentTasksExecutor.NAME,
new TestParams("Blah"),
TEST_REQUEST_TIMEOUT,
future
);
long allocationId = future.get().getAllocationId();
waitForTaskToStart();
TaskInfo firstRunningTask = clusterAdmin().prepareListTasks()
@ -100,7 +106,13 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
String taskId = UUIDs.base64UUID();
persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future);
persistentTasksService.sendStartRequest(
taskId,
TestPersistentTasksExecutor.NAME,
new TestParams("Blah"),
TEST_REQUEST_TIMEOUT,
future
);
long allocationId = future.get().getAllocationId();
waitForTaskToStart();
TaskInfo firstRunningTask = clusterAdmin().prepareListTasks()
@ -119,7 +131,14 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
logger.info("Simulating errant completion notification");
// try sending completion request with incorrect allocation id
PlainActionFuture<PersistentTask<?>> failedCompletionNotificationFuture = new PlainActionFuture<>();
persistentTasksService.sendCompletionRequest(taskId, Long.MAX_VALUE, null, null, null, failedCompletionNotificationFuture);
persistentTasksService.sendCompletionRequest(
taskId,
Long.MAX_VALUE,
null,
null,
TEST_REQUEST_TIMEOUT,
failedCompletionNotificationFuture
);
assertFutureThrows(failedCompletionNotificationFuture, ResourceNotFoundException.class);
// Make sure that the task is still running
assertThat(
@ -141,7 +160,13 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
TestParams testParams = new TestParams("Blah");
testParams.setExecutorNodeAttr("test");
persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, null, future);
persistentTasksService.sendStartRequest(
UUIDs.base64UUID(),
TestPersistentTasksExecutor.NAME,
testParams,
TEST_REQUEST_TIMEOUT,
future
);
String taskId = future.get().getId();
Settings nodeSettings = Settings.builder().put(nodeSettings(0, Settings.EMPTY)).put("node.attr.test_attr", "test").build();
@ -165,7 +190,7 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
// Remove the persistent task
PlainActionFuture<PersistentTask<?>> removeFuture = new PlainActionFuture<>();
persistentTasksService.sendRemoveRequest(taskId, null, removeFuture);
persistentTasksService.sendRemoveRequest(taskId, TEST_REQUEST_TIMEOUT, removeFuture);
assertEquals(removeFuture.get().getId(), taskId);
}
@ -182,7 +207,13 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
TestParams testParams = new TestParams("Blah");
persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, null, future);
persistentTasksService.sendStartRequest(
UUIDs.base64UUID(),
TestPersistentTasksExecutor.NAME,
testParams,
TEST_REQUEST_TIMEOUT,
future
);
String taskId = future.get().getId();
assertThat(clusterAdmin().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(), empty());
@ -197,14 +228,20 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
// Remove the persistent task
PlainActionFuture<PersistentTask<?>> removeFuture = new PlainActionFuture<>();
persistentTasksService.sendRemoveRequest(taskId, null, removeFuture);
persistentTasksService.sendRemoveRequest(taskId, TEST_REQUEST_TIMEOUT, removeFuture);
assertEquals(removeFuture.get().getId(), taskId);
}
public void testPersistentActionStatusUpdate() throws Exception {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future);
persistentTasksService.sendStartRequest(
UUIDs.base64UUID(),
TestPersistentTasksExecutor.NAME,
new TestParams("Blah"),
TEST_REQUEST_TIMEOUT,
future
);
String taskId = future.get().getId();
waitForTaskToStart();
TaskInfo firstRunningTask = clusterAdmin().prepareListTasks()
@ -250,7 +287,7 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
assertFutureThrows(future1, IllegalStateException.class, "timed out after 10ms");
PlainActionFuture<PersistentTask<?>> failedUpdateFuture = new PlainActionFuture<>();
persistentTasksService.sendUpdateStateRequest(taskId, -2, new State("should fail"), null, failedUpdateFuture);
persistentTasksService.sendUpdateStateRequest(taskId, -2, new State("should fail"), TEST_REQUEST_TIMEOUT, failedUpdateFuture);
assertFutureThrows(
failedUpdateFuture,
ResourceNotFoundException.class,
@ -275,11 +312,23 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
String taskId = UUIDs.base64UUID();
persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future);
persistentTasksService.sendStartRequest(
taskId,
TestPersistentTasksExecutor.NAME,
new TestParams("Blah"),
TEST_REQUEST_TIMEOUT,
future
);
future.get();
PlainActionFuture<PersistentTask<TestParams>> future2 = new PlainActionFuture<>();
persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future2);
persistentTasksService.sendStartRequest(
taskId,
TestPersistentTasksExecutor.NAME,
new TestParams("Blah"),
TEST_REQUEST_TIMEOUT,
future2
);
assertFutureThrows(future2, ResourceAlreadyExistsException.class);
waitForTaskToStart();
@ -315,7 +364,13 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
TestParams testParams = new TestParams("Blah");
testParams.setExecutorNodeAttr("test");
persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, testParams, null, future);
persistentTasksService.sendStartRequest(
UUIDs.base64UUID(),
TestPersistentTasksExecutor.NAME,
testParams,
TEST_REQUEST_TIMEOUT,
future
);
PersistentTask<TestParams> task = future.get();
String taskId = task.getId();
@ -366,7 +421,13 @@ public class PersistentTasksExecutorIT extends ESIntegTestCase {
persistentTasksClusterService.setRecheckInterval(TimeValue.timeValueMillis(1));
PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class);
PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), null, future);
persistentTasksService.sendStartRequest(
UUIDs.base64UUID(),
TestPersistentTasksExecutor.NAME,
new TestParams("Blah"),
TEST_REQUEST_TIMEOUT,
future
);
String taskId = future.get().getId();
long allocationId = future.get().getAllocationId();
waitForTaskToStart();


@ -52,7 +52,7 @@ public class EnableAssignmentDeciderIT extends ESIntegTestCase {
"task_" + i,
TestPersistentTasksExecutor.NAME,
new TestParams(randomAlphaOfLength(10)),
null,
TEST_REQUEST_TIMEOUT,
ActionListener.running(latch::countDown)
);
}


@ -655,8 +655,9 @@ public class TopHitsIT extends ESIntegTestCase {
assertThat(hit.field("field2").getValue(), equalTo(2.71f));
assertThat(hit.field("script").getValue().toString(), equalTo("5"));
assertThat(hit.getSourceAsMap().size(), equalTo(1));
assertThat(hit.getSourceAsMap().get("text").toString(), equalTo("some text to entertain"));
Map<String, Object> source = hit.getSourceAsMap();
assertThat(source.size(), equalTo(1));
assertThat(source.get("text").toString(), equalTo("some text to entertain"));
assertEquals("some text to entertain", hit.getFields().get("text").getValue());
assertEquals("some text to entertain", hit.getFields().get("text_stored_lookup").getValue());
}
@ -927,8 +928,9 @@ public class TopHitsIT extends ESIntegTestCase {
field = searchHit.field("script");
assertThat(field.getValue().toString(), equalTo("5"));
assertThat(searchHit.getSourceAsMap().size(), equalTo(1));
assertThat(extractValue("message", searchHit.getSourceAsMap()), equalTo("some comment"));
Map<String, Object> source = searchHit.getSourceAsMap();
assertThat(source.size(), equalTo(1));
assertThat(extractValue("message", source), equalTo("some comment"));
}
);
}


@@ -490,8 +490,9 @@ public class InnerHitsIT extends ESIntegTestCase {
response -> {
SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
innerHits = innerHits.getAt(0).getInnerHits().get("remark");
assertNotNull(innerHits.getAt(0).getSourceAsMap());
assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty());
Map<String, Object> source = innerHits.getAt(0).getSourceAsMap();
assertNotNull(source);
assertFalse(source.isEmpty());
}
);
assertNoFailuresAndResponse(
@@ -507,8 +508,9 @@ public class InnerHitsIT extends ESIntegTestCase {
response -> {
SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
innerHits = innerHits.getAt(0).getInnerHits().get("remark");
assertNotNull(innerHits.getAt(0).getSourceAsMap());
assertFalse(innerHits.getAt(0).getSourceAsMap().isEmpty());
Map<String, Object> source = innerHits.getAt(0).getSourceAsMap();
assertNotNull(source);
assertFalse(source.isEmpty());
}
);
}
@@ -845,16 +847,12 @@ public class InnerHitsIT extends ESIntegTestCase {
assertHitCount(response, 1);
assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value(), equalTo(2L));
assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(1));
assertThat(
response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"),
equalTo("fox eat quick")
);
assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().size(), equalTo(1));
assertThat(
response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"),
equalTo("fox ate rabbit x y z")
);
Map<String, Object> source0 = response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap();
assertThat(source0.size(), equalTo(1));
assertThat(source0.get("message"), equalTo("fox eat quick"));
Map<String, Object> source1 = response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap();
assertThat(source1.size(), equalTo(1));
assertThat(source1.get("message"), equalTo("fox ate rabbit x y z"));
}
);
@@ -866,16 +864,12 @@ public class InnerHitsIT extends ESIntegTestCase {
assertHitCount(response, 1);
assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getTotalHits().value(), equalTo(2L));
assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2));
assertThat(
response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().get("message"),
equalTo("fox eat quick")
);
assertThat(response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap().size(), equalTo(2));
assertThat(
response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap().get("message"),
equalTo("fox ate rabbit x y z")
);
Map<String, Object> source0 = response.getHits().getAt(0).getInnerHits().get("comments").getAt(0).getSourceAsMap();
assertThat(source0.size(), equalTo(2));
assertThat(source0.get("message"), equalTo("fox eat quick"));
Map<String, Object> source1 = response.getHits().getAt(0).getInnerHits().get("comments").getAt(1).getSourceAsMap();
assertThat(source1.size(), equalTo(2));
assertThat(source1.get("message"), equalTo("fox ate rabbit x y z"));
}
);


@@ -47,6 +47,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
@@ -152,8 +153,9 @@ public class SearchAfterIT extends ESIntegTestCase {
searchResponse -> {
assertThat(searchResponse.getHits().getTotalHits().value(), Matchers.equalTo(2L));
assertThat(searchResponse.getHits().getHits().length, Matchers.equalTo(1));
assertThat(searchResponse.getHits().getHits()[0].getSourceAsMap().get("field1"), Matchers.equalTo(100));
assertThat(searchResponse.getHits().getHits()[0].getSourceAsMap().get("field2"), Matchers.equalTo("toto"));
Map<String, Object> source = searchResponse.getHits().getHits()[0].getSourceAsMap();
assertThat(source.get("field1"), Matchers.equalTo(100));
assertThat(source.get("field2"), Matchers.equalTo("toto"));
}
);
}
@@ -438,8 +440,9 @@ public class SearchAfterIT extends ESIntegTestCase {
int foundHits = 0;
do {
for (SearchHit hit : resp.getHits().getHits()) {
assertNotNull(hit.getSourceAsMap());
final Object timestamp = hit.getSourceAsMap().get("timestamp");
Map<String, Object> source = hit.getSourceAsMap();
assertNotNull(source);
final Object timestamp = source.get("timestamp");
assertNotNull(timestamp);
assertThat(((Number) timestamp).longValue(), equalTo(timestamps.get(foundHits)));
foundHits++;
@@ -469,8 +472,9 @@ public class SearchAfterIT extends ESIntegTestCase {
do {
Object[] after = null;
for (SearchHit hit : resp.getHits().getHits()) {
assertNotNull(hit.getSourceAsMap());
final Object timestamp = hit.getSourceAsMap().get("timestamp");
Map<String, Object> source = hit.getSourceAsMap();
assertNotNull(source);
final Object timestamp = source.get("timestamp");
assertNotNull(timestamp);
assertThat(((Number) timestamp).longValue(), equalTo(timestamps.get(foundHits)));
after = hit.getSortValues();
@@ -505,8 +509,9 @@ public class SearchAfterIT extends ESIntegTestCase {
do {
Object[] after = null;
for (SearchHit hit : resp.getHits().getHits()) {
assertNotNull(hit.getSourceAsMap());
final Object timestamp = hit.getSourceAsMap().get("timestamp");
Map<String, Object> source = hit.getSourceAsMap();
assertNotNull(source);
final Object timestamp = source.get("timestamp");
assertNotNull(timestamp);
foundSeqNos.add(((Number) timestamp).longValue());
after = hit.getSortValues();


@@ -130,10 +130,11 @@ public class FieldSortIT extends ESIntegTestCase {
.setSize(10),
response -> {
logClusterState();
Number previous = (Number) response.getHits().getHits()[0].getSourceAsMap().get("entry");
for (int j = 1; j < response.getHits().getHits().length; j++) {
Number current = (Number) response.getHits().getHits()[j].getSourceAsMap().get("entry");
Number previous = (Number) response.getHits().getHits()[j - 1].getSourceAsMap().get("entry");
assertThat(response.toString(), current.intValue(), lessThan(previous.intValue()));
previous = current;
}
}
);
@@ -144,10 +145,11 @@ public class FieldSortIT extends ESIntegTestCase {
.setSize(10),
response -> {
logClusterState();
Number previous = (Number) response.getHits().getHits()[0].getSourceAsMap().get("entry");
for (int j = 1; j < response.getHits().getHits().length; j++) {
Number current = (Number) response.getHits().getHits()[j].getSourceAsMap().get("entry");
Number previous = (Number) response.getHits().getHits()[j - 1].getSourceAsMap().get("entry");
assertThat(response.toString(), current.intValue(), greaterThan(previous.intValue()));
previous = current;
}
}
);


@@ -11,6 +11,8 @@ package org.elasticsearch.search.source;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Map;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponses;
import static org.hamcrest.Matchers.notNullValue;
@@ -57,8 +59,9 @@ public class SourceFetchingIT extends ESIntegTestCase {
assertResponses(response -> {
assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field1"), equalTo("value"));
Map<String, Object> source = response.getHits().getAt(0).getSourceAsMap();
assertThat(source.size(), equalTo(1));
assertThat(source.get("field1"), equalTo("value"));
},
prepareSearch("test").setFetchSource("field1", null),
prepareSearch("test").setFetchSource(new String[] { "*" }, new String[] { "field2" })
@@ -84,8 +87,9 @@ public class SourceFetchingIT extends ESIntegTestCase {
assertResponses(response -> {
assertThat(response.getHits().getAt(0).getSourceAsString(), notNullValue());
assertThat(response.getHits().getAt(0).getSourceAsMap().size(), equalTo(1));
assertThat((String) response.getHits().getAt(0).getSourceAsMap().get("field"), equalTo("value"));
Map<String, Object> source = response.getHits().getAt(0).getSourceAsMap();
assertThat(source.size(), equalTo(1));
assertThat((String) source.get("field"), equalTo("value"));
},
prepareSearch("test").setFetchSource(new String[] { "*.notexisting", "field" }, null),
prepareSearch("test").setFetchSource(new String[] { "field.notexisting.*", "field" }, null)


@@ -356,8 +356,9 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
assertThat(option.getText().toString(), equalTo("suggestion" + id));
assertThat(option.getHit(), hasId("" + id));
assertThat(option.getHit(), hasScore((id)));
assertNotNull(option.getHit().getSourceAsMap());
Set<String> sourceFields = option.getHit().getSourceAsMap().keySet();
Map<String, Object> source = option.getHit().getSourceAsMap();
assertNotNull(source);
Set<String> sourceFields = source.keySet();
assertThat(sourceFields, contains("a"));
assertThat(sourceFields, not(contains("b")));
id--;


@@ -162,6 +162,9 @@ public class TransportVersions {
public static final TransportVersion ESQL_SKIP_ES_INDEX_SERIALIZATION = def(8_827_00_0);
public static final TransportVersion ADD_INDEX_BLOCK_TWO_PHASE = def(8_828_00_0);
public static final TransportVersion RESOLVE_CLUSTER_NO_INDEX_EXPRESSION = def(8_829_00_0);
public static final TransportVersion ML_ROLLOVER_LEGACY_INDICES = def(8_830_00_0);
public static final TransportVersion ADD_INCLUDE_FAILURE_INDICES_OPTION = def(8_831_00_0);
public static final TransportVersion ESQL_RESPONSE_PARTIAL = def(8_832_00_0);
/*
* WARNING: DO NOT MERGE INTO MAIN!


@@ -19,12 +19,12 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.indices.SystemIndices;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
import org.elasticsearch.persistent.PersistentTasksService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -53,13 +53,13 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA
GetFeatureUpgradeStatusResponse> {
/**
* Once all feature migrations for 9.x -> 10.x have been tested, we can bump this to Version.V_9_0_0
* These versions should be set to current major and current major's index version
*/
public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_8_0_0;
public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersions.V_8_0_0;
@UpdateForV10(owner = UpdateForV10.Owner.CORE_INFRA)
public static final Version NO_UPGRADE_REQUIRED_VERSION = Version.V_9_0_0;
public static final IndexVersion NO_UPGRADE_REQUIRED_INDEX_VERSION = IndexVersions.UPGRADE_TO_LUCENE_10_0_0;
private final SystemIndices systemIndices;
PersistentTasksService persistentTasksService;
@Inject
public TransportGetFeatureUpgradeStatusAction(
@@ -68,7 +68,6 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA
ActionFilters actionFilters,
ClusterService clusterService,
IndexNameExpressionResolver indexNameExpressionResolver,
PersistentTasksService persistentTasksService,
SystemIndices systemIndices
) {
super(
@@ -83,7 +82,6 @@ public class TransportGetFeatureUpgradeStatusAction extends TransportMasterNodeA
);
this.systemIndices = systemIndices;
this.persistentTasksService = persistentTasksService;
}
@Override
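The bumped constants above raise the no-upgrade floor from 8.0.0 to 9.0.0. A minimal sketch of the comparison this action bases its status on, assuming a hypothetical `oldestIndexVersion` taken from a feature's system-index metadata:

```java
// Hypothetical sample value; the real action derives this from index metadata.
IndexVersion oldestIndexVersion = IndexVersions.V_8_0_0;

// Any system index older than the no-upgrade-required floor must be migrated
// before the next major upgrade; with the new floor, 8.x-created indices qualify.
boolean migrationRequired = oldestIndexVersion.before(
    TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_INDEX_VERSION
);
```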


@@ -21,6 +21,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.indices.SystemIndices;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.persistent.PersistentTasksService;
@@ -95,7 +96,7 @@ public class TransportPostFeatureUpgradeAction extends TransportMasterNodeAction
SYSTEM_INDEX_UPGRADE_TASK_NAME,
SYSTEM_INDEX_UPGRADE_TASK_NAME,
new SystemIndexMigrationTaskParams(),
null,
TimeValue.THIRTY_SECONDS /* TODO should this be configurable? longer by default? infinite? */,
ActionListener.wrap(startedTask -> {
listener.onResponse(new PostFeatureUpgradeResponse(true, featuresToMigrate, null, null));
}, ex -> {


@@ -16,7 +16,6 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.bytes.BytesReference;
@@ -69,9 +68,7 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque
private String[] indices = EMPTY_ARRAY;
private IndicesOptions indicesOptions = DataStream.isFailureStoreFeatureFlagEnabled()
? IndicesOptions.strictExpandHiddenIncludeFailureStore()
: IndicesOptions.strictExpandHidden();
private IndicesOptions indicesOptions = IndicesOptions.strictExpandHiddenFailureNoSelectors();
private String[] featureStates = EMPTY_ARRAY;


@@ -721,7 +721,9 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
void getSnapshotInfo(Repository repository, SnapshotId snapshotId, ActionListener<SnapshotInfo> listener) {
enqueueTask(listener.delegateFailure((l, ref) -> {
if (isCancelledSupplier.getAsBoolean()) {
l.onFailure(new TaskCancelledException("task cancelled"));
try (ref) {
l.onFailure(new TaskCancelledException("task cancelled"));
}
} else {
repository.getSnapshotInfo(snapshotId, ActionListener.releaseAfter(l, ref));
}
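The cancellation branch previously failed the listener without releasing `ref`, leaking the slot it guarded. The same lines again, annotated with why each branch releases the reference where it does (a sketch, not a drop-in replacement):

```java
if (isCancelledSupplier.getAsBoolean()) {
    // Nothing asynchronous will run on this path, so close the ref inline;
    // try-with-resources releases it even though we complete exceptionally.
    try (ref) {
        l.onFailure(new TaskCancelledException("task cancelled"));
    }
} else {
    // The happy path must keep the ref open until the async fetch finishes,
    // so the release is chained onto the listener instead.
    repository.getSnapshotInfo(snapshotId, ActionListener.releaseAfter(l, ref));
}
```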


@@ -14,6 +14,7 @@ import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -43,7 +44,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
private String snapshot;
private String repository;
private String[] indices = Strings.EMPTY_ARRAY;
private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenFailureNoSelectors();
private String[] featureStates = Strings.EMPTY_ARRAY;
private String renamePattern;
private String renameReplacement;
@@ -138,6 +139,16 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
if (indicesOptions == null) {
validationException = addValidationError("indicesOptions is missing", validationException);
}
// This action does not use the IndexNameExpressionResolver to resolve concrete indices, which is why we check for selectors here
if (indicesOptions.allowSelectors() == false) {
for (String index : indices) {
try {
IndexNameExpressionResolver.SelectorResolver.parseExpression(index, indicesOptions);
} catch (IllegalArgumentException e) {
validationException = addValidationError(e.getMessage(), validationException);
}
}
}
if (featureStates == null) {
validationException = addValidationError("featureStates is missing", validationException);
}
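Because this request resolves its indices without going through the resolver's normal path, the loop above is the only place a stray `::` selector gets caught. A small sketch of what the check accepts and rejects (index names invented for illustration):

```java
// With allowSelectors() == false on this request's options:
//   "logs-2025"           parses cleanly, no validation error
//   "logs-2025::failures" throws IllegalArgumentException, which is
//                         surfaced as a validation error on the request
try {
    IndexNameExpressionResolver.SelectorResolver.parseExpression("logs-2025::failures", indicesOptions);
} catch (IllegalArgumentException e) {
    validationException = addValidationError(e.getMessage(), validationException);
}
```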


@@ -9,7 +9,6 @@
package org.elasticsearch.action.datastreams;
import org.elasticsearch.action.support.IndexComponentSelector;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexAbstraction;
@@ -55,60 +54,24 @@ public class DataStreamsActionUtil {
String[] names,
IndicesOptions indicesOptions
) {
List<ResolvedExpression> abstractionNames = indexNameExpressionResolver.dataStreams(
List<ResolvedExpression> resolvedDataStreamExpressions = indexNameExpressionResolver.dataStreams(
project,
updateIndicesOptions(indicesOptions),
names
);
SortedMap<String, IndexAbstraction> indicesLookup = project.getIndicesLookup();
List<String> results = new ArrayList<>(abstractionNames.size());
for (ResolvedExpression abstractionName : abstractionNames) {
IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName.resource());
List<String> results = new ArrayList<>(resolvedDataStreamExpressions.size());
for (ResolvedExpression resolvedExpression : resolvedDataStreamExpressions) {
IndexAbstraction indexAbstraction = indicesLookup.get(resolvedExpression.resource());
assert indexAbstraction != null;
if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) {
selectDataStreamIndicesNames(
(DataStream) indexAbstraction,
IndexComponentSelector.FAILURES.equals(abstractionName.selector()),
results
);
}
}
return results;
}
/**
* Resolves a list of expressions into data stream names and then collects the concrete indices
* that are applicable for those data streams based on the selector provided in the arguments.
* @param indexNameExpressionResolver resolver object
* @param project Project to query
* @param names data stream expressions
* @param selector which component indices of the data stream should be returned
* @param indicesOptions options for expression resolution
* @return A stream of concrete index names that belong to the components specified
* on the data streams returned from the expressions given
*/
public static List<String> resolveConcreteIndexNamesWithSelector(
IndexNameExpressionResolver indexNameExpressionResolver,
ProjectMetadata project,
String[] names,
IndexComponentSelector selector,
IndicesOptions indicesOptions
) {
assert indicesOptions.allowSelectors() == false : "If selectors are enabled, use resolveConcreteIndexNames instead";
List<String> abstractionNames = indexNameExpressionResolver.dataStreamNames(project, updateIndicesOptions(indicesOptions), names);
SortedMap<String, IndexAbstraction> indicesLookup = project.getIndicesLookup();
List<String> results = new ArrayList<>(abstractionNames.size());
for (String abstractionName : abstractionNames) {
IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName);
assert indexAbstraction != null;
if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) {
if (selector.shouldIncludeData()) {
selectDataStreamIndicesNames((DataStream) indexAbstraction, false, results);
DataStream dataStream = (DataStream) indexAbstraction;
if (IndexNameExpressionResolver.shouldIncludeRegularIndices(indicesOptions, resolvedExpression.selector())) {
selectDataStreamIndicesNames(dataStream, false, results);
}
if (selector.shouldIncludeFailures()) {
selectDataStreamIndicesNames((DataStream) indexAbstraction, true, results);
if (IndexNameExpressionResolver.shouldIncludeFailureIndices(indicesOptions, resolvedExpression.selector())) {
selectDataStreamIndicesNames(dataStream, true, results);
}
}
}
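With the selector logic folded into the two `shouldInclude*` helpers, the removed `resolveConcreteIndexNamesWithSelector` variant is no longer needed: selector-free options with `includeFailureIndices` enabled return backing and failure indices in a single pass. A hedged caller sketch (the `logs-*` pattern is invented):

```java
List<String> concreteIndices = DataStreamsActionUtil.resolveConcreteIndexNames(
    indexNameExpressionResolver,
    project,
    new String[] { "logs-*" },
    IndicesOptions.builder()
        .gatekeeperOptions(
            IndicesOptions.GatekeeperOptions.builder()
                .allowSelectors(false)        // "::" syntax is rejected outright...
                .includeFailureIndices(true)  // ...but failure-store indices are still collected
        )
        .build()
);
// concreteIndices now holds both .ds-* backing indices and .fs-* failure indices.
```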


@@ -57,6 +57,7 @@ public class DataStreamsStatsAction extends ActionType<DataStreamsStatsAction.Re
.allowClosedIndices(true)
.ignoreThrottled(false)
.allowSelectors(false)
.includeFailureIndices(true)
.build()
)
.build()


@@ -300,6 +300,8 @@ public record IndicesOptions(
* @param allowAliasToMultipleIndices, allow aliases to multiple indices, true by default.
* @param allowClosedIndices, allow closed indices, true by default.
* @param allowSelectors, allow selectors within index expressions, true by default.
* @param includeFailureIndices, when true includes the failure indices when a data stream or a data stream alias is encountered and
* selectors are not allowed.
* @param ignoreThrottled, filters out throttled (aka frozen indices), defaults to true. This is deprecated and the only one
* that only filters and never throws an error.
*/
@@ -307,11 +309,12 @@
boolean allowAliasToMultipleIndices,
boolean allowClosedIndices,
boolean allowSelectors,
boolean includeFailureIndices,
@Deprecated boolean ignoreThrottled
) implements ToXContentFragment {
public static final String IGNORE_THROTTLED = "ignore_throttled";
public static final GatekeeperOptions DEFAULT = new GatekeeperOptions(true, true, true, false);
public static final GatekeeperOptions DEFAULT = new GatekeeperOptions(true, true, true, false, false);
public static GatekeeperOptions parseParameter(Object ignoreThrottled, GatekeeperOptions defaultOptions) {
if (ignoreThrottled == null && defaultOptions != null) {
@@ -331,6 +334,7 @@
private boolean allowAliasToMultipleIndices;
private boolean allowClosedIndices;
private boolean allowSelectors;
private boolean includeFailureIndices;
private boolean ignoreThrottled;
public Builder() {
@@ -341,6 +345,7 @@
allowAliasToMultipleIndices = options.allowAliasToMultipleIndices;
allowClosedIndices = options.allowClosedIndices;
allowSelectors = options.allowSelectors;
includeFailureIndices = options.includeFailureIndices;
ignoreThrottled = options.ignoreThrottled;
}
@@ -372,6 +377,15 @@
return this;
}
/**
* When selectors are not allowed, this flag determines whether the failure store
* indices are included in the resolution. Defaults to false.
*/
public Builder includeFailureIndices(boolean includeFailureIndices) {
this.includeFailureIndices = includeFailureIndices;
return this;
}
/**
* Throttled indices will not be included in the result. Defaults to false.
*/
@@ -381,7 +395,13 @@
}
public GatekeeperOptions build() {
return new GatekeeperOptions(allowAliasToMultipleIndices, allowClosedIndices, allowSelectors, ignoreThrottled);
return new GatekeeperOptions(
allowAliasToMultipleIndices,
allowClosedIndices,
allowSelectors,
includeFailureIndices,
ignoreThrottled
);
}
}
@@ -429,8 +449,9 @@
ERROR_WHEN_CLOSED_INDICES,
IGNORE_THROTTLED,
ALLOW_FAILURE_INDICES, // Added in 8.14, Removed in 8.18
ALLOW_SELECTORS // Added in 8.18
ALLOW_FAILURE_INDICES, // Added in 8.14, Removed in 8.18
ALLOW_SELECTORS, // Added in 8.18
INCLUDE_FAILURE_INDICES // Added in 8.18
}
private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(IndicesOptions.class);
@@ -464,7 +485,7 @@
.ignoreThrottled(false)
)
.build();
public static final IndicesOptions STRICT_EXPAND_OPEN_FAILURE_STORE = IndicesOptions.builder()
public static final IndicesOptions STRICT_EXPAND_OPEN_FAILURE_NO_SELECTOR = IndicesOptions.builder()
.concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
.wildcardOptions(
WildcardOptions.builder()
@@ -478,7 +499,8 @@
GatekeeperOptions.builder()
.allowAliasToMultipleIndices(true)
.allowClosedIndices(true)
.allowSelectors(true)
.allowSelectors(false)
.includeFailureIndices(true)
.ignoreThrottled(false)
)
.build();
@@ -624,25 +646,7 @@
.ignoreThrottled(false)
)
.build();
public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder()
.concreteTargetOptions(ConcreteTargetOptions.ALLOW_UNAVAILABLE_TARGETS)
.wildcardOptions(
WildcardOptions.builder()
.matchOpen(true)
.matchClosed(true)
.includeHidden(false)
.allowEmptyExpressions(true)
.resolveAliases(true)
)
.gatekeeperOptions(
GatekeeperOptions.builder()
.allowAliasToMultipleIndices(true)
.allowClosedIndices(true)
.allowSelectors(true)
.ignoreThrottled(false)
)
.build();
public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_STORE = IndicesOptions.builder()
public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_NO_SELECTORS = IndicesOptions.builder()
.concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
.wildcardOptions(
WildcardOptions.builder().matchOpen(true).matchClosed(true).includeHidden(true).allowEmptyExpressions(true).resolveAliases(true)
@@ -651,25 +655,8 @@
GatekeeperOptions.builder()
.allowAliasToMultipleIndices(true)
.allowClosedIndices(true)
.allowSelectors(true)
.ignoreThrottled(false)
)
.build();
public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED_FAILURE_STORE = IndicesOptions.builder()
.concreteTargetOptions(ConcreteTargetOptions.ERROR_WHEN_UNAVAILABLE_TARGETS)
.wildcardOptions(
WildcardOptions.builder()
.matchOpen(true)
.matchClosed(true)
.includeHidden(false)
.allowEmptyExpressions(true)
.resolveAliases(true)
)
.gatekeeperOptions(
GatekeeperOptions.builder()
.allowAliasToMultipleIndices(true)
.allowClosedIndices(true)
.allowSelectors(true)
.allowSelectors(false)
.includeFailureIndices(true)
.ignoreThrottled(false)
)
.build();
@@ -843,6 +830,13 @@
return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.allowSelectors();
}
/**
* @return true when failure indices should be included in the resolution, false otherwise.
*/
public boolean includeFailureIndices() {
return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.includeFailureIndices();
}
/**
* @return whether aliases pointing to multiple indices are allowed
*/
@@ -893,6 +887,11 @@
backwardsCompatibleOptions.add(Option.ALLOW_SELECTORS);
}
}
if (out.getTransportVersion().onOrAfter(TransportVersions.ADD_INCLUDE_FAILURE_INDICES_OPTION)
&& gatekeeperOptions.includeFailureIndices()) {
backwardsCompatibleOptions.add(Option.INCLUDE_FAILURE_INDICES);
}
out.writeEnumSet(backwardsCompatibleOptions);
EnumSet<WildcardStates> states = EnumSet.noneOf(WildcardStates.class);
@@ -937,10 +936,15 @@
} else if (in.getTransportVersion().onOrAfter(TransportVersions.REPLACE_FAILURE_STORE_OPTIONS_WITH_SELECTOR_SYNTAX)) {
allowSelectors = options.contains(Option.ALLOW_SELECTORS);
}
boolean includeFailureIndices = false;
if (in.getTransportVersion().onOrAfter(TransportVersions.ADD_INCLUDE_FAILURE_INDICES_OPTION)) {
includeFailureIndices = options.contains(Option.INCLUDE_FAILURE_INDICES);
}
GatekeeperOptions gatekeeperOptions = GatekeeperOptions.builder()
.allowClosedIndices(options.contains(Option.ERROR_WHEN_CLOSED_INDICES) == false)
.allowAliasToMultipleIndices(options.contains(Option.ERROR_WHEN_ALIASES_TO_MULTIPLE_INDICES) == false)
.allowSelectors(allowSelectors)
.includeFailureIndices(includeFailureIndices)
.ignoreThrottled(options.contains(Option.IGNORE_THROTTLED))
.build();
if (in.getTransportVersion().between(TransportVersions.V_8_14_0, TransportVersions.V_8_16_0)) {
@@ -1319,6 +1323,15 @@
return STRICT_EXPAND_OPEN;
}
/**
* @return indices options that requires every specified index to exist, expands wildcards only to open indices and
* allows that no indices are resolved from wildcard expressions (not returning an error). It disallows selectors
* in the expression (no :: separators) but includes the failure indices.
*/
public static IndicesOptions strictExpandOpenFailureNoSelectors() {
return STRICT_EXPAND_OPEN_FAILURE_NO_SELECTOR;
}
/**
* @return indices options that requires every specified index to exist, expands wildcards only to open indices,
* allows that no indices are resolved from wildcard expressions (not returning an error) and forbids the
@@ -1362,22 +1375,13 @@
return STRICT_EXPAND_OPEN_CLOSED_HIDDEN_NO_SELECTORS;
}
/**
* @return indices option that expands wildcards to both open and closed indices, includes failure store
* (with data stream) and allows that indices can be missing and no indices are resolved from wildcard expressions
* (not returning an error).
*/
public static IndicesOptions lenientExpandIncludeFailureStore() {
return LENIENT_EXPAND_OPEN_CLOSED_FAILURE_STORE;
}
/**
* @return indices option that requires every specified index to exist, expands wildcards to both open and closed indices, includes
* hidden indices, includes failure store (with data stream) and allows that no indices are resolved from wildcard expressions
* (not returning an error).
* hidden indices, allows that no indices are resolved from wildcard expressions (not returning an error), and disallows selectors
* in the expression (no :: separators) but includes the failure indices.
*/
public static IndicesOptions strictExpandHiddenIncludeFailureStore() {
return STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_STORE;
public static IndicesOptions strictExpandHiddenFailureNoSelectors() {
return STRICT_EXPAND_OPEN_CLOSED_HIDDEN_FAILURE_NO_SELECTORS;
}
/**
@@ -1467,6 +1471,9 @@
+ ignoreThrottled()
// Until the feature flag is removed we access the field directly from the gatekeeper options.
+ (DataStream.isFailureStoreFeatureFlagEnabled() ? ", allow_selectors=" + gatekeeperOptions().allowSelectors() : "")
+ (DataStream.isFailureStoreFeatureFlagEnabled()
? ", include_failure_indices=" + gatekeeperOptions().includeFailureIndices()
: "")
+ ']';
}
}
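Taken together, the renamed constants swap the old selector-driven failure-store options for selector-free ones that pull failure indices in implicitly. A short sketch of what the accessors report for the new option set (note both accessors are additionally gated on `DataStream.isFailureStoreFeatureFlagEnabled()`):

```java
IndicesOptions options = IndicesOptions.strictExpandHiddenFailureNoSelectors();

// "::" selectors are rejected in expressions...
boolean selectorsAllowed = options.allowSelectors();       // false
// ...but failure-store indices still come back from resolution
// (true only while the failure-store feature flag is enabled).
boolean failuresIncluded = options.includeFailureIndices();
```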


@@ -96,7 +96,17 @@ public class IndexNameExpressionResolver {
*/
public record ResolvedExpression(String resource, @Nullable IndexComponentSelector selector) {
public ResolvedExpression(String indexAbstraction) {
this(indexAbstraction, null);
this(indexAbstraction, (IndexComponentSelector) null);
}
/**
* Constructs a ResolvedExpression with the DATA selector if selectors are allowed,
* or with a null selector otherwise.
* @param indexAbstraction the resolved resource name
* @param options the indices options in force, which determine whether selectors are allowed
*/
public ResolvedExpression(String indexAbstraction, IndicesOptions options) {
this(indexAbstraction, options.allowSelectors() ? IndexComponentSelector.DATA : null);
}
public String combined() {
@@ -710,19 +720,19 @@
}
}
private static boolean shouldIncludeRegularIndices(IndicesOptions indicesOptions, IndexComponentSelector expressionSelector) {
public static boolean shouldIncludeRegularIndices(IndicesOptions indicesOptions, IndexComponentSelector expressionSelector) {
if (indicesOptions.allowSelectors()) {
return expressionSelector == null || expressionSelector.shouldIncludeData();
}
return true;
}
private static boolean shouldIncludeFailureIndices(IndicesOptions indicesOptions, IndexComponentSelector expressionSelector) {
public static boolean shouldIncludeFailureIndices(IndicesOptions indicesOptions, IndexComponentSelector expressionSelector) {
// We return failure indices regardless of whether the data stream actually has the `failureStoreEnabled` flag set to true.
if (indicesOptions.allowSelectors()) {
return expressionSelector != null && expressionSelector.shouldIncludeFailures();
}
return false;
return indicesOptions.includeFailureIndices();
}
private static boolean resolvesToMoreThanOneIndex(IndexAbstraction indexAbstraction, Context context, ResolvedExpression expression) {
@@ -1941,12 +1951,7 @@
Index index = indexAbstraction.getIndices().get(i);
IndexMetadata indexMetadata = context.project.index(index);
if (indexMetadata.getState() != excludeState) {
resources.add(
new ResolvedExpression(
index.getName(),
context.options.allowSelectors() ? IndexComponentSelector.DATA : null
)
);
resources.add(new ResolvedExpression(index.getName(), context.getOptions()));
}
}
}
@@ -1957,7 +1962,7 @@
Index index = failureIndices.get(i);
IndexMetadata indexMetadata = context.getProject().index(index);
if (indexMetadata.getState() != excludeState) {
resources.add(new ResolvedExpression(index.getName(), IndexComponentSelector.DATA));
resources.add(new ResolvedExpression(index.getName(), context.getOptions()));
}
}
}
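The new convenience constructor keeps the selector consistent with the options in force, which is what lets the two loops above collapse onto a single expression. A sketch, assuming the failure-store feature flag is enabled (since `allowSelectors()` is gated on it):

```java
// Selector-aware options: the expression carries the implicit ::data selector.
ResolvedExpression dataExpr = new ResolvedExpression("my-index", IndicesOptions.strictExpandOpen());
// dataExpr.selector() == IndexComponentSelector.DATA

// Selector-free options: no selector is attached at all.
ResolvedExpression bareExpr = new ResolvedExpression("my-index", IndicesOptions.strictExpandOpenFailureNoSelectors());
// bareExpr.selector() == null
```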


@@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.features.FeatureService;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.persistent.AllocatedPersistentTask;
@@ -162,7 +163,7 @@ public final class HealthNodeTaskExecutor extends PersistentTasksExecutor<Health
TASK_NAME,
TASK_NAME,
new HealthNodeTaskParams(),
null,
TimeValue.THIRTY_SECONDS /* TODO should this be configurable? longer by default? infinite? */,
ActionListener.wrap(r -> logger.debug("Created the health node task"), e -> {
if (e instanceof NodeClosedException) {
logger.debug("Failed to create health node task because node is shutting down", e);


@@ -16,6 +16,7 @@ import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgradeStatusAction;
import org.elasticsearch.action.admin.cluster.snapshots.features.ResetFeatureStateResponse.ResetFeatureStateStatus;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction;
@@ -110,7 +111,8 @@ import static org.elasticsearch.tasks.TaskResultsService.TASKS_FEATURE_NAME;
public class SystemIndices {
public static final String SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY = "_system_index_access_allowed";
public static final String EXTERNAL_SYSTEM_INDEX_ACCESS_CONTROL_HEADER_KEY = "_external_system_index_access_origin";
public static final String UPGRADED_INDEX_SUFFIX = "-reindexed-for-9";
private static final int UPGRADED_TO_VERSION = TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION.major + 1;
public static final String UPGRADED_INDEX_SUFFIX = "-reindexed-for-" + UPGRADED_TO_VERSION;
private static final Automaton EMPTY = Automata.makeEmpty();
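Deriving the suffix keeps it in lockstep with the upgrade floor instead of hard-coding a major version. Worked through with the values above:

```java
// NO_UPGRADE_REQUIRED_VERSION is now 9.0.0, so:
int upgradedToVersion = Version.V_9_0_0.major + 1;       // 10
String suffix = "-reindexed-for-" + upgradedToVersion;   // "-reindexed-for-10"
// The old constant was pinned to "-reindexed-for-9" and needed editing every major.
```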


@@ -65,7 +65,13 @@ public class AllocatedPersistentTask extends CancellableTask {
final PersistentTaskState state,
final ActionListener<PersistentTasksCustomMetadata.PersistentTask<?>> listener
) {
persistentTasksService.sendUpdateStateRequest(persistentTaskId, allocationId, state, null, listener);
persistentTasksService.sendUpdateStateRequest(
persistentTaskId,
allocationId,
state,
TimeValue.THIRTY_SECONDS /* TODO should this be longer? infinite? */,
listener
);
}
public String getPersistentTaskId() {
@@ -201,7 +207,7 @@
getAllocationId(),
failure,
localAbortReason,
null,
TimeValue.THIRTY_SECONDS /* TODO should this be longer? infinite? */,
new ActionListener<>() {
@Override
public void onResponse(PersistentTasksCustomMetadata.PersistentTask<?> persistentTask) {


@@ -21,6 +21,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
@@ -56,8 +57,8 @@ public class CompletionPersistentTaskAction {
localAbortReason = in.readOptionalString();
}
public Request(String taskId, long allocationId, Exception exception, String localAbortReason) {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
public Request(TimeValue masterNodeTimeout, String taskId, long allocationId, Exception exception, String localAbortReason) {
super(masterNodeTimeout);
this.taskId = taskId;
this.exception = exception;
this.allocationId = allocationId;


@@ -18,6 +18,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask;
import org.elasticsearch.tasks.Task;
@@ -313,7 +314,7 @@ public class PersistentTasksNodeService implements ClusterStateListener {
taskInProgress.getAllocationId(),
originalException,
null,
null,
TimeValue.THIRTY_SECONDS /* TODO should this be longer? infinite? */,
new ActionListener<>() {
@Override
public void onResponse(PersistentTask<?> persistentTask) {
@@ -349,7 +350,7 @@
if (task.markAsCancelled()) {
// Cancel the local task using the task manager
String reason = "task has been removed, cancelling locally";
persistentTasksService.sendCancelRequest(task.getId(), reason, null, new ActionListener<>() {
persistentTasksService.sendCancelRequest(task.getId(), reason, new ActionListener<>() {
@Override
public void onResponse(ListTasksResponse cancelTasksResponse) {
logger.trace(


@@ -27,6 +27,7 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.Objects;
import java.util.function.Predicate;
/**
@@ -57,16 +58,16 @@ public class PersistentTasksService {
final String taskId,
final String taskName,
final Params taskParams,
final @Nullable TimeValue timeout,
final TimeValue timeout,
final ActionListener<PersistentTask<Params>> listener
) {
@SuppressWarnings("unchecked")
final ActionListener<PersistentTask<?>> wrappedListener = listener.map(t -> (PersistentTask<Params>) t);
StartPersistentTaskAction.Request request = new StartPersistentTaskAction.Request(taskId, taskName, taskParams);
if (timeout != null) {
request.masterNodeTimeout(timeout);
}
execute(request, StartPersistentTaskAction.INSTANCE, wrappedListener);
execute(
new StartPersistentTaskAction.Request(Objects.requireNonNull(timeout), taskId, taskName, taskParams),
StartPersistentTaskAction.INSTANCE,
wrappedListener
);
}
/**
@@ -85,33 +86,27 @@
final @Nullable TimeValue timeout,
final ActionListener<PersistentTask<?>> listener
) {
CompletionPersistentTaskAction.Request request = new CompletionPersistentTaskAction.Request(
taskId,
taskAllocationId,
taskFailure,
localAbortReason
execute(
new CompletionPersistentTaskAction.Request(
Objects.requireNonNull(timeout),
taskId,
taskAllocationId,
taskFailure,
localAbortReason
),
CompletionPersistentTaskAction.INSTANCE,
listener
);
if (timeout != null) {
request.masterNodeTimeout(timeout);
}
execute(request, CompletionPersistentTaskAction.INSTANCE, listener);
}
/**
* Cancels a locally running task using the Task Manager API. Accepts operation timeout as optional parameter
*/
void sendCancelRequest(
final long taskId,
final String reason,
final @Nullable TimeValue timeout,
final ActionListener<ListTasksResponse> listener
) {
void sendCancelRequest(final long taskId, final String reason, final ActionListener<ListTasksResponse> listener) {
CancelTasksRequest request = new CancelTasksRequest();
request.setTargetTaskId(new TaskId(clusterService.localNode().getId(), taskId));
request.setReason(reason);
if (timeout != null) {
request.setTimeout(timeout);
}
// TODO set timeout?
try {
client.admin().cluster().cancelTasks(request, listener);
} catch (Exception e) {
@@ -130,33 +125,25 @@
final String taskId,
final long taskAllocationID,
final PersistentTaskState taskState,
final @Nullable TimeValue timeout,
final TimeValue timeout,
final ActionListener<PersistentTask<?>> listener
) {
UpdatePersistentTaskStatusAction.Request request = new UpdatePersistentTaskStatusAction.Request(
taskId,
taskAllocationID,
taskState
execute(
new UpdatePersistentTaskStatusAction.Request(Objects.requireNonNull(timeout), taskId, taskAllocationID, taskState),
UpdatePersistentTaskStatusAction.INSTANCE,
listener
);
if (timeout != null) {
request.masterNodeTimeout(timeout);
}
execute(request, UpdatePersistentTaskStatusAction.INSTANCE, listener);
}
/**
* Notifies the master node to remove a persistent task from the cluster state. Accepts operation timeout as optional parameter
*/
public void sendRemoveRequest(
final String taskId,
final @Nullable TimeValue timeout,
final ActionListener<PersistentTask<?>> listener
) {
RemovePersistentTaskAction.Request request = new RemovePersistentTaskAction.Request(taskId);
if (timeout != null) {
request.masterNodeTimeout(timeout);
}
execute(request, RemovePersistentTaskAction.INSTANCE, listener);
public void sendRemoveRequest(final String taskId, final TimeValue timeout, final ActionListener<PersistentTask<?>> listener) {
execute(
new RemovePersistentTaskAction.Request(Objects.requireNonNull(timeout), taskId),
RemovePersistentTaskAction.INSTANCE,
listener
);
}
/**
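The upshot of dropping `@Nullable` is that callers can no longer fall back on the trappy implicit default: a null timeout now fails fast in `Objects.requireNonNull`, and the production call sites in this commit thread through `TimeValue.THIRTY_SECONDS` pending the TODOs. A hedged caller sketch (the task name and params are invented):

```java
persistentTasksService.sendStartRequest(
    UUIDs.base64UUID(),                // task id
    "cluster:demo/task",               // hypothetical task name
    taskParams,                        // hypothetical PersistentTaskParams instance
    TimeValue.timeValueSeconds(30),    // now mandatory; null throws NullPointerException
    ActionListener.wrap(
        task -> {},                    // task was created
        e -> {}                        // creation failed, e.g. ResourceAlreadyExistsException
    )
);
```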


@@ -21,6 +21,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
@@ -44,8 +45,8 @@ public class RemovePersistentTaskAction {
taskId = in.readString();
}
public Request(String taskId) {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
public Request(TimeValue masterNodeTimeout, String taskId) {
super(masterNodeTimeout);
this.taskId = taskId;
}


@@ -21,6 +21,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
@@ -52,8 +53,8 @@ public class StartPersistentTaskAction {
params = in.readNamedWriteable(PersistentTaskParams.class);
}
public Request(String taskId, String taskName, PersistentTaskParams params) {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
public Request(TimeValue masterNodeTimeout, String taskId, String taskName, PersistentTaskParams params) {
super(masterNodeTimeout);
this.taskId = taskId;
this.taskName = taskName;
this.params = params;


@@ -21,6 +21,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
@@ -49,8 +50,8 @@ public class UpdatePersistentTaskStatusAction {
state = in.readOptionalNamedWriteable(PersistentTaskState.class);
}
public Request(String taskId, long allocationId, PersistentTaskState state) {
super(TRAPPY_IMPLICIT_DEFAULT_MASTER_NODE_TIMEOUT);
public Request(TimeValue masterNodeTimeout, String taskId, long allocationId, PersistentTaskState state) {
super(masterNodeTimeout);
this.taskId = taskId;
this.allocationId = allocationId;
this.state = state;


@@ -104,7 +104,8 @@ public final class SearchHit implements Writeable, ToXContentObject, RefCounted
private transient String index;
private transient String clusterAlias;
private Map<String, Object> sourceAsMap;
// For asserting that the method #getSourceAsMap is called at most once during the lifetime of this object
private boolean sourceAsMapCalled = false;
private Map<String, SearchHits> innerHits;
@@ -142,7 +143,6 @@ public final class SearchHit implements Writeable, ToXContentObject, RefCounted
null,
null,
null,
null,
new HashMap<>(),
new HashMap<>(),
refCounted
@@ -166,7 +166,6 @@ public final class SearchHit implements Writeable, ToXContentObject, RefCounted
SearchShardTarget shard,
String index,
String clusterAlias,
Map<String, Object> sourceAsMap,
Map<String, SearchHits> innerHits,
Map<String, DocumentField> documentFields,
Map<String, DocumentField> metaFields,
@@ -188,7 +187,6 @@ public final class SearchHit implements Writeable, ToXContentObject, RefCounted
this.shard = shard;
this.index = index;
this.clusterAlias = clusterAlias;
this.sourceAsMap = sourceAsMap;
this.innerHits = innerHits;
this.documentFields = documentFields;
this.metaFields = metaFields;
@@ -279,7 +277,6 @@ public final class SearchHit implements Writeable, ToXContentObject, RefCounted
shardTarget,
index,
clusterAlias,
null,
innerHits,
documentFields,
metaFields,
@@ -447,7 +444,6 @@ public final class SearchHit implements Writeable, ToXContentObject, RefCounted
*/
public SearchHit sourceRef(BytesReference source) {
this.source = source;
this.sourceAsMap = null;
return this;
}
@@ -476,19 +472,18 @@
}
/**
* The source of the document as a map (can be {@code null}).
* The source of the document as a map (can be {@code null}). This method is expected
* to be called at most once during the lifetime of the object, as the generated map
* is expensive to build and does not get cached.
*/
public Map<String, Object> getSourceAsMap() {
assert hasReferences();
assert sourceAsMapCalled == false : "getSourceAsMap() called twice";
sourceAsMapCalled = true;
if (source == null) {
return null;
}
if (sourceAsMap != null) {
return sourceAsMap;
}
sourceAsMap = Source.fromBytes(source).source();
return sourceAsMap;
return Source.fromBytes(source).source();
}
/**
@@ -758,7 +753,6 @@
shard,
index,
clusterAlias,
sourceAsMap,
innerHits == null
? null
: innerHits.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().asUnpooled())),
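This is the change behind the test refactors earlier in this diff: the parsed map is no longer cached on the hit, and an assertion trips if the accessor runs twice. Callers should parse once and reuse the local, as in this minimal sketch:

```java
// Before (re-parses the bytes each time, and now trips the assertion on the second call):
//   hit.getSourceAsMap().get("field1");
//   hit.getSourceAsMap().get("field2");

// After: parse the source bytes once, then read from the local map.
Map<String, Object> source = hit.getSourceAsMap();
Object field1 = source.get("field1");
Object field2 = source.get("field2");
```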


@@ -235,6 +235,8 @@ public class InternalTopHits extends InternalAggregation implements TopHits {
} else if (tokens[0].equals(SCORE)) {
return topHit.getScore();
} else if (tokens[0].equals(SOURCE)) {
// Caching the map might help here but memory usage is a concern for this class
// This is dead code; pipeline aggregations do not support _source.field.
Map<String, Object> sourceAsMap = topHit.getSourceAsMap();
if (sourceAsMap != null) {
Object property = sourceAsMap.get(tokens[1]);


@@ -4141,9 +4141,6 @@ public final class SnapshotsService extends AbstractLifecycleComponent implement
request.partial(),
indexIds,
CollectionUtils.concatLists(
// It's ok to just get the data stream names here because we have already resolved every concrete index that will be
// in the snapshot, and thus already resolved any selectors that might be present. We now only care about which data
// streams we're packing up in the resulting snapshot, not what their contents are.
indexNameExpressionResolver.dataStreamNames(currentState, request.indicesOptions(), request.indices()),
systemDataStreamNames
),


@@ -89,6 +89,7 @@ public class CreateSnapshotRequestTests extends ESTestCase {
randomBoolean()
)
)
.gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowSelectors(false).includeFailureIndices(true).build())
.build()
);
}

View file

@@ -89,6 +89,7 @@ public class RestoreSnapshotRequestTests extends AbstractWireSerializingTestCase
randomBoolean()
)
)
.gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowSelectors(false).includeFailureIndices(true).build())
.build()
);
}

View file

@@ -106,6 +106,19 @@ public class DataStreamsActionUtilTests extends ESTestCase {
assertThat(resolved, containsInAnyOrder(".ds-foo1", ".ds-foo2", ".ds-baz1"));
// Including the failure indices
resolved = DataStreamsActionUtil.resolveConcreteIndexNames(
indexNameExpressionResolver,
clusterState.getMetadata().getProject(projectId),
query,
IndicesOptions.builder()
.wildcardOptions(IndicesOptions.WildcardOptions.builder().includeHidden(true))
.gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowSelectors(false).includeFailureIndices(true))
.build()
);
assertThat(resolved, containsInAnyOrder(".ds-foo1", ".ds-foo2", ".ds-baz1", ".fs-foo1"));
when(indexNameExpressionResolver.dataStreams(any(ProjectMetadata.class), any(), eq(query))).thenReturn(
List.of(
new ResolvedExpression("fooDs", IndexComponentSelector.DATA),

View file

@@ -55,6 +55,7 @@ public class IndicesOptionsTests extends ESTestCase {
.ignoreThrottled(randomBoolean())
.allowAliasToMultipleIndices(randomBoolean())
.allowClosedIndices(randomBoolean())
.allowSelectors(randomBoolean())
)
.build();
@@ -340,7 +341,13 @@
randomBoolean(),
randomBoolean()
);
GatekeeperOptions gatekeeperOptions = new GatekeeperOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
GatekeeperOptions gatekeeperOptions = new GatekeeperOptions(
randomBoolean(),
randomBoolean(),
randomBoolean(),
randomBoolean(),
randomBoolean()
);
IndicesOptions indicesOptions = new IndicesOptions(concreteTargetOptions, wildcardOptions, gatekeeperOptions);


@@ -42,6 +42,7 @@ import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.isNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
@@ -102,7 +103,7 @@ public class HealthNodeTaskExecutorTests extends ESTestCase {
eq("health-node"),
eq("health-node"),
eq(new HealthNodeTaskParams()),
eq(null),
isNotNull(),
any()
)
);
@@ -121,7 +122,7 @@
eq("health-node"),
eq("health-node"),
eq(new HealthNodeTaskParams()),
eq(null),
isNotNull(),
any()
);
}


@@ -18,7 +18,7 @@ public class CancelPersistentTaskRequestTests extends AbstractWireSerializingTes
@Override
protected Request createTestInstance() {
return new Request(randomAsciiOfLength(10));
return new Request(randomTimeValue(), randomAsciiOfLength(10));
}
@Override


@@ -17,9 +17,9 @@ public class CompletionPersistentTaskRequestTests extends AbstractWireSerializin
@Override
protected Request createTestInstance() {
if (randomBoolean()) {
return new Request(randomAlphaOfLength(10), randomNonNegativeLong(), null, null);
return new Request(randomTimeValue(), randomAlphaOfLength(10), randomNonNegativeLong(), null, null);
} else {
return new Request(randomAlphaOfLength(10), randomNonNegativeLong(), null, randomAlphaOfLength(20));
return new Request(randomTimeValue(), randomAlphaOfLength(10), randomNonNegativeLong(), null, randomAlphaOfLength(20));
}
}


@@ -260,12 +260,7 @@ public class PersistentTasksNodeServiceTests extends ESTestCase {
when(client.settings()).thenReturn(Settings.EMPTY);
PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, client) {
@Override
void sendCancelRequest(
final long taskId,
final String reason,
final TimeValue timeout,
final ActionListener<ListTasksResponse> listener
) {
void sendCancelRequest(final long taskId, final String reason, final ActionListener<ListTasksResponse> listener) {
capturedTaskId.set(taskId);
capturedListener.set(listener);
}
@@ -356,12 +351,7 @@
when(client.settings()).thenReturn(Settings.EMPTY);
PersistentTasksService persistentTasksService = new PersistentTasksService(null, null, client) {
@Override
void sendCancelRequest(
final long taskId,
final String reason,
final TimeValue timeout,
final ActionListener<ListTasksResponse> listener
) {
void sendCancelRequest(final long taskId, final String reason, final ActionListener<ListTasksResponse> listener) {
fail("Shouldn't be called during local abort");
}


@@ -30,7 +30,7 @@ public class StartPersistentActionRequestTests extends AbstractWireSerializingTe
if (randomBoolean()) {
testParams.setExecutorNodeAttr(randomAlphaOfLengthBetween(1, 20));
}
return new Request(UUIDs.base64UUID(), randomAlphaOfLengthBetween(1, 20), testParams);
return new Request(randomTimeValue(), UUIDs.base64UUID(), randomAlphaOfLengthBetween(1, 20), testParams);
}
@Override


@@ -22,7 +22,7 @@ public class UpdatePersistentTaskRequestTests extends AbstractWireSerializingTes
@Override
protected Request createTestInstance() {
return new Request(UUIDs.base64UUID(), randomLong(), new State(randomAlphaOfLength(10)));
return new Request(randomTimeValue(), UUIDs.base64UUID(), randomLong(), new State(randomAlphaOfLength(10)));
}
@Override

Some files were not shown because too many files have changed in this diff