Merge main into multi-project

Tim Vernum 2025-01-09 19:23:53 +11:00
commit 280fcb1a57
527 changed files with 16545 additions and 7364 deletions

View file

@@ -10,6 +10,8 @@
 import org.elasticsearch.gradle.util.Pair
 import org.elasticsearch.gradle.util.GradleUtils
 import org.elasticsearch.gradle.internal.test.TestUtil
+import org.elasticsearch.gradle.internal.idea.EnablePreviewFeaturesTask
+import org.elasticsearch.gradle.internal.idea.IdeaXmlUtil
 import org.jetbrains.gradle.ext.JUnit
 import java.nio.file.Files
@@ -144,19 +146,10 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') {
     }
     // modifies the idea module config to enable preview features on ':libs:native' module
-    tasks.register("enablePreviewFeatures") {
+    tasks.register("enablePreviewFeatures", EnablePreviewFeaturesTask) {
       group = 'ide'
       description = 'Enables preview features on native library module'
       dependsOn tasks.named("enableExternalConfiguration")
-      // ext {
-      def enablePreview = { moduleFile, languageLevel ->
-        IdeaXmlUtil.modifyXml(moduleFile) { xml ->
-          xml.component.find { it.'@name' == 'NewModuleRootManager' }?.'@LANGUAGE_LEVEL' = languageLevel
-        }
-      }
-      // }
       doLast {
         enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.main.iml', 'JDK_21_PREVIEW')
         enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.test.iml', 'JDK_21_PREVIEW')
@@ -277,46 +270,6 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') {
   }
 }
-/**
- * Parses a given XML file, applies a set of changes, and writes those changes back to the original file.
- *
- * @param path Path to existing XML file
- * @param action Action to perform on parsed XML document
- * @param preface optional front matter to add after the XML declaration
- *                but before the XML document, e.g. a doctype or comment
- */
-class IdeaXmlUtil {
-  static Node parseXml(Object xmlPath) {
-    File xmlFile = new File(xmlPath)
-    XmlParser xmlParser = new XmlParser(false, true, true)
-    xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false)
-    Node xml = xmlParser.parse(xmlFile)
-    return xml
-  }
-  static void modifyXml(Object xmlPath, Action<? super Node> action, String preface = null) {
-    File xmlFile = new File(xmlPath)
-    if (xmlFile.exists()) {
-      Node xml = parseXml(xmlPath)
-      action.execute(xml)
-      xmlFile.withPrintWriter { writer ->
-        def printer = new XmlNodePrinter(writer)
-        printer.namespaceAware = true
-        printer.preserveWhitespace = true
-        writer.write("<?xml version=\"1.0\"?>\n")
-        if (preface != null) {
-          writer.write(preface)
-        }
-        printer.print(xml)
-      }
-    }
-  }
-}
 Pair<File, IncludedBuild> locateElasticsearchWorkspace(Gradle gradle) {
   if (gradle.parent == null) {
     // See if any of these included builds is the Elasticsearch gradle

View file

@@ -0,0 +1,43 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.idea;
import groovy.util.Node;
import groovy.util.NodeList;
import org.gradle.api.DefaultTask;
import org.xml.sax.SAXException;
import java.io.IOException;
import javax.xml.parsers.ParserConfigurationException;
public class EnablePreviewFeaturesTask extends DefaultTask {
public void enablePreview(String moduleFile, String languageLevel) throws IOException, ParserConfigurationException, SAXException {
IdeaXmlUtil.modifyXml(moduleFile, xml -> {
// Find the 'component' node
NodeList nodes = (NodeList) xml.depthFirst();
Node componentNode = null;
for (Object node : nodes) {
Node currentNode = (Node) node;
if ("component".equals(currentNode.name()) && "NewModuleRootManager".equals(currentNode.attribute("name"))) {
componentNode = currentNode;
break;
}
}
// Add the attribute to the 'component' node
if (componentNode != null) {
componentNode.attributes().put("LANGUAGE_LEVEL", languageLevel);
}
});
}
}
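
For orientation (not part of the commit): the attribute this task writes lives on the `NewModuleRootManager` component of an IntelliJ `.iml` module file. After the task runs, the relevant element would look roughly like this hypothetical, abridged module file:

```xml
<?xml version="1.0"?>
<module type="JAVA_MODULE" version="4">
  <!-- LANGUAGE_LEVEL is the attribute set by EnablePreviewFeaturesTask -->
  <component name="NewModuleRootManager" LANGUAGE_LEVEL="JDK_21_PREVIEW">
    <!-- content roots and order entries omitted -->
  </component>
</module>
```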

View file

@@ -0,0 +1,73 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.gradle.internal.idea;
import groovy.util.Node;
import groovy.util.XmlParser;
import groovy.xml.XmlNodePrinter;
import org.gradle.api.Action;
import org.xml.sax.SAXException;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
import javax.xml.parsers.ParserConfigurationException;
public class IdeaXmlUtil {
static Node parseXml(String xmlPath) throws IOException, SAXException, ParserConfigurationException {
File xmlFile = new File(xmlPath);
XmlParser xmlParser = new XmlParser(false, true, true);
xmlParser.setFeature("http://apache.org/xml/features/nonvalidating/load-external-dtd", false);
Node xml = xmlParser.parse(xmlFile);
return xml;
}
/**
 * Parses a given XML file, applies a set of changes, and writes those changes back to the original file.
 *
 * @param xmlPath Path to existing XML file
 * @param action Action to perform on parsed XML document
 */
static void modifyXml(String xmlPath, Action<? super Node> action) throws IOException, ParserConfigurationException, SAXException {
modifyXml(xmlPath, action, null);
}
/**
* Parses a given XML file, applies a set of changes, and writes those changes back to the original file.
*
* @param xmlPath Path to existing XML file
* @param action Action to perform on parsed XML document
* @param preface optional front matter to add after the XML declaration
* but before the XML document, e.g. a doctype or comment
*/
static void modifyXml(String xmlPath, Action<? super Node> action, String preface) throws IOException, ParserConfigurationException,
SAXException {
File xmlFile = new File(xmlPath);
if (xmlFile.exists()) {
Node xml = parseXml(xmlPath);
action.execute(xml);
try (PrintWriter writer = new PrintWriter(xmlFile)) {
var printer = new XmlNodePrinter(writer);
printer.setNamespaceAware(true);
printer.setPreserveWhitespace(true);
writer.write("<?xml version=\"1.0\"?>\n");
if (preface != null) {
writer.write(preface);
}
printer.print(xml);
}
}
}
}
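
As a usage illustration (a hypothetical sketch, not code from this commit): the `Action` mutates the parsed `groovy.util.Node` tree in place, and the optional preface string lands between the XML declaration and the document root.

```java
// Hypothetical caller of IdeaXmlUtil.modifyXml; the file name and attribute are made up.
static void stampModuleVersion() throws IOException, ParserConfigurationException, SAXException {
    IdeaXmlUtil.modifyXml("build/some-module.iml", xml -> {
        // Node.attributes() exposes the root element's attributes as a mutable map
        xml.attributes().put("version", "4");
    }, "<!-- machine-generated; do not edit by hand -->\n");
}
```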

View file

@@ -89,7 +89,6 @@ public class RestClientBuilderIntegTests extends RestClientTestCase {
     }
     public void testBuilderUsesDefaultSSLContext() throws Exception {
-        assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm());
         final SSLContext defaultSSLContext = SSLContext.getDefault();
         try {
             try (RestClient client = buildRestClient()) {
@@ -97,10 +96,15 @@ public class RestClientBuilderIntegTests extends RestClientTestCase {
                 client.performRequest(new Request("GET", "/"));
                 fail("connection should have been rejected due to SSL handshake");
             } catch (Exception e) {
+                if (inFipsJvm()) {
+                    // Bouncy Castle throws a different exception
+                    assertThat(e, instanceOf(IOException.class));
+                    assertThat(e.getCause(), instanceOf(javax.net.ssl.SSLException.class));
+                } else {
                     assertThat(e, instanceOf(SSLHandshakeException.class));
+                }
             }
         }
         SSLContext.setDefault(getSslContext());
         try (RestClient client = buildRestClient()) {
             Response response = client.performRequest(new Request("GET", "/"));
@@ -112,7 +116,6 @@ public class RestClientBuilderIntegTests extends RestClientTestCase {
     }
     public void testBuilderSetsThreadName() throws Exception {
-        assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm());
         final SSLContext defaultSSLContext = SSLContext.getDefault();
         try {
             SSLContext.setDefault(getSslContext());

View file

@@ -71,7 +71,7 @@ final class SystemJvmOptions {
         maybeSetActiveProcessorCount(nodeSettings),
         maybeSetReplayFile(distroType, isHotspot),
         maybeWorkaroundG1Bug(),
-        maybeAllowSecurityManager(),
+        maybeAllowSecurityManager(useEntitlements),
         maybeAttachEntitlementAgent(useEntitlements)
     ).flatMap(s -> s).toList();
 }
@@ -140,7 +140,7 @@
     }
     @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA)
-    private static Stream<String> maybeAllowSecurityManager() {
+    private static Stream<String> maybeAllowSecurityManager(boolean useEntitlements) {
         if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
             // Will become conditional on useEntitlements once entitlements can run without SM
             return Stream.of("-Djava.security.manager=allow");

View file

@@ -0,0 +1,6 @@
pr: 118324
summary: Allow the data type of `null` in filters
area: ES|QL
type: bug
issues:
- 116351

View file

@@ -0,0 +1,5 @@
pr: 118652
summary: Add Jina AI API to do inference for Embedding and Rerank models
area: Machine Learning
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 118871
summary: "[Elastic Inference Service] Add ElasticInferenceService Unified ChatCompletions Integration"
area: Inference
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 118919
summary: Remove unsupported timeout from rest-api-spec license API
area: License
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 118921
summary: Add missing timeouts to rest-api-spec shutdown APIs
area: Infra/Node Lifecycle
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 118938
summary: Hash functions
area: ES|QL
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 118954
summary: Add missing parameter to `xpack.info` rest-api-spec
area: Infra/REST API
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 119067
summary: Metrics for indexing failures due to version conflicts
area: CRUD
type: feature
issues: []

View file

@@ -0,0 +1,5 @@
pr: 119250
summary: Add rest endpoint for `create_from_source_index`
area: Data streams
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 119389
summary: Restrict Connector APIs to manage/monitor_connector privileges
area: Extract&Transform
type: feature
issues: []

View file

@@ -0,0 +1,5 @@
pr: 119449
summary: Add missing traces ilm policy for OTel traces data streams
area: Data streams
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 119504
summary: Optimized index sorting for OTel logs
area: Data streams
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 119542
summary: Wait while index is blocked
area: Transform
type: enhancement
issues: []

View file

@@ -0,0 +1,7 @@
pr: 119543
summary: "[Inference API] Fix unique ID message for inference ID matches trained model\
\ ID"
area: Machine Learning
type: bug
issues:
- 111312

View file

@@ -0,0 +1,6 @@
pr: 119691
summary: Fix `bbq_hnsw` merge file cleanup on random IO exceptions
area: Vector Search
type: bug
issues:
- 119392

View file

@@ -239,6 +239,9 @@ Number of indexing operations, such as `1`.
 `indexing.index_failed`, `iif`, `indexingIndexFailed`::
 Number of failed indexing operations, such as `0`.
+`indexing.index_failed_due_to_version_conflict`, `iifvc`, `indexingIndexFailedDueToVersionConflict`::
+Number of failed indexing operations due to version conflict, such as `0`.
 `merges.current`, `mc`, `mergesCurrent`::
 Number of current merge operations, such as `0`.

View file

@@ -162,6 +162,9 @@ Number of indexing operations, such as `1`.
 `indexing.index_failed`, `iif`, `indexingIndexFailed`::
 Number of failed indexing operations, such as `0`.
+`indexing.index_failed_due_to_version_conflict`, `iifvc`, `indexingIndexFailedDueToVersionConflict`::
+Number of failed indexing operations due to version conflict, such as `0`.
 `merges.current`, `mc`, `mergesCurrent`::
 Number of current merge operations, such as `0`.

View file

@@ -0,0 +1,5 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Description*
Computes the MD5 hash of the input.

View file

@@ -0,0 +1,5 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Description*
Computes the SHA1 hash of the input.

View file

@@ -0,0 +1,5 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Description*
Computes the SHA256 hash of the input.

View file

@@ -0,0 +1,13 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Example*
[source.merge.styled,esql]
----
include::{esql-specs}/hash.csv-spec[tag=hash]
----
[%header.monospaced.styled,format=dsv,separator=|]
|===
include::{esql-specs}/hash.csv-spec[tag=hash-result]
|===

View file

@@ -0,0 +1,13 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Example*
[source.merge.styled,esql]
----
include::{esql-specs}/hash.csv-spec[tag=md5]
----
[%header.monospaced.styled,format=dsv,separator=|]
|===
include::{esql-specs}/hash.csv-spec[tag=md5-result]
|===

View file

@@ -0,0 +1,13 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Example*
[source.merge.styled,esql]
----
include::{esql-specs}/hash.csv-spec[tag=sha1]
----
[%header.monospaced.styled,format=dsv,separator=|]
|===
include::{esql-specs}/hash.csv-spec[tag=sha1-result]
|===

View file

@@ -0,0 +1,13 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Example*
[source.merge.styled,esql]
----
include::{esql-specs}/hash.csv-spec[tag=sha256]
----
[%header.monospaced.styled,format=dsv,separator=|]
|===
include::{esql-specs}/hash.csv-spec[tag=sha256-result]
|===

View file

@@ -77,6 +77,9 @@
       "returnType" : "keyword"
     }
   ],
+  "examples" : [
+    "FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL md5 = hash(\"md5\", message), sha256 = hash(\"sha256\", message) \n| KEEP message, md5, sha256;"
+  ],
   "preview" : false,
   "snapshot_only" : false
 }

View file

@@ -0,0 +1,37 @@
{
  "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "md5",
"description" : "Computes the MD5 hash of the input.",
"signatures" : [
{
"params" : [
{
"name" : "input",
"type" : "keyword",
"optional" : false,
"description" : "Input to hash."
}
],
"variadic" : false,
"returnType" : "keyword"
},
{
"params" : [
{
"name" : "input",
"type" : "text",
"optional" : false,
"description" : "Input to hash."
}
],
"variadic" : false,
"returnType" : "keyword"
}
],
"examples" : [
"FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL md5 = md5(message)\n| KEEP message, md5;"
],
"preview" : false,
"snapshot_only" : false
}

View file

@@ -0,0 +1,37 @@
{
  "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "sha1",
"description" : "Computes the SHA1 hash of the input.",
"signatures" : [
{
"params" : [
{
"name" : "input",
"type" : "keyword",
"optional" : false,
"description" : "Input to hash."
}
],
"variadic" : false,
"returnType" : "keyword"
},
{
"params" : [
{
"name" : "input",
"type" : "text",
"optional" : false,
"description" : "Input to hash."
}
],
"variadic" : false,
"returnType" : "keyword"
}
],
"examples" : [
"FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL sha1 = sha1(message)\n| KEEP message, sha1;"
],
"preview" : false,
"snapshot_only" : false
}

View file

@@ -0,0 +1,37 @@
{
  "comment" : "This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.",
"type" : "eval",
"name" : "sha256",
"description" : "Computes the SHA256 hash of the input.",
"signatures" : [
{
"params" : [
{
"name" : "input",
"type" : "keyword",
"optional" : false,
"description" : "Input to hash."
}
],
"variadic" : false,
"returnType" : "keyword"
},
{
"params" : [
{
"name" : "input",
"type" : "text",
"optional" : false,
"description" : "Input to hash."
}
],
"variadic" : false,
"returnType" : "keyword"
}
],
"examples" : [
"FROM sample_data \n| WHERE message != \"Connection error\"\n| EVAL sha256 = sha256(message)\n| KEEP message, sha256;"
],
"preview" : false,
"snapshot_only" : false
}

View file

@@ -5,3 +5,9 @@ This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../READ
 ### HASH
 Computes the hash of the input using various algorithms such as MD5, SHA, SHA-224, SHA-256, SHA-384, SHA-512.
+```
+FROM sample_data
+| WHERE message != "Connection error"
+| EVAL md5 = hash("md5", message), sha256 = hash("sha256", message)
+| KEEP message, md5, sha256;
+```

View file

@@ -0,0 +1,13 @@
<!--
This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
-->
### MD5
Computes the MD5 hash of the input.
```
FROM sample_data
| WHERE message != "Connection error"
| EVAL md5 = md5(message)
| KEEP message, md5;
```

View file

@@ -0,0 +1,13 @@
<!--
This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
-->
### SHA1
Computes the SHA1 hash of the input.
```
FROM sample_data
| WHERE message != "Connection error"
| EVAL sha1 = sha1(message)
| KEEP message, sha1;
```

View file

@@ -0,0 +1,13 @@
<!--
This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
-->
### SHA256
Computes the SHA256 hash of the input.
```
FROM sample_data
| WHERE message != "Connection error"
| EVAL sha256 = sha256(message)
| KEEP message, sha256;
```

View file

@@ -12,3 +12,4 @@ image::esql/functions/signature/hash.svg[Embedded,opts=inline]
 include::../parameters/hash.asciidoc[]
 include::../description/hash.asciidoc[]
 include::../types/hash.asciidoc[]
+include::../examples/hash.asciidoc[]

View file

@@ -0,0 +1,15 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
[discrete]
[[esql-md5]]
=== `MD5`
*Syntax*
[.text-center]
image::esql/functions/signature/md5.svg[Embedded,opts=inline]
include::../parameters/md5.asciidoc[]
include::../description/md5.asciidoc[]
include::../types/md5.asciidoc[]
include::../examples/md5.asciidoc[]

View file

@@ -0,0 +1,15 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
[discrete]
[[esql-sha1]]
=== `SHA1`
*Syntax*
[.text-center]
image::esql/functions/signature/sha1.svg[Embedded,opts=inline]
include::../parameters/sha1.asciidoc[]
include::../description/sha1.asciidoc[]
include::../types/sha1.asciidoc[]
include::../examples/sha1.asciidoc[]

View file

@@ -0,0 +1,15 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
[discrete]
[[esql-sha256]]
=== `SHA256`
*Syntax*
[.text-center]
image::esql/functions/signature/sha256.svg[Embedded,opts=inline]
include::../parameters/sha256.asciidoc[]
include::../description/sha256.asciidoc[]
include::../types/sha256.asciidoc[]
include::../examples/sha256.asciidoc[]

View file

@@ -0,0 +1,6 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Parameters*
`input`::
Input to hash.

View file

@@ -0,0 +1,6 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Parameters*
`input`::
Input to hash.

View file

@@ -0,0 +1,6 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Parameters*
`input`::
Input to hash.

View file

@@ -0,0 +1 @@
<svg version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg" width="240" height="46" viewbox="0 0 240 46"><defs><style type="text/css">#guide .c{fill:none;stroke:#222222;}#guide .k{fill:#000000;font-family:Roboto Mono,Sans-serif;font-size:20px;}#guide .s{fill:#e4f4ff;stroke:#222222;}#guide .syn{fill:#8D8D8D;font-family:Roboto Mono,Sans-serif;font-size:20px;}</style></defs><path class="c" d="M0 31h5m56 0h10m32 0h10m80 0h10m32 0h5"/><rect class="s" x="5" y="5" width="56" height="36"/><text class="k" x="15" y="31">MD5</text><rect class="s" x="71" y="5" width="32" height="36" rx="7"/><text class="syn" x="81" y="31">(</text><rect class="s" x="113" y="5" width="80" height="36" rx="7"/><text class="k" x="123" y="31">input</text><rect class="s" x="203" y="5" width="32" height="36" rx="7"/><text class="syn" x="213" y="31">)</text></svg>


View file

@@ -0,0 +1 @@
<svg version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg" width="252" height="46" viewbox="0 0 252 46"><defs><style type="text/css">#guide .c{fill:none;stroke:#222222;}#guide .k{fill:#000000;font-family:Roboto Mono,Sans-serif;font-size:20px;}#guide .s{fill:#e4f4ff;stroke:#222222;}#guide .syn{fill:#8D8D8D;font-family:Roboto Mono,Sans-serif;font-size:20px;}</style></defs><path class="c" d="M0 31h5m68 0h10m32 0h10m80 0h10m32 0h5"/><rect class="s" x="5" y="5" width="68" height="36"/><text class="k" x="15" y="31">SHA1</text><rect class="s" x="83" y="5" width="32" height="36" rx="7"/><text class="syn" x="93" y="31">(</text><rect class="s" x="125" y="5" width="80" height="36" rx="7"/><text class="k" x="135" y="31">input</text><rect class="s" x="215" y="5" width="32" height="36" rx="7"/><text class="syn" x="225" y="31">)</text></svg>


View file

@@ -0,0 +1 @@
<svg version="1.1" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns="http://www.w3.org/2000/svg" width="276" height="46" viewbox="0 0 276 46"><defs><style type="text/css">#guide .c{fill:none;stroke:#222222;}#guide .k{fill:#000000;font-family:Roboto Mono,Sans-serif;font-size:20px;}#guide .s{fill:#e4f4ff;stroke:#222222;}#guide .syn{fill:#8D8D8D;font-family:Roboto Mono,Sans-serif;font-size:20px;}</style></defs><path class="c" d="M0 31h5m92 0h10m32 0h10m80 0h10m32 0h5"/><rect class="s" x="5" y="5" width="92" height="36"/><text class="k" x="15" y="31">SHA256</text><rect class="s" x="107" y="5" width="32" height="36" rx="7"/><text class="syn" x="117" y="31">(</text><rect class="s" x="149" y="5" width="80" height="36" rx="7"/><text class="k" x="159" y="31">input</text><rect class="s" x="239" y="5" width="32" height="36" rx="7"/><text class="syn" x="249" y="31">)</text></svg>


View file

@@ -18,11 +18,14 @@
 * <<esql-length>>
 * <<esql-locate>>
 * <<esql-ltrim>>
+* <<esql-md5>>
 * <<esql-repeat>>
 * <<esql-replace>>
 * <<esql-reverse>>
 * <<esql-right>>
 * <<esql-rtrim>>
+* <<esql-sha1>>
+* <<esql-sha256>>
 * <<esql-space>>
 * <<esql-split>>
 * <<esql-starts_with>>
@@ -43,11 +46,14 @@ include::layout/left.asciidoc[]
 include::layout/length.asciidoc[]
 include::layout/locate.asciidoc[]
 include::layout/ltrim.asciidoc[]
+include::layout/md5.asciidoc[]
 include::layout/repeat.asciidoc[]
 include::layout/replace.asciidoc[]
 include::layout/reverse.asciidoc[]
 include::layout/right.asciidoc[]
 include::layout/rtrim.asciidoc[]
+include::layout/sha1.asciidoc[]
+include::layout/sha256.asciidoc[]
 include::layout/space.asciidoc[]
 include::layout/split.asciidoc[]
 include::layout/starts_with.asciidoc[]

View file

@@ -0,0 +1,10 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Supported types*
[%header.monospaced.styled,format=dsv,separator=|]
|===
input | result
keyword | keyword
text | keyword
|===

View file

@@ -0,0 +1,10 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Supported types*
[%header.monospaced.styled,format=dsv,separator=|]
|===
input | result
keyword | keyword
text | keyword
|===

View file

@@ -0,0 +1,10 @@
// This is generated by ESQL's AbstractFunctionTestCase. Do not edit it. See ../README.md for how to regenerate it.
*Supported types*
[%header.monospaced.styled,format=dsv,separator=|]
|===
input | result
keyword | keyword
text | keyword
|===

View file

@@ -557,4 +557,3 @@ The API returns the following results:
 // TESTRESPONSE[s/"job_version" : "8.4.0"/"job_version" : $body.job_version/]
 // TESTRESPONSE[s/1656087283340/$body.$_path/]
 // TESTRESPONSE[s/"superuser"/"_es_test_root"/]
-// TESTRESPONSE[s/"ignore_throttled" : true/"ignore_throttled" : true,"failure_store":"exclude"/]

View file

@@ -350,7 +350,7 @@ include::repository-shared-settings.asciidoc[]
 will disable retries altogether. Note that if retries are enabled in the Azure client, each of these retries
 comprises that many client-level retries.
 `get_register_retry_delay`::
 (<<time-units,time value>>) Sets the time to wait before trying again if an attempt to read a
 <<repository-s3-linearizable-registers,linearizable register>> fails. Defaults to `5s`.

View file

@@ -37,6 +37,8 @@ public interface EntitlementChecker {
     void check$java_lang_Runtime$halt(Class<?> callerClass, Runtime runtime, int status);
+    void check$java_lang_System$$exit(Class<?> callerClass, int status);
     ////////////////////
     //
     // ClassLoader ctor

View file

@@ -83,6 +83,7 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
     private static final Map<String, CheckAction> checkActions = Map.ofEntries(
         entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)),
         entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)),
+        entry("system_exit", deniedToPlugins(RestEntitlementsCheckAction::systemExit)),
         entry("create_classloader", forPlugins(RestEntitlementsCheckAction::createClassLoader)),
         entry("processBuilder_start", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_start)),
         entry("processBuilder_startPipeline", deniedToPlugins(RestEntitlementsCheckAction::processBuilder_startPipeline)),
@@ -153,6 +154,11 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
         Runtime.getRuntime().halt(123);
     }
+    @SuppressForbidden(reason = "Specifically testing System.exit")
+    private static void systemExit() {
+        System.exit(123);
+    }
     private static void createClassLoader() {
         try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) {
             logger.info("Created URLClassLoader [{}]", classLoader.getName());

View file

@@ -51,6 +51,11 @@ public class ElasticsearchEntitlementChecker implements EntitlementChecker {
         policyManager.checkExitVM(callerClass);
     }
+    @Override
+    public void check$java_lang_System$$exit(Class<?> callerClass, int status) {
+        policyManager.checkExitVM(callerClass);
+    }
     @Override
     public void check$java_lang_ClassLoader$(Class<?> callerClass) {
         policyManager.checkCreateClassLoader(callerClass);

View file

@@ -27,7 +27,7 @@ import java.util.Objects;
 import java.util.Set;
 import java.util.function.Function;
-import static org.elasticsearch.common.settings.Setting.Property.DeprecatedWarning;
+import static org.elasticsearch.common.settings.Setting.Property.Deprecated;
 import static org.elasticsearch.common.settings.Setting.Property.NodeScope;
 import static org.elasticsearch.common.settings.Setting.Property.OperatorDynamic;
@@ -250,7 +250,7 @@ public class APMAgentSettings {
     TELEMETRY_SETTING_PREFIX + "agent.",
     LEGACY_TRACING_APM_SETTING_PREFIX + "agent.",
     (namespace, qualifiedKey) -> qualifiedKey.startsWith(LEGACY_TRACING_APM_SETTING_PREFIX)
-        ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, DeprecatedWarning)
+        ? concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic, Deprecated)
         : concreteAgentSetting(namespace, qualifiedKey, NodeScope, OperatorDynamic)
 );
@@ -262,7 +262,7 @@
     LEGACY_TRACING_APM_SETTING_PREFIX + "names.include",
     OperatorDynamic,
     NodeScope,
-    DeprecatedWarning
+    Deprecated
 );
 public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_INCLUDE_SETTING = Setting.listSetting(
@@ -281,7 +281,7 @@
     LEGACY_TRACING_APM_SETTING_PREFIX + "names.exclude",
     OperatorDynamic,
     NodeScope,
-    DeprecatedWarning
+    Deprecated
 );
 public static final Setting<List<String>> TELEMETRY_TRACING_NAMES_EXCLUDE_SETTING = Setting.listSetting(
@@ -314,7 +314,7 @@
     ),
     OperatorDynamic,
     NodeScope,
-    DeprecatedWarning
+    Deprecated
 );
 public static final Setting<List<String>> TELEMETRY_TRACING_SANITIZE_FIELD_NAMES = Setting.listSetting(
@@ -334,7 +334,7 @@
     false,
     OperatorDynamic,
     NodeScope,
-    DeprecatedWarning
+    Deprecated
 );
 public static final Setting<Boolean> TELEMETRY_TRACING_ENABLED_SETTING = Setting.boolSetting(
@@ -358,7 +358,7 @@
 public static final Setting<SecureString> TRACING_APM_SECRET_TOKEN_SETTING = SecureSetting.secureString(
     LEGACY_TRACING_APM_SETTING_PREFIX + "secret_token",
     null,
-    DeprecatedWarning
+    Deprecated
 );
 public static final Setting<SecureString> TELEMETRY_SECRET_TOKEN_SETTING = SecureSetting.secureString(
@@ -373,7 +373,7 @@
 public static final Setting<SecureString> TRACING_APM_API_KEY_SETTING = SecureSetting.secureString(
     LEGACY_TRACING_APM_SETTING_PREFIX + "api_key",
     null,
-    DeprecatedWarning
+    Deprecated
 );
 public static final Setting<SecureString> TELEMETRY_API_KEY_SETTING = SecureSetting.secureString(

View file

@@ -20,6 +20,7 @@ restResources {
 dependencies {
     testImplementation project(path: ':test:test-clusters')
+    testImplementation project(":modules:mapper-extras")
     internalClusterTestImplementation project(":modules:mapper-extras")
 }
@@ -70,6 +71,18 @@ tasks.named("yamlRestCompatTestTransform").configure({ task ->
     task.skipTest("data_stream/200_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Configuring the failure store via data stream templates is not supported anymore.")
     task.skipTest("data_stream/200_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Configuring the failure store via data stream templates is not supported anymore.")
     task.skipTest("data_stream/200_rollover_failure_store/Roll over a data stream's failure store with conditions", "Configuring the failure store via data stream templates is not supported anymore.")
+    task.skipTest("data_stream/200_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.")
+    task.skipTest("data_stream/200_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", "Rolling over a data stream using target_failure_store is no longer supported.")
+    task.skipTest("data_stream/210_rollover_failure_store/A failure store marked for lazy rollover should only be rolled over when there is a failure", "Rolling over a data stream using target_failure_store is no longer supported.")
+    task.skipTest("data_stream/210_rollover_failure_store/Don't roll over a data stream's failure store when conditions aren't met", "Rolling over a data stream using target_failure_store is no longer supported.")
+    task.skipTest("data_stream/210_rollover_failure_store/Rolling over a failure store on a data stream without the failure store enabled should work", "Rolling over a data stream using target_failure_store is no longer supported.")
+    task.skipTest("data_stream/210_rollover_failure_store/Rolling over an uninitialized failure store should initialize it", "Rolling over a data stream using target_failure_store is no longer supported.")
+    task.skipTest("data_stream/210_rollover_failure_store/Roll over a data stream's failure store with conditions", "Rolling over a data stream using target_failure_store is no longer supported.")
+    task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after an ingest failure", "Rolling over a data stream using target_failure_store is no longer supported.")
+    task.skipTest("data_stream/210_rollover_failure_store/Lazily roll over a data stream's failure store after a shard failure", "Rolling over a data stream using target_failure_store is no longer supported.")
+    task.skipTest("data_stream/210_rollover_failure_store/Roll over a data stream's failure store without conditions", "Rolling over a data stream using target_failure_store is no longer supported.")
 })
 configurations {

View file

@@ -527,7 +527,7 @@ public class DataStreamAutoshardingIT extends ESIntegTestCase {
         CommonStats stats = new CommonStats();
         stats.docs = new DocsStats(100, 0, randomByteSizeValue().getBytes());
         stats.store = new StoreStats();
-        stats.indexing = new IndexingStats(new IndexingStats.Stats(1, 1, 1, 1, 1, 1, 1, 1, false, 1, targetWriteLoad, 1));
+        stats.indexing = new IndexingStats(new IndexingStats.Stats(1, 1, 1, 1, 1, 1, 1, 1, 1, false, 1, targetWriteLoad, 1));
         return new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null, null, null, false, 0);
     }

View file

@@ -31,11 +31,13 @@ import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteCom
 import org.elasticsearch.action.datastreams.CreateDataStreamAction;
 import org.elasticsearch.action.datastreams.DeleteDataStreamAction;
 import org.elasticsearch.action.datastreams.GetDataStreamAction;
+import org.elasticsearch.action.support.IndexComponentSelector;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.DataStreamAlias;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.index.Index;
@@ -136,10 +138,7 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
         assertTrue(response.isAcknowledged());
         // Initialize the failure store.
-        RolloverRequest rolloverRequest = new RolloverRequest("with-fs", null);
-        rolloverRequest.setIndicesOptions(
-            IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build()
-        );
+        RolloverRequest rolloverRequest = new RolloverRequest("with-fs::failures", null);
         response = client.execute(RolloverAction.INSTANCE, rolloverRequest).get();
         assertTrue(response.isAcknowledged());
@@ -345,7 +344,7 @@ public class DataStreamsSnapshotsIT extends AbstractSnapshotIntegTestCase {
             .cluster()
             .prepareCreateSnapshot(TEST_REQUEST_TIMEOUT, REPO, SNAPSHOT)
             .setWaitForCompletion(true)
-            .setIndices(dataStreamName)
+            .setIndices(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.ALL_APPLICABLE))
             .setIncludeGlobalState(false)
             .get();

View file

@@ -20,11 +20,12 @@ import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.FailureStoreMetrics;
 import org.elasticsearch.action.datastreams.CreateDataStreamAction;
 import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.IndexComponentSelector;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
 import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.Template;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.core.Strings;
@@ -194,9 +195,9 @@ public class IngestFailureStoreMetricsIT extends ESIntegTestCase {
         createDataStream();
         // Initialize failure store.
-        var rolloverRequest = new RolloverRequest(dataStream, null);
-        rolloverRequest.setIndicesOptions(
-            IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build()
-        );
+        var rolloverRequest = new RolloverRequest(
+            IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES),
+            null
+        );
         var rolloverResponse = client().execute(RolloverAction.INSTANCE, rolloverRequest).actionGet();
         var failureStoreIndex = rolloverResponse.getNewIndex();

View file

@@ -60,7 +60,7 @@ public class DataStreamOptionsIT extends DisabledSecurityDataStreamTestCase {
         assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME)));
         // Initialize the failure store.
-        assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store")));
+        assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "::failures/_rollover")));
         ensureGreen(DATA_STREAM_NAME);
         final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME));
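
For readers tracking this API change across the test files: the failure-store rollover target moved from a query parameter to a `::failures` selector suffix on the data stream name. Side by side, as low-level REST client calls (a sketch using a hypothetical data stream name; both path shapes appear verbatim in this diff):

```java
// Old form, removed in this commit: failure store addressed via a query parameter.
Request oldForm = new Request("POST", "/my-data-stream/_rollover?target_failure_store");
// New form: failure store addressed via the ::failures selector suffix.
Request newForm = new Request("POST", "/my-data-stream::failures/_rollover");
```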

View file

@@ -1,221 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.datastreams;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.junit.Before;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
/**
* This should be a yaml test, but in order to write one we would need to expose the new parameter in the rest-api-spec.
* We do not want to do that until the feature flag is removed. For this reason, we temporarily test the affected APIs here.
* Please convert this to a yaml test when the feature flag is removed.
*/
public class FailureStoreQueryParamIT extends DisabledSecurityDataStreamTestCase {
private static final String DATA_STREAM_NAME = "failure-data-stream";
private String backingIndex;
private String failureStoreIndex;
@SuppressWarnings("unchecked")
@Before
public void setup() throws IOException {
Request putComposableIndexTemplateRequest = new Request("POST", "/_index_template/ds-template");
putComposableIndexTemplateRequest.setJsonEntity("""
{
"index_patterns": ["failure-data-stream"],
"template": {
"settings": {
"number_of_replicas": 0
},
"data_stream_options": {
"failure_store": {
"enabled": true
}
}
},
"data_stream": {
}
}
""");
assertOK(client().performRequest(putComposableIndexTemplateRequest));
assertOK(client().performRequest(new Request("PUT", "/_data_stream/" + DATA_STREAM_NAME)));
// Initialize the failure store.
assertOK(client().performRequest(new Request("POST", DATA_STREAM_NAME + "/_rollover?target_failure_store")));
ensureGreen(DATA_STREAM_NAME);
final Response dataStreamResponse = client().performRequest(new Request("GET", "/_data_stream/" + DATA_STREAM_NAME));
List<Object> dataStreams = (List<Object>) entityAsMap(dataStreamResponse).get("data_streams");
assertThat(dataStreams.size(), is(1));
Map<String, Object> dataStream = (Map<String, Object>) dataStreams.get(0);
assertThat(dataStream.get("name"), equalTo(DATA_STREAM_NAME));
List<String> backingIndices = getIndices(dataStream);
assertThat(backingIndices.size(), is(1));
List<String> failureStore = getFailureStore(dataStream);
assertThat(failureStore.size(), is(1));
backingIndex = backingIndices.get(0);
failureStoreIndex = failureStore.get(0);
}
public void testGetIndexApi() throws IOException {
{
final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME));
Map<String, Object> indices = entityAsMap(indicesResponse);
assertThat(indices.size(), is(2));
assertThat(indices.containsKey(backingIndex), is(true));
assertThat(indices.containsKey(failureStoreIndex), is(true));
}
{
final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=exclude"));
Map<String, Object> indices = entityAsMap(indicesResponse);
assertThat(indices.size(), is(1));
assertThat(indices.containsKey(backingIndex), is(true));
}
{
final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "?failure_store=only"));
Map<String, Object> indices = entityAsMap(indicesResponse);
assertThat(indices.size(), is(1));
assertThat(indices.containsKey(failureStoreIndex), is(true));
}
}
@SuppressWarnings("unchecked")
public void testGetIndexStatsApi() throws IOException {
{
final Response statsResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_stats"));
Map<String, Object> indices = (Map<String, Object>) entityAsMap(statsResponse).get("indices");
assertThat(indices.size(), is(1));
assertThat(indices.containsKey(backingIndex), is(true));
}
{
final Response statsResponse = client().performRequest(
new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=include")
);
Map<String, Object> indices = (Map<String, Object>) entityAsMap(statsResponse).get("indices");
assertThat(indices.size(), is(2));
assertThat(indices.containsKey(backingIndex), is(true));
assertThat(indices.containsKey(failureStoreIndex), is(true));
}
{
final Response statsResponse = client().performRequest(
new Request("GET", "/" + DATA_STREAM_NAME + "/_stats?failure_store=only")
);
Map<String, Object> indices = (Map<String, Object>) entityAsMap(statsResponse).get("indices");
assertThat(indices.size(), is(1));
assertThat(indices.containsKey(failureStoreIndex), is(true));
}
}
public void testGetIndexSettingsApi() throws IOException {
{
final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_settings"));
Map<String, Object> indices = entityAsMap(indicesResponse);
assertThat(indices.size(), is(1));
assertThat(indices.containsKey(backingIndex), is(true));
}
{
final Response indicesResponse = client().performRequest(
new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=include")
);
Map<String, Object> indices = entityAsMap(indicesResponse);
assertThat(indices.size(), is(2));
assertThat(indices.containsKey(backingIndex), is(true));
assertThat(indices.containsKey(failureStoreIndex), is(true));
}
{
final Response indicesResponse = client().performRequest(
new Request("GET", "/" + DATA_STREAM_NAME + "/_settings?failure_store=only")
);
Map<String, Object> indices = entityAsMap(indicesResponse);
assertThat(indices.size(), is(1));
assertThat(indices.containsKey(failureStoreIndex), is(true));
}
}
public void testGetIndexMappingApi() throws IOException {
{
final Response indicesResponse = client().performRequest(new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping"));
Map<String, Object> indices = entityAsMap(indicesResponse);
assertThat(indices.size(), is(1));
assertThat(indices.containsKey(backingIndex), is(true));
}
{
final Response indicesResponse = client().performRequest(
new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include")
);
Map<String, Object> indices = entityAsMap(indicesResponse);
assertThat(indices.size(), is(2));
assertThat(indices.containsKey(backingIndex), is(true));
assertThat(indices.containsKey(failureStoreIndex), is(true));
}
{
final Response indicesResponse = client().performRequest(
new Request("GET", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=only")
);
Map<String, Object> indices = entityAsMap(indicesResponse);
assertThat(indices.size(), is(1));
assertThat(indices.containsKey(failureStoreIndex), is(true));
}
}
@SuppressWarnings("unchecked")
public void testPutIndexMappingApi() throws IOException {
{
final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping");
mappingRequest.setJsonEntity("""
{
"properties": {
"email": {
"type": "keyword"
}
}
}
""");
assertAcknowledged(client().performRequest(mappingRequest));
}
{
final Request mappingRequest = new Request("PUT", "/" + DATA_STREAM_NAME + "/_mapping?failure_store=include");
mappingRequest.setJsonEntity("""
{
"properties": {
"email": {
"type": "keyword"
}
}
}
""");
ResponseException responseException = expectThrows(ResponseException.class, () -> client().performRequest(mappingRequest));
Map<String, Object> response = entityAsMap(responseException.getResponse());
assertThat(((Map<String, Object>) response.get("error")).get("reason"), is("failure index not supported"));
}
}
@SuppressWarnings("unchecked")
private List<String> getFailureStore(Map<String, Object> response) {
var failureStore = (Map<String, Object>) response.get("failure_store");
return getIndices(failureStore);
}
@SuppressWarnings("unchecked")
private List<String> getIndices(Map<String, Object> response) {
List<Map<String, String>> indices = (List<Map<String, String>>) response.get("indices");
return indices.stream().map(index -> index.get("index_name")).toList();
}
}

View file

@@ -16,6 +16,7 @@ import org.elasticsearch.action.datastreams.DataStreamsActionUtil;
 import org.elasticsearch.action.datastreams.DataStreamsStatsAction;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.IndexComponentSelector;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -106,10 +107,11 @@ public class DataStreamsStatsTransportAction extends TransportBroadcastByNodeAct
     @Override
     protected String[] resolveConcreteIndexNames(ClusterState clusterState, DataStreamsStatsAction.Request request) {
-        return DataStreamsActionUtil.resolveConcreteIndexNames(
+        return DataStreamsActionUtil.resolveConcreteIndexNamesWithSelector(
             indexNameExpressionResolver,
             projectResolver.getProjectMetadata(clusterState),
             request.indices(),
+            IndexComponentSelector.ALL_APPLICABLE,
             request.indicesOptions()
         ).toArray(String[]::new);
     }
@@ -166,13 +168,17 @@ public class DataStreamsStatsTransportAction extends TransportBroadcastByNodeAct
         // or if a backing index simply has no shards allocated, it would be excluded from the counts if we only used
         // shard results to calculate.
         List<String> abstractionNames = indexNameExpressionResolver.dataStreamNames(project, request.indicesOptions(), request.indices());
-        for (String abstractionName : abstractionNames) {
-            IndexAbstraction indexAbstraction = indicesLookup.get(abstractionName);
+        for (String abstraction : abstractionNames) {
+            IndexAbstraction indexAbstraction = indicesLookup.get(abstraction);
             assert indexAbstraction != null;
             if (indexAbstraction.getType() == IndexAbstraction.Type.DATA_STREAM) {
                 DataStream dataStream = (DataStream) indexAbstraction;
                 AggregatedStats stats = aggregatedDataStreamsStats.computeIfAbsent(dataStream.getName(), s -> new AggregatedStats());
-                dataStream.getIndices().stream().map(Index::getName).forEach(index -> {
+                dataStream.getBackingIndices().getIndices().stream().map(Index::getName).forEach(index -> {
+                    stats.backingIndices.add(index);
+                    allBackingIndices.add(index);
+                });
+                dataStream.getFailureIndices().getIndices().stream().map(Index::getName).forEach(index -> {
                     stats.backingIndices.add(index);
                     allBackingIndices.add(index);
                 });
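With `ALL_APPLICABLE`, data stream stats now cover the failure store as well as the backing indices. A rough sketch of what the resolver call above expands to (the expressions, metadata variable, and options constant are illustrative, not from the commit):

```java
// Hypothetical sketch: a selector-aware resolution of a data stream expression.
// Plain names map to backing indices, "::failures" maps to failure indices,
// and ALL_APPLICABLE covers both component types at once.
String[] concreteIndices = DataStreamsActionUtil.resolveConcreteIndexNamesWithSelector(
    indexNameExpressionResolver,
    projectMetadata,                        // project-scoped metadata, as in the hunk above
    new String[] { "logs-*" },              // user-supplied expressions
    IndexComponentSelector.ALL_APPLICABLE,  // expand to data AND failure indices
    IndicesOptions.lenientExpandOpen()
).toArray(String[]::new);
// e.g. [".ds-logs-app-2025.01.09-000001", ".fs-logs-app-2025.01.09-000001"]
```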

@@ -33,7 +33,7 @@ import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry;
 import org.elasticsearch.action.downsample.DownsampleAction;
 import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.IndexComponentSelector;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.client.internal.Client;
@@ -49,6 +49,9 @@ import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.cluster.metadata.IndexAbstraction;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterService;
@@ -948,11 +951,6 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab
         if ((configuredFloorSegmentMerge == null || configuredFloorSegmentMerge.equals(targetMergePolicyFloorSegment) == false)
             || (configuredMergeFactor == null || configuredMergeFactor.equals(targetMergePolicyFactor) == false)) {
             UpdateSettingsRequest updateMergePolicySettingsRequest = new UpdateSettingsRequest();
-            updateMergePolicySettingsRequest.indicesOptions(
-                IndicesOptions.builder(updateMergePolicySettingsRequest.indicesOptions())
-                    .selectorOptions(IndicesOptions.SelectorOptions.ALL_APPLICABLE)
-                    .build()
-            );
             updateMergePolicySettingsRequest.indices(indexName);
             updateMergePolicySettingsRequest.settings(
                 Settings.builder()
@@ -1002,8 +1000,11 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab
     private void rolloverDataStream(String writeIndexName, RolloverRequest rolloverRequest, ActionListener<Void> listener) {
         // "saving" the rollover target name here so we don't capture the entire request
-        String rolloverTarget = rolloverRequest.getRolloverTarget();
-        logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverTarget);
+        ResolvedExpression resolvedRolloverTarget = SelectorResolver.parseExpression(
+            rolloverRequest.getRolloverTarget(),
+            rolloverRequest.indicesOptions()
+        );
+        logger.trace("Data stream lifecycle issues rollover request for data stream [{}]", rolloverRequest.getRolloverTarget());
         client.admin().indices().rolloverIndex(rolloverRequest, new ActionListener<>() {
             @Override
             public void onResponse(RolloverResponse rolloverResponse) {
@@ -1018,7 +1019,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab
                 logger.info(
                     "Data stream lifecycle successfully rolled over datastream [{}] due to the following met rollover "
                         + "conditions {}. The new index is [{}]",
-                    rolloverTarget,
+                    rolloverRequest.getRolloverTarget(),
                     metConditions,
                     rolloverResponse.getNewIndex()
                 );
@@ -1028,7 +1029,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab
             @Override
             public void onFailure(Exception e) {
-                DataStream dataStream = clusterService.state().metadata().getProject().dataStreams().get(rolloverTarget);
+                DataStream dataStream = clusterService.state().metadata().getProject().dataStreams().get(resolvedRolloverTarget.resource());
                 if (dataStream == null || dataStream.getWriteIndex().getName().equals(writeIndexName) == false) {
                     // the data stream has another write index so no point in recording an error for the previous write index we were
                     // attempting to roll over
@@ -1411,9 +1412,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab
         ) {
             RolloverRequest rolloverRequest = new RolloverRequest(dataStream, null).masterNodeTimeout(TimeValue.MAX_VALUE);
             if (rolloverFailureStore) {
-                rolloverRequest.setIndicesOptions(
-                    IndicesOptions.builder(rolloverRequest.indicesOptions()).selectorOptions(IndicesOptions.SelectorOptions.FAILURES).build()
-                );
+                rolloverRequest.setRolloverTarget(IndexNameExpressionResolver.combineSelector(dataStream, IndexComponentSelector.FAILURES));
             }
             rolloverRequest.setConditions(rolloverConfiguration.resolveRolloverConditions(dataRetention));
             return rolloverRequest;
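The selector now rides along in the expression itself instead of in `IndicesOptions.SelectorOptions`. A hedged round-trip sketch using the two helpers from the hunks above (the data stream name and options constant are illustrative):

```java
// Attach the failure-store selector to the rollover target...
String target = IndexNameExpressionResolver.combineSelector("logs-app", IndexComponentSelector.FAILURES);
// ...which is expected to yield "logs-app::failures", the same syntax the REST layer accepts.

// Later, split the expression back into its resource and selector parts.
IndexNameExpressionResolver.ResolvedExpression resolved = IndexNameExpressionResolver.SelectorResolver.parseExpression(
    target,
    IndicesOptions.strictExpandOpen()
);
assert "logs-app".equals(resolved.resource());
```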

@@ -50,7 +50,7 @@ public class DeleteDataStreamLifecycleAction {
             .allowAliasToMultipleIndices(false)
             .allowClosedIndices(true)
             .ignoreThrottled(false)
-            .allowFailureIndices(false)
+            .allowSelectors(false)
             .build()
         )
         .build();

@@ -39,7 +39,9 @@ public class DeleteDataStreamOptionsAction {
         .wildcardOptions(
             IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false)
         )
-        .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true))
+        .gatekeeperOptions(
+            IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false)
+        )
         .build();

     public Request(StreamInput in) throws IOException {

@@ -50,7 +50,9 @@ public class GetDataStreamOptionsAction {
         .wildcardOptions(
             IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false)
         )
-        .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true))
+        .gatekeeperOptions(
+            IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false)
+        )
         .build();

     private boolean includeDefaults = false;

@@ -71,7 +71,9 @@ public class PutDataStreamOptionsAction {
         .wildcardOptions(
             IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false)
         )
-        .gatekeeperOptions(IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true))
+        .gatekeeperOptions(
+            IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false)
+        )
         .build();

     private final DataStreamOptions options;
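All three data stream options actions converge on the same gatekeeper shape. A hedged sketch of what `allowSelectors(false)` enforces (the rejected expression is illustrative; the builder calls are the ones in the hunks):

```java
// With allowSelectors(false), component selectors are rejected during
// expression resolution, so an expression such as "logs-app::failures"
// fails fast instead of silently targeting failure indices.
IndicesOptions options = IndicesOptions.builder()
    .wildcardOptions(
        IndicesOptions.WildcardOptions.builder().matchOpen(true).matchClosed(true).allowEmptyExpressions(true).resolveAliases(false)
    )
    .gatekeeperOptions(
        IndicesOptions.GatekeeperOptions.builder().allowAliasToMultipleIndices(false).allowClosedIndices(true).allowSelectors(false)
    )
    .build();
```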

@@ -11,7 +11,6 @@ package org.elasticsearch.datastreams.rest;
 import org.elasticsearch.action.datastreams.GetDataStreamAction;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.util.set.Sets;
@@ -42,8 +41,7 @@ public class RestGetDataStreamsAction extends BaseRestHandler {
                 IndicesOptions.WildcardOptions.ALLOW_NO_INDICES,
                 IndicesOptions.GatekeeperOptions.IGNORE_THROTTLED,
                 "verbose"
-            ),
-            DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(IndicesOptions.FAILURE_STORE_QUERY_PARAM) : Set.of()
+            )
         )
     );

@@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
 import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteComposableIndexTemplateAction;
 import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
+import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.datastreams.CreateDataStreamAction;
 import org.elasticsearch.action.datastreams.DataStreamsStatsAction;
 import org.elasticsearch.action.datastreams.DeleteDataStreamAction;
@@ -22,8 +23,12 @@ import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
+import org.elasticsearch.cluster.metadata.DataStreamFailureStore;
+import org.elasticsearch.cluster.metadata.DataStreamOptions;
+import org.elasticsearch.cluster.metadata.ResettableValue;
 import org.elasticsearch.cluster.metadata.Template;
 import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.xcontent.json.JsonXContent;
@@ -40,12 +45,14 @@ import java.util.concurrent.TimeUnit;
 import static java.lang.Math.max;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.is;

 public class DataStreamsStatsTests extends ESSingleNodeTestCase {

     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
-        return List.of(DataStreamsPlugin.class);
+        return List.of(DataStreamsPlugin.class, MapperExtrasPlugin.class);
     }

     private final Set<String> createdDataStreams = new HashSet<>();
@@ -107,8 +114,30 @@ public class DataStreamsStatsTests extends ESSingleNodeTestCase {
         assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes());
     }

+    public void testStatsExistingDataStreamWithFailureStores() throws Exception {
+        String dataStreamName = createDataStream(false, true);
+        createFailedDocument(dataStreamName);
+
+        DataStreamsStatsAction.Response stats = getDataStreamsStats();
+
+        assertEquals(2, stats.getSuccessfulShards());
+        assertEquals(0, stats.getFailedShards());
+        assertEquals(1, stats.getDataStreamCount());
+        assertEquals(2, stats.getBackingIndices());
+        assertNotEquals(0L, stats.getTotalStoreSize().getBytes());
+        assertEquals(1, stats.getDataStreams().length);
+        assertEquals(dataStreamName, stats.getDataStreams()[0].getDataStream());
+        assertEquals(2, stats.getDataStreams()[0].getBackingIndices());
+        // The timestamp is going to not be something we can validate because
+        // it captures the time of failure which is uncontrolled in the test
+        // Just make sure it exists by ensuring it isn't zero
+        assertThat(stats.getDataStreams()[0].getMaximumTimestamp(), is(greaterThan(0L)));
+        assertNotEquals(0L, stats.getDataStreams()[0].getStoreSize().getBytes());
+        assertEquals(stats.getTotalStoreSize().getBytes(), stats.getDataStreams()[0].getStoreSize().getBytes());
+    }
+
     public void testStatsExistingHiddenDataStream() throws Exception {
-        String dataStreamName = createDataStream(true);
+        String dataStreamName = createDataStream(true, false);
         long timestamp = createDocument(dataStreamName);

         DataStreamsStatsAction.Response stats = getDataStreamsStats(true);
@@ -221,14 +250,19 @@ public class DataStreamsStatsTests extends ESSingleNodeTestCase {
     }

     private String createDataStream() throws Exception {
-        return createDataStream(false);
+        return createDataStream(false, false);
     }

-    private String createDataStream(boolean hidden) throws Exception {
+    private String createDataStream(boolean hidden, boolean failureStore) throws Exception {
         String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.getDefault());
+        ResettableValue<DataStreamOptions.Template> failureStoreOptions = failureStore == false
+            ? ResettableValue.undefined()
+            : ResettableValue.create(
+                new DataStreamOptions.Template(ResettableValue.create(new DataStreamFailureStore.Template(ResettableValue.create(true))))
+            );
         Template idxTemplate = new Template(null, new CompressedXContent("""
             {"properties":{"@timestamp":{"type":"date"},"data":{"type":"keyword"}}}
-            """), null);
+            """), null, null, failureStoreOptions);
         ComposableIndexTemplate template = ComposableIndexTemplate.builder()
             .indexPatterns(List.of(dataStreamName + "*"))
             .template(idxTemplate)
@@ -269,6 +303,27 @@ public class DataStreamsStatsTests extends ESSingleNodeTestCase {
         return timestamp;
     }

+    private long createFailedDocument(String dataStreamName) throws Exception {
+        // Get some randomized but reasonable timestamps on the data since not all of it is guaranteed to arrive in order.
+        long timeSeed = System.currentTimeMillis();
+        long timestamp = randomLongBetween(timeSeed - TimeUnit.HOURS.toMillis(5), timeSeed);
+        client().bulk(
+            new BulkRequest(dataStreamName).add(
+                new IndexRequest().opType(DocWriteRequest.OpType.CREATE)
+                    .source(
+                        JsonXContent.contentBuilder()
+                            .startObject()
+                            .field("@timestamp", timestamp)
+                            .object("data", b -> b.field("garbage", randomAlphaOfLength(25)))
+                            .endObject()
+                    )
+            )
+        ).get();
+        indicesAdmin().refresh(new RefreshRequest(".fs-" + dataStreamName + "*").indicesOptions(IndicesOptions.lenientExpandOpenHidden()))
+            .get();
+        return timestamp;
+    }
+
     private DataStreamsStatsAction.Response getDataStreamsStats() throws Exception {
         return getDataStreamsStats(false);
     }
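The failed document above is engineered to clash with the template mapping: `data` is mapped as `keyword`, but the bulk request supplies an object for it, so the write is redirected to the `.fs-*` failure index rather than failing the request. A hedged sketch of how the pieces combine (helper names are from this test):

```java
String dataStream = createDataStream(false, true);  // template enables the failure store
createFailedDocument(dataStream);                   // object under a keyword field -> parse failure
DataStreamsStatsAction.Response stats = getDataStreamsStats();
// One ".ds-" backing index plus one ".fs-" failure index are both counted:
assertEquals(2, stats.getDataStreams()[0].getBackingIndices());
```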

@@ -27,7 +27,7 @@ import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry;
 import org.elasticsearch.action.downsample.DownsampleAction;
 import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.IndexComponentSelector;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterName;
@@ -46,6 +46,7 @@ import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
 import org.elasticsearch.cluster.metadata.IndexAbstraction;
 import org.elasticsearch.cluster.metadata.IndexGraveyard;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.metadata.MetadataIndexStateService;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -225,11 +226,12 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
         assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class));
         RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0);
         assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName));
-        assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA));
         assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class));
         RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1);
-        assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName));
-        assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES));
+        assertThat(
+            rolloverFailureIndexRequest.getRolloverTarget(),
+            is(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES))
+        );
         List<DeleteIndexRequest> deleteRequests = clientSeenRequests.subList(2, 5)
             .stream()
             .map(transportRequest -> (DeleteIndexRequest) transportRequest)
@@ -1573,11 +1575,12 @@ public class DataStreamLifecycleServiceTests extends ESTestCase {
         assertThat(clientSeenRequests.get(0), instanceOf(RolloverRequest.class));
         RolloverRequest rolloverBackingIndexRequest = (RolloverRequest) clientSeenRequests.get(0);
         assertThat(rolloverBackingIndexRequest.getRolloverTarget(), is(dataStreamName));
-        assertThat(rolloverBackingIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.DATA));
         assertThat(clientSeenRequests.get(1), instanceOf(RolloverRequest.class));
         RolloverRequest rolloverFailureIndexRequest = (RolloverRequest) clientSeenRequests.get(1);
-        assertThat(rolloverFailureIndexRequest.getRolloverTarget(), is(dataStreamName));
-        assertThat(rolloverFailureIndexRequest.indicesOptions().selectorOptions(), equalTo(IndicesOptions.SelectorOptions.FAILURES));
+        assertThat(
+            rolloverFailureIndexRequest.getRolloverTarget(),
+            is(IndexNameExpressionResolver.combineSelector(dataStreamName, IndexComponentSelector.FAILURES))
+        );
         assertThat(
             ((DeleteIndexRequest) clientSeenRequests.get(2)).indices()[0],
             is(dataStream.getFailureIndices().getIndices().get(0).getName())

@@ -148,8 +148,7 @@
   # rollover data stream to create new failure store index
   - do:
       indices.rollover:
-        alias: "data-stream-for-modification"
-        target_failure_store: true
+        alias: "data-stream-for-modification::failures"
   - is_true: acknowledged

   # save index names for later use

@@ -9,7 +9,7 @@ setup:
           capabilities: [ 'failure_store_in_template' ]
         - method: POST
           path: /{index}/_rollover
-          capabilities: [ 'lazy-rollover-failure-store' ]
+          capabilities: [ 'lazy-rollover-failure-store', 'index-expression-selectors' ]

   - do:
       allowed_warnings:
@@ -58,8 +58,7 @@ teardown:
   - do:
       indices.rollover:
-        alias: "data-stream-for-rollover"
-        target_failure_store: true
+        alias: "data-stream-for-rollover::failures"

   - match: { acknowledged: true }
   - match: { old_index: "/\\.fs-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" }
@@ -92,8 +91,7 @@ teardown:
   - do:
       indices.rollover:
-        alias: "data-stream-for-rollover"
-        target_failure_store: true
+        alias: "data-stream-for-rollover::failures"
       body:
         conditions:
           max_docs: 1
@@ -130,8 +128,7 @@ teardown:
   - do:
       indices.rollover:
-        alias: "data-stream-for-rollover"
-        target_failure_store: true
+        alias: "data-stream-for-rollover::failures"
       body:
         conditions:
           max_primary_shard_docs: 2
@@ -165,8 +162,7 @@ teardown:
   # Mark the failure store for lazy rollover
   - do:
       indices.rollover:
-        alias: "data-stream-for-rollover"
-        target_failure_store: true
+        alias: "data-stream-for-rollover::failures"
        lazy: true

   - match: { acknowledged: true }
@@ -263,8 +259,7 @@ teardown:
   # Mark the failure store for lazy rollover
   - do:
       indices.rollover:
-        alias: data-stream-for-lazy-rollover
-        target_failure_store: true
+        alias: data-stream-for-lazy-rollover::failures
        lazy: true

   - match: { acknowledged: true }
@@ -332,8 +327,7 @@ teardown:
   # Mark the failure store for lazy rollover
   - do:
       indices.rollover:
-        alias: "data-stream-for-rollover"
-        target_failure_store: true
+        alias: "data-stream-for-rollover::failures"
        lazy: true

   - match: { acknowledged: true }
@@ -377,16 +371,14 @@ teardown:
   - do:
       catch: /Rolling over\/initializing an empty failure store is only supported without conditions\./
       indices.rollover:
-        alias: "data-stream-for-rollover"
-        target_failure_store: true
+        alias: "data-stream-for-rollover::failures"
       body:
         conditions:
           max_docs: 1

   - do:
       indices.rollover:
-        alias: "data-stream-for-rollover"
-        target_failure_store: true
+        alias: "data-stream-for-rollover::failures"

   - match: { acknowledged: true }
   - match: { old_index: "_none_" }
@@ -424,8 +416,7 @@ teardown:
   # Initializing should work
   - do:
       indices.rollover:
-        alias: "other-data-stream-for-rollover"
-        target_failure_store: true
+        alias: "other-data-stream-for-rollover::failures"

   - match: { acknowledged: true }
   - match: { old_index: "_none_" }
@@ -448,8 +439,7 @@ teardown:
   # And "regular" rollover should work
   - do:
       indices.rollover:
-        alias: "other-data-stream-for-rollover"
-        target_failure_store: true
+        alias: "other-data-stream-for-rollover::failures"

   - match: { acknowledged: true }
   - match: { old_index: "/\\.fs-other-data-stream-for-rollover-(\\d{4}\\.\\d{2}\\.\\d{2}-)?000002/" }
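Across all of these YAML updates the failure store is addressed by suffixing the rollover target with `::failures` rather than passing `target_failure_store: true`. A hedged equivalent with the low-level Java client (the data stream name is illustrative):

```java
// Roll over the failure store of a data stream via the selector syntax.
Request rollover = new Request("POST", "/my-data-stream::failures/_rollover");
Response response = client().performRequest(rollover);
// On success the response is acknowledged and names the new ".fs-*" index.
```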

@@ -13,7 +13,6 @@ import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.script.ScriptType;
-import org.elasticsearch.search.Scroll;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.StreamsUtils;
@@ -102,7 +101,7 @@ public class MultiSearchTemplateRequestTests extends ESTestCase {
         String[] indices = { "test" };
         SearchRequest searchRequest = new SearchRequest(indices);
         // scroll is not supported in the current msearch or msearchtemplate api, so unset it:
-        searchRequest.scroll((Scroll) null);
+        searchRequest.scroll(null);
         // batched reduce size is currently not set-able on a per-request basis as it is a query string parameter only
         searchRequest.setBatchedReduceSize(SearchRequest.DEFAULT_BATCHED_REDUCE_SIZE);
         SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(searchRequest);

@@ -43,7 +43,6 @@ import org.elasticsearch.script.CtxMap;
 import org.elasticsearch.script.Metadata;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.search.Scroll;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.search.sort.SortBuilder;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -212,7 +211,7 @@ public abstract class AbstractAsyncBulkByScrollAction<
         if (mainRequest.getMaxDocs() != MAX_DOCS_ALL_MATCHES
             && mainRequest.getMaxDocs() <= preparedSearchRequest.source().size()
             && mainRequest.isAbortOnVersionConflict()) {
-            preparedSearchRequest.scroll((Scroll) null);
+            preparedSearchRequest.scroll(null);
         }

         return preparedSearchRequest;

@@ -53,7 +53,7 @@ final class RemoteRequestBuilders {
         Request request = new Request("POST", path.toString());

         if (searchRequest.scroll() != null) {
-            TimeValue keepAlive = searchRequest.scroll().keepAlive();
+            TimeValue keepAlive = searchRequest.scroll();
             // V_5_0_0
             if (remoteVersion.before(Version.fromId(5000099))) {
                 /* Versions of Elasticsearch before 5.0 couldn't parse nanos or micros

@@ -100,7 +100,7 @@ public class RemoteScrollableHitSource extends ScrollableHitSource {
     @Override
     protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, RejectAwareActionListener<Response> searchListener) {
-        TimeValue keepAlive = timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos());
+        TimeValue keepAlive = timeValueNanos(searchRequest.scroll().nanos() + extraKeepAlive.nanos());
         execute(RemoteRequestBuilders.scroll(scrollId, keepAlive, remoteVersion), RESPONSE_PARSER, searchListener);
     }
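These reindex hunks all track the same core change: `SearchRequest.scroll()` now returns the keep-alive `TimeValue` directly, with the old `Scroll` wrapper gone. A hedged before/after sketch (the index name is illustrative):

```java
SearchRequest searchRequest = new SearchRequest("my-index");
searchRequest.scroll(TimeValue.timeValueMinutes(1)); // keep-alive is set directly

// Before: searchRequest.scroll().keepAlive().seconds()
// After:  the wrapper is gone, so the TimeValue is read in one step.
long keepAliveSeconds = searchRequest.scroll().seconds();

searchRequest.scroll(null); // null still unsets scrolling, as in the msearch hunk above
```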

@@ -597,7 +597,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
         capturedCommand.get().run();

         // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish)
-        assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L)));
+        assertThat(client.lastScroll.get().request.scroll().seconds(), either(equalTo(110L)).or(equalTo(109L)));

         // Now we can simulate a response and check the delay that we used for the task
         if (randomBoolean()) {

@@ -155,10 +155,7 @@ public class ClientScrollableHitSourceTests extends ESTestCase {
         );
         hitSource.startNextScroll(timeValueSeconds(100));
-        client.validateRequest(
-            TransportSearchScrollAction.TYPE,
-            (SearchScrollRequest r) -> assertEquals(r.scroll().keepAlive().seconds(), 110)
-        );
+        client.validateRequest(TransportSearchScrollAction.TYPE, (SearchScrollRequest r) -> assertEquals(r.scroll().seconds(), 110));
     }

     private SearchResponse createSearchResponse() {

@@ -113,14 +113,20 @@ public class ReindexRestClientSslTests extends ESTestCase {
     }

     public void testClientFailsWithUntrustedCertificate() throws IOException {
-        assumeFalse("https://github.com/elastic/elasticsearch/issues/49094", inFipsJvm());
         final List<Thread> threads = new ArrayList<>();
         final Settings.Builder builder = Settings.builder().put("path.home", createTempDir());
         final Settings settings = builder.build();
         final Environment environment = TestEnvironment.newEnvironment(settings);
         final ReindexSslConfig ssl = new ReindexSslConfig(settings, environment, mock(ResourceWatcherService.class));
         try (RestClient client = Reindexer.buildRestClient(getRemoteInfo(), ssl, 1L, threads)) {
+            if (inFipsJvm()) {
+                // Bouncy Castle throws a different exception
+                IOException exception = expectThrows(IOException.class, () -> client.performRequest(new Request("GET", "/")));
+                assertThat(exception.getCause(), Matchers.instanceOf(javax.net.ssl.SSLException.class));
+            } else {
                 expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/")));
+            }
         }
     }
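Rather than skipping the test under FIPS, the assertion now branches on the security provider, since Bouncy Castle surfaces the handshake failure as a plain IOException wrapping an SSLException. The same guard pattern, sketched in isolation (client construction elided):

```java
if (inFipsJvm()) {
    // BCJSSE: handshake failures arrive as IOException with an SSLException cause.
    IOException e = expectThrows(IOException.class, () -> client.performRequest(new Request("GET", "/")));
    assertThat(e.getCause(), Matchers.instanceOf(javax.net.ssl.SSLException.class));
} else {
    // The JDK provider throws the more specific SSLHandshakeException.
    expectThrows(SSLHandshakeException.class, () -> client.performRequest(new Request("GET", "/")));
}
```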

@@ -102,9 +102,6 @@ tests:
 - class: org.elasticsearch.xpack.shutdown.NodeShutdownIT
   method: testAllocationPreventedForRemoval
   issue: https://github.com/elastic/elasticsearch/issues/116363
-- class: org.elasticsearch.xpack.downsample.ILMDownsampleDisruptionIT
-  method: testILMDownsampleRollingRestart
-  issue: https://github.com/elastic/elasticsearch/issues/114233
 - class: org.elasticsearch.reservedstate.service.RepositoriesFileSettingsIT
   method: testSettingsApplied
   issue: https://github.com/elastic/elasticsearch/issues/116694
@@ -236,44 +233,38 @@ tests:
 - class: org.elasticsearch.smoketest.MlWithSecurityIT
   method: test {yaml=ml/sparse_vector_search/Test sparse_vector search with query vector and pruning config}
   issue: https://github.com/elastic/elasticsearch/issues/119548
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
-  method: testSearchableSnapshotUpgrade {p0=[9.0.0, 8.18.0, 8.18.0]}
-  issue: https://github.com/elastic/elasticsearch/issues/119549
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
-  method: testMountSearchableSnapshot {p0=[9.0.0, 8.18.0, 8.18.0]}
-  issue: https://github.com/elastic/elasticsearch/issues/119550
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
-  method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 8.18.0]}
-  issue: https://github.com/elastic/elasticsearch/issues/119551
 - class: org.elasticsearch.index.engine.LuceneSyntheticSourceChangesSnapshotTests
   method: testSkipNonRootOfNestedDocuments
   issue: https://github.com/elastic/elasticsearch/issues/119553
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
-  method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 8.18.0]}
-  issue: https://github.com/elastic/elasticsearch/issues/119560
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
-  method: testMountSearchableSnapshot {p0=[9.0.0, 9.0.0, 9.0.0]}
-  issue: https://github.com/elastic/elasticsearch/issues/119561
-- class: org.elasticsearch.lucene.RollingUpgradeSearchableSnapshotIndexCompatibilityIT
-  method: testSearchableSnapshotUpgrade {p0=[9.0.0, 9.0.0, 9.0.0]}
-  issue: https://github.com/elastic/elasticsearch/issues/119562
 - class: org.elasticsearch.xpack.ml.integration.ForecastIT
   method: testOverflowToDisk
   issue: https://github.com/elastic/elasticsearch/issues/117740
 - class: org.elasticsearch.xpack.security.authc.ldap.MultiGroupMappingIT
   issue: https://github.com/elastic/elasticsearch/issues/119599
-- class: org.elasticsearch.lucene.FullClusterRestartSearchableSnapshotIndexCompatibilityIT
-  method: testSearchableSnapshotUpgrade {p0=8.18.0}
-  issue: https://github.com/elastic/elasticsearch/issues/119631
-- class: org.elasticsearch.lucene.FullClusterRestartSearchableSnapshotIndexCompatibilityIT
-  method: testSearchableSnapshotUpgrade {p0=9.0.0}
-  issue: https://github.com/elastic/elasticsearch/issues/119632
 - class: org.elasticsearch.search.profile.dfs.DfsProfilerIT
   method: testProfileDfs
   issue: https://github.com/elastic/elasticsearch/issues/119711
-- class: org.elasticsearch.upgrades.DataStreamsUpgradeIT
-  method: testUpgradeDataStream
-  issue: https://github.com/elastic/elasticsearch/issues/119717
+- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests
+  method: testSingleMatchFunctionFilterPushdownWithStringValues {default}
+  issue: https://github.com/elastic/elasticsearch/issues/119720
+- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests
+  method: testSingleMatchFunctionPushdownWithCasting {default}
+  issue: https://github.com/elastic/elasticsearch/issues/119722
+- class: org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizerTests
+  method: testSingleMatchOperatorFilterPushdownWithStringValues {default}
+  issue: https://github.com/elastic/elasticsearch/issues/119721
+- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests
+  method: testConcurrentSerialization
+  issue: https://github.com/elastic/elasticsearch/issues/119819
+- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests
+  method: testEqualsAndHashcode
+  issue: https://github.com/elastic/elasticsearch/issues/119820
+- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests
+  method: testConcurrentEquals
+  issue: https://github.com/elastic/elasticsearch/issues/119821
+- class: org.elasticsearch.script.mustache.SearchTemplateRequestTests
+  method: testSerialization
+  issue: https://github.com/elastic/elasticsearch/issues/119822

 # Examples:
 #

@@ -50,8 +50,6 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
       nonInputProperties.systemProperty('tests.rest.cluster', localCluster.map(c -> c.allHttpSocketURI.join(",")))
       nonInputProperties.systemProperty('tests.rest.remote_cluster', remoteCluster.map(c -> c.allHttpSocketURI.join(",")))
     }
-
-    onlyIf("FIPS mode disabled") { buildParams.inFipsJvm == false }
   }

   tasks.register("${baseName}#oldClusterTest", StandaloneRestIntegTestTask) {

@@ -14,7 +14,9 @@ buildParams.bwcVersions.withLatestReadOnlyIndexCompatible { bwcVersion ->
   tasks.named("javaRestTest").configure {
     systemProperty("tests.minimum.index.compatible", bwcVersion)
     usesBwcDistribution(bwcVersion)
-    enabled = true
+
+    // Tests rely on unreleased code in 8.18 branch
+    enabled = buildParams.isSnapshotBuild()
   }
 }

@@ -22,4 +24,3 @@ tasks.withType(Test).configureEach {
   // CI doesn't like it when there's multiple clusters running at once
   maxParallelForks = 1
 }

@@ -207,7 +207,7 @@ public abstract class AbstractIndexCompatibilityTestCase extends ESRestTestCase
             switch (i) {
                 case 0 -> settings.putList(IndexSettings.DEFAULT_FIELD_SETTING.getKey(), "field_" + randomInt(2));
                 case 1 -> settings.put(IndexSettings.MAX_INNER_RESULT_WINDOW_SETTING.getKey(), randomIntBetween(1, 100));
-                case 2 -> settings.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), randomLongBetween(0L, 1000L));
+                case 2 -> settings.put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), randomLongBetween(100L, 1000L));
                 case 3 -> settings.put(IndexSettings.MAX_SLICES_PER_SCROLL.getKey(), randomIntBetween(1, 1024));
                 default -> throw new IllegalStateException();
             }

@@ -63,12 +63,6 @@
         "type":"boolean",
         "default":"false",
         "description":"If set to true, the rollover action will only mark a data stream to signal that it needs to be rolled over at the next write. Only allowed on data streams."
-      },
-      "target_failure_store":{
-        "type":"boolean",
-        "description":"If set to true, the rollover action will be applied on the failure store of the data stream.",
-        "visibility": "feature_flag",
-        "feature_flag": "es.failure_store_feature_flag_enabled"
       }
     },
     "body":{

@@ -31,10 +31,6 @@
       "master_timeout": {
         "type": "time",
         "description": "Timeout for processing on master node"
-      },
-      "timeout": {
-        "type": "time",
-        "description": "Timeout for acknowledgement of update from all nodes in cluster"
       }
     }
   }

@@ -0,0 +1,37 @@
+{
+  "migrate.create_from":{
+    "documentation":{
+      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/data-stream-reindex.html",
+      "description":"This API creates a destination from a source index. It copies the mappings and settings from the source index while allowing request settings and mappings to override the source values."
+    },
+    "stability":"experimental",
+    "visibility":"private",
+    "headers":{
+      "accept": [ "application/json"],
+      "content_type": ["application/json"]
+    },
+    "url":{
+      "paths":[
+        {
+          "path":"/_create_from/{source}/{dest}",
+          "methods":[ "PUT", "POST"],
+          "parts":{
+            "source":{
+              "type":"string",
+              "description":"The source index name"
+            },
+            "dest":{
+              "type":"string",
+              "description":"The destination index name"
+            }
+          }
+        }
+      ]
+    },
+    "body":{
+      "description":"The body contains the fields `mappings_override` and `settings_override`.",
+      "required":false
+    }
+  }
+}
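A hedged usage sketch for the new endpoint (index names and the settings override are illustrative; the path shape and body fields come from the spec above):

```java
// Create "my-dest" from "my-source", overriding one destination setting.
Request createFrom = new Request("PUT", "/_create_from/my-source/my-dest");
createFrom.setJsonEntity("""
    {
      "settings_override": {
        "index": {
          "number_of_replicas": 0
        }
      }
    }
    """);
Response response = client().performRequest(createFrom);
```

Note the spec marks the API as experimental and `private`, so it is intended for internal use (the data stream reindex flow referenced in the documentation URL) rather than as a public surface.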

@@ -26,6 +26,15 @@
         }
       ]
     },
-    "params":{}
+    "params":{
+      "master_timeout":{
+        "type":"time",
+        "description":"Explicit operation timeout for connection to master node"
+      },
+      "timeout":{
+        "type":"time",
+        "description":"Explicit operation timeout"
+      }
+    }
   }
 }

@@ -26,7 +26,16 @@
         }
       ]
     },
-    "params":{},
+    "params":{
+      "master_timeout":{
+        "type":"time",
+        "description":"Explicit operation timeout for connection to master node"
+      },
+      "timeout":{
+        "type":"time",
+        "description":"Explicit operation timeout"
+      }
+    },
     "body":{
       "description":"The shutdown type definition to register",
       "required": true

@@ -20,6 +20,12 @@
       ]
     },
     "params":{
+      "human":{
+        "type":"boolean",
+        "required":false,
+        "description":"Defines whether additional human-readable information is included in the response. In particular, it adds descriptions and a tag line. The default value is true.",
+        "default":true
+      },
       "categories":{
         "type":"list",
         "description":"Comma-separated list of info categories. Can be any of: build, license, features"

@@ -45,6 +45,7 @@
             indexing.index_time .+ \n
             indexing.index_total .+ \n
             indexing.index_failed .+ \n
+            indexing.index_failed_due_to_version_conflict .+ \n
             merges.current .+ \n
             merges.current_docs .+ \n
             merges.current_size .+ \n
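The new column surfaces the version-conflict share of indexing failures in `_cat/nodes`. A hedged request sketch (the header names come from the hunk; the low-level client call is illustrative):

```java
Request catNodes = new Request("GET", "/_cat/nodes");
catNodes.addParameter("h", "name,indexing.index_failed,indexing.index_failed_due_to_version_conflict");
catNodes.addParameter("v", "true");
String table = EntityUtils.toString(client().performRequest(catNodes).getEntity());
// Two failure columns: total failed index ops, and the subset caused by version conflicts.
```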

@@ -30,6 +30,7 @@ import org.hamcrest.Matcher;

 import java.io.IOException;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -76,6 +77,7 @@ public class IndicesMetricsIT extends ESIntegTestCase {
     static final String STANDARD_INDEXING_COUNT = "es.indices.standard.indexing.total";
     static final String STANDARD_INDEXING_TIME = "es.indices.standard.indexing.time";
     static final String STANDARD_INDEXING_FAILURE = "es.indices.standard.indexing.failure.total";
+    static final String STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.standard.indexing.failure.version_conflict.total";

     static final String TIME_SERIES_INDEX_COUNT = "es.indices.time_series.total";
     static final String TIME_SERIES_BYTES_SIZE = "es.indices.time_series.size";
@@ -89,6 +91,8 @@ public class IndicesMetricsIT extends ESIntegTestCase {
     static final String TIME_SERIES_INDEXING_COUNT = "es.indices.time_series.indexing.total";
     static final String TIME_SERIES_INDEXING_TIME = "es.indices.time_series.indexing.time";
     static final String TIME_SERIES_INDEXING_FAILURE = "es.indices.time_series.indexing.failure.total";
+    static final String TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT =
+        "es.indices.time_series.indexing.failure.version_conflict.total";

     static final String LOGSDB_INDEX_COUNT = "es.indices.logsdb.total";
     static final String LOGSDB_BYTES_SIZE = "es.indices.logsdb.size";
@@ -102,6 +106,7 @@ public class IndicesMetricsIT extends ESIntegTestCase {
     static final String LOGSDB_INDEXING_COUNT = "es.indices.logsdb.indexing.total";
     static final String LOGSDB_INDEXING_TIME = "es.indices.logsdb.indexing.time";
     static final String LOGSDB_INDEXING_FAILURE = "es.indices.logsdb.indexing.failure.total";
+    static final String LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT = "es.indices.logsdb.indexing.failure.version_conflict.total";

     public void testIndicesMetrics() {
         String indexNode = internalCluster().startNode();
@@ -132,7 +137,9 @@ public class IndicesMetricsIT extends ESIntegTestCase {
                 STANDARD_INDEXING_TIME,
                 greaterThanOrEqualTo(0L),
                 STANDARD_INDEXING_FAILURE,
-                equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexCount())
+                equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexFailedCount()),
+                STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
+                equalTo(indexing1.getIndexFailedDueToVersionConflictCount() - indexing0.getIndexFailedDueToVersionConflictCount())
             )
         );
@@ -155,7 +162,9 @@ public class IndicesMetricsIT extends ESIntegTestCase {
                 TIME_SERIES_INDEXING_TIME,
                 greaterThanOrEqualTo(0L),
                 TIME_SERIES_INDEXING_FAILURE,
-                equalTo(indexing2.getIndexFailedCount() - indexing1.getIndexFailedCount())
+                equalTo(indexing1.getIndexFailedCount() - indexing0.getIndexFailedCount()),
+                TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
+                equalTo(indexing1.getIndexFailedDueToVersionConflictCount() - indexing0.getIndexFailedDueToVersionConflictCount())
             )
         );
@@ -177,13 +186,14 @@ public class IndicesMetricsIT extends ESIntegTestCase {
                 LOGSDB_INDEXING_TIME,
                 greaterThanOrEqualTo(0L),
                 LOGSDB_INDEXING_FAILURE,
-                equalTo(indexing3.getIndexFailedCount() - indexing2.getIndexFailedCount())
+                equalTo(indexing3.getIndexFailedCount() - indexing2.getIndexFailedCount()),
+                LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
+                equalTo(indexing3.getIndexFailedDueToVersionConflictCount() - indexing2.getIndexFailedDueToVersionConflictCount())
             )
         );

         // already collected indexing stats
-        collectThenAssertMetrics(
-            telemetry,
-            4,
+        Map<String, Matcher<Long>> zeroMatchers = new HashMap<>();
+        zeroMatchers.putAll(
             Map.of(
                 STANDARD_INDEXING_COUNT,
                 equalTo(0L),
@@ -191,22 +201,35 @@ public class IndicesMetricsIT extends ESIntegTestCase {
                 equalTo(0L),
                 STANDARD_INDEXING_FAILURE,
                 equalTo(0L),
+                STANDARD_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
+                equalTo(0L)
+            )
+        );
+        zeroMatchers.putAll(
+            Map.of(
                 TIME_SERIES_INDEXING_COUNT,
                 equalTo(0L),
                 TIME_SERIES_INDEXING_TIME,
                 equalTo(0L),
                 TIME_SERIES_INDEXING_FAILURE,
                 equalTo(0L),
+                TIME_SERIES_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
+                equalTo(0L)
+            )
+        );
+        zeroMatchers.putAll(
+            Map.of(
                 LOGSDB_INDEXING_COUNT,
                 equalTo(0L),
                 LOGSDB_INDEXING_TIME,
                 equalTo(0L),
                 LOGSDB_INDEXING_FAILURE,
-                equalTo(0L)
+                equalTo(0L),
+                LOGSDB_INDEXING_FAILURE_DUE_TO_VERSION_CONFLICT,
+                equalTo(0L)
             )
         );
+        collectThenAssertMetrics(telemetry, 4, zeroMatchers);

         String searchNode = internalCluster().startDataOnlyNode();
         indicesService = internalCluster().getInstance(IndicesService.class, searchNode);
         telemetry = internalCluster().getInstance(PluginsService.class, searchNode)
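The `*.version_conflict.total` counters only move when an indexing operation loses a version race. A hedged sketch of provoking one (standard client calls; the index name is illustrative):

```java
String id = "doc-1";
client().index(new IndexRequest("test-index").id(id).source(Map.of("f", "v"))).actionGet();
// A second create with the same id fails with VersionConflictEngineException,
// which these metrics attribute to the version_conflict bucket.
expectThrows(
    VersionConflictEngineException.class,
    () -> client().index(new IndexRequest("test-index").id(id).source(Map.of("f", "v")).opType(DocWriteRequest.OpType.CREATE))
        .actionGet()
);
```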

@@ -9,14 +9,17 @@

 package org.elasticsearch.monitor.metrics;

+import org.apache.lucene.analysis.TokenStream;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.bulk.IncrementalBulkService;
 import org.elasticsearch.action.delete.DeleteRequest;
+import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
@@ -25,6 +28,13 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.core.AbstractRefCounted;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.IndexingPressure;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.index.engine.VersionConflictEngineException;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.indices.analysis.AnalysisModule;
+import org.elasticsearch.plugins.AnalysisPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.rest.RestStatus;
@@ -43,13 +53,16 @@ import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.Function;

+import static java.util.Collections.singletonMap;
 import static org.elasticsearch.index.IndexingPressure.MAX_COORDINATING_BYTES;
 import static org.elasticsearch.index.IndexingPressure.MAX_PRIMARY_BYTES;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThan;

 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0)
@@ -66,7 +79,7 @@ public class NodeIndexingMetricsIT extends ESIntegTestCase {

     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
-        return List.of(TestTelemetryPlugin.class, TestAPMInternalSettings.class);
+        return List.of(TestTelemetryPlugin.class, TestAPMInternalSettings.class, TestAnalysisPlugin.class);
     }

     @Override
@@ -77,6 +90,197 @@ public class NodeIndexingMetricsIT extends ESIntegTestCase {
             .build();
     }
public void testZeroMetricsForVersionConflictsForNonIndexingOperations() {
final String dataNode = internalCluster().startNode();
ensureStableCluster(1);
final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNode)
.filterPlugins(TestTelemetryPlugin.class)
.findFirst()
.orElseThrow();
plugin.resetMeter();
assertAcked(prepareCreate("index_no_refresh", Settings.builder().put("index.refresh_interval", "-1")));
assertAcked(prepareCreate("index_with_default_refresh"));
for (String indexName : List.of("index_no_refresh", "index_with_default_refresh")) {
String docId = randomUUID();
client(dataNode).index(new IndexRequest(indexName).id(docId).source(Map.of())).actionGet();
// test version conflicts are counted when getting from the translog
if (randomBoolean()) {
// this get has the side effect of tracking translog location in the live version map,
// which potentially influences the engine conflict exception path
client(dataNode).get(new GetRequest(indexName, docId).realtime(randomBoolean())).actionGet();
}
{
var e = expectThrows(
VersionConflictEngineException.class,
() -> client(dataNode).get(
new GetRequest(indexName, docId).version(10).versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE))
).actionGet()
);
assertThat(e.getMessage(), containsString("version conflict"));
assertThat(e.status(), is(RestStatus.CONFLICT));
}
if (randomBoolean()) {
client(dataNode).get(new GetRequest(indexName, docId).realtime(false)).actionGet();
}
client(dataNode).admin().indices().prepareRefresh(indexName).get();
{
var e = expectThrows(
VersionConflictEngineException.class,
() -> client(dataNode).get(
new GetRequest(indexName, docId).version(5)
.versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE))
.realtime(false)
).actionGet()
);
assertThat(e.getMessage(), containsString("version conflict"));
assertThat(e.status(), is(RestStatus.CONFLICT));
}
// updates
{
var e = expectThrows(
VersionConflictEngineException.class,
() -> client(dataNode).update(
new UpdateRequest(indexName, docId).setIfPrimaryTerm(1)
.setIfSeqNo(randomIntBetween(2, 5))
.doc(Map.of(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10)))
).actionGet()
);
assertThat(e.getMessage(), containsString("version conflict"));
assertThat(e.status(), is(RestStatus.CONFLICT));
}
// deletes
{
var e = expectThrows(
VersionConflictEngineException.class,
() -> client(dataNode).delete(
new DeleteRequest(indexName, docId).setIfPrimaryTerm(randomIntBetween(2, 5)).setIfSeqNo(0)
).actionGet()
);
assertThat(e.getMessage(), containsString("version conflict"));
assertThat(e.status(), is(RestStatus.CONFLICT));
}
}
// simulate async apm `polling` call for metrics
plugin.collect();
// there are no indexing (version conflict) failures reported because only gets/updates/deletes generated the conflicts
// and those are not "indexing" operations
var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total");
assertThat(indexingFailedTotal.getLong(), equalTo(0L));
var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric(
plugin::getLongAsyncCounterMeasurement,
"es.indexing.indexing.failed.version_conflict.total"
);
assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(0L));
}
public void testMetricsForIndexingVersionConflicts() {
final String dataNode = internalCluster().startNode();
ensureStableCluster(1);
final TestTelemetryPlugin plugin = internalCluster().getInstance(PluginsService.class, dataNode)
.filterPlugins(TestTelemetryPlugin.class)
.findFirst()
.orElseThrow();
plugin.resetMeter();
assertAcked(
prepareCreate(
"test",
Settings.builder()
.put("index.refresh_interval", "-1")
.put("index.analysis.analyzer.test_analyzer.type", "custom")
.put("index.analysis.analyzer.test_analyzer.tokenizer", "standard")
.putList("index.analysis.analyzer.test_analyzer.filter", "test_token_filter")
).setMapping(Map.of("properties", Map.of("test_field", Map.of("type", "text", "analyzer", "test_analyzer")))).get()
);
String docId = randomUUID();
// successful index (with version)
client(dataNode).index(
new IndexRequest("test").id(docId)
.version(10)
.versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE))
.source(Map.of())
).actionGet();
// if_primary_term conflict
{
var e = expectThrows(
VersionConflictEngineException.class,
() -> client(dataNode).index(new IndexRequest("test").id(docId).source(Map.of()).setIfSeqNo(0).setIfPrimaryTerm(2))
.actionGet()
);
assertThat(e.getMessage(), containsString("version conflict"));
assertThat(e.status(), is(RestStatus.CONFLICT));
}
// if_seq_no conflict
{
var e = expectThrows(
VersionConflictEngineException.class,
() -> client(dataNode).index(new IndexRequest("test").id(docId).source(Map.of()).setIfSeqNo(1).setIfPrimaryTerm(1))
.actionGet()
);
assertThat(e.getMessage(), containsString("version conflict"));
assertThat(e.status(), is(RestStatus.CONFLICT));
}
// version conflict
{
var e = expectThrows(
VersionConflictEngineException.class,
() -> client(dataNode).index(
new IndexRequest("test").id(docId)
.source(Map.of())
.version(3)
.versionType(randomFrom(VersionType.EXTERNAL, VersionType.EXTERNAL_GTE))
).actionGet()
);
assertThat(e.getMessage(), containsString("version conflict"));
assertThat(e.status(), is(RestStatus.CONFLICT));
}
// indexing failure that is NOT a version conflict
PluginsService pluginService = internalCluster().getInstance(PluginsService.class, dataNode);
pluginService.filterPlugins(TestAnalysisPlugin.class).forEach(p -> p.throwParsingError.set(true));
{
var e = expectThrows(
MapperParsingException.class,
() -> client(dataNode).index(new IndexRequest("test").id(docId + "other").source(Map.of("test_field", "this will error")))
.actionGet()
);
assertThat(e.status(), is(RestStatus.BAD_REQUEST));
}
plugin.collect();
var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total");
assertThat(indexingFailedTotal.getLong(), equalTo(4L));
var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric(
plugin::getLongAsyncCounterMeasurement,
"es.indexing.indexing.failed.version_conflict.total"
);
assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(3L));
}
public static final class TestAnalysisPlugin extends Plugin implements AnalysisPlugin {
final AtomicBoolean throwParsingError = new AtomicBoolean(false);
@Override
public Map<String, AnalysisModule.AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
return singletonMap("test_token_filter", (indexSettings, environment, name, settings) -> new AbstractTokenFilterFactory(name) {
@Override
public TokenStream create(TokenStream tokenStream) {
if (throwParsingError.get()) {
throw new MapperParsingException("simulate mapping parsing error");
}
return tokenStream;
}
});
}
}
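Note how the two tests above divide the failure space: gets, updates and deletes that hit version conflicts must leave both counters at zero, while the indexing test drives es.indexing.indexing.failed.total to 4 (three version conflicts plus one forced analysis failure) and es.indexing.indexing.failed.version_conflict.total to 3. TestAnalysisPlugin exists solely to supply that fourth failure: flipping throwParsingError makes the test_token_filter throw a MapperParsingException, an indexing failure that is not a version conflict.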
     public void testNodeIndexingMetricsArePublishing() {

         final String dataNode = internalCluster().startNode();
@@ -116,6 +320,11 @@ public class NodeIndexingMetricsIT extends ESIntegTestCase {
         var indexingFailedTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.indexing.failed.total");
         assertThat(indexingFailedTotal.getLong(), equalTo(0L));
+        var indexingFailedDueToVersionConflictTotal = getSingleRecordedMetric(
+            plugin::getLongAsyncCounterMeasurement,
+            "es.indexing.indexing.failed.version_conflict.total"
+        );
+        assertThat(indexingFailedDueToVersionConflictTotal.getLong(), equalTo(0L));

         var deletionTotal = getSingleRecordedMetric(plugin::getLongAsyncCounterMeasurement, "es.indexing.deletion.docs.total");
         assertThat(deletionTotal.getLong(), equalTo((long) deletesCount));
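getSingleRecordedMetric is a pre-existing helper in this test class and is not part of this diff. Judging from its call sites, a plausible reconstruction looks like the following sketch (assuming the Measurement type recorded by TestTelemetryPlugin; the real helper may differ in details):

private static Measurement getSingleRecordedMetric(Function<String, List<Measurement>> metricGetter, String name) {
    // Fetch every measurement recorded for the named metric and insist on
    // exactly one, so each assertion reads an unambiguous value.
    final List<Measurement> measurements = metricGetter.apply(name);
    assertFalse("metric [" + name + "] was not recorded", measurements.isEmpty());
    assertThat(measurements.size(), equalTo(1));
    return measurements.get(0);
}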

View file

@@ -24,7 +24,6 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
-import org.elasticsearch.search.Scroll;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.builder.PointInTimeBuilder;
 import org.elasticsearch.search.sort.ShardDocSortField;
@@ -97,14 +96,14 @@ public class SearchSliceIT extends ESIntegTestCase {
         int fetchSize = randomIntBetween(10, 100);

         // test _doc sort
         SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery())
-            .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
+            .setScroll(TimeValue.timeValueSeconds(10))
             .setSize(fetchSize)
             .addSort(SortBuilders.fieldSort("_doc"));
         assertSearchSlicesWithScroll(request, field, max, numDocs);

         // test numeric sort
         request = prepareSearch("test").setQuery(matchAllQuery())
-            .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
+            .setScroll(TimeValue.timeValueSeconds(10))
             .addSort(SortBuilders.fieldSort("random_int"))
             .setSize(fetchSize);
         assertSearchSlicesWithScroll(request, field, max, numDocs);
@@ -121,7 +120,7 @@ public class SearchSliceIT extends ESIntegTestCase {
         int max = randomIntBetween(2, numShards * 3);
         int fetchSize = randomIntBetween(10, 100);
         SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery())
-            .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
+            .setScroll(TimeValue.timeValueSeconds(10))
             .setSize(fetchSize)
             .setPreference("_shards:1,4")
             .addSort(SortBuilders.fieldSort("_doc"));
@@ -133,7 +132,7 @@ public class SearchSliceIT extends ESIntegTestCase {
         int max = randomIntBetween(2, numShards * 3);
         int fetchSize = randomIntBetween(10, 100);
         SearchRequestBuilder request = prepareSearch("test").setQuery(matchAllQuery())
-            .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
+            .setScroll(TimeValue.timeValueSeconds(10))
             .setSize(fetchSize)
             .setRouting("foo", "bar")
             .addSort(SortBuilders.fieldSort("_doc"));
@@ -151,7 +150,7 @@ public class SearchSliceIT extends ESIntegTestCase {
         int max = randomIntBetween(2, numShards * 3);
         int fetchSize = randomIntBetween(10, 100);
         SearchRequestBuilder request = prepareSearch("alias1", "alias3").setQuery(matchAllQuery())
-            .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
+            .setScroll(TimeValue.timeValueSeconds(10))
             .setSize(fetchSize)
             .addSort(SortBuilders.fieldSort("_doc"));
         assertSearchSlicesWithScroll(request, "_id", max, numDocs);
@@ -176,7 +175,7 @@ public class SearchSliceIT extends ESIntegTestCase {
             searchResponse.decRef();
             searchResponse = client().prepareSearchScroll("test")
                 .setScrollId(scrollId)
-                .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
+                .setScroll(TimeValue.timeValueSeconds(10))
                 .get();
             scrollId = searchResponse.getScrollId();
             totalResults += searchResponse.getHits().getHits().length;
@@ -271,7 +270,7 @@ public class SearchSliceIT extends ESIntegTestCase {
         SearchPhaseExecutionException exc = expectThrows(
             SearchPhaseExecutionException.class,
             prepareSearch("test").setQuery(matchAllQuery())
-                .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
+                .setScroll(TimeValue.timeValueSeconds(10))
                 .slice(new SliceBuilder("invalid_random_int", 0, 10))
         );
@@ -282,7 +281,7 @@ public class SearchSliceIT extends ESIntegTestCase {
         exc = expectThrows(
             SearchPhaseExecutionException.class,
             prepareSearch("test").setQuery(matchAllQuery())
-                .setScroll(new Scroll(TimeValue.timeValueSeconds(10)))
+                .setScroll(TimeValue.timeValueSeconds(10))
                 .slice(new SliceBuilder("invalid_random_kw", 0, 10))
         );
         rootCause = findRootCause(exc);
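Every hunk in this file is the same mechanical substitution: org.elasticsearch.search.Scroll was a thin wrapper holding nothing but the scroll keep-alive, so the TimeValue is now handed to setScroll directly and the import disappears. The call-site change in isolation, for some SearchRequestBuilder request:

// before: keep-alive wrapped in a Scroll object
request.setScroll(new Scroll(TimeValue.timeValueSeconds(10)));
// after: the keep-alive TimeValue is passed directly
request.setScroll(TimeValue.timeValueSeconds(10));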

View file

@@ -152,6 +152,10 @@ public class TransportVersions {
     public static final TransportVersion ESQL_CCS_TELEMETRY_STATS = def(8_816_00_0);
     public static final TransportVersion TEXT_EMBEDDING_QUERY_VECTOR_BUILDER_INFER_MODEL_ID = def(8_817_00_0);
     public static final TransportVersion ESQL_ENABLE_NODE_LEVEL_REDUCTION = def(8_818_00_0);
+    public static final TransportVersion JINA_AI_INTEGRATION_ADDED = def(8_819_00_0);
+    public static final TransportVersion TRACK_INDEX_FAILED_DUE_TO_VERSION_CONFLICT_METRIC = def(8_820_00_0);
+    public static final TransportVersion REPLACE_FAILURE_STORE_OPTIONS_WITH_SELECTOR_SYNTAX = def(8_821_00_0);
+    public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0);

     /*
      * WARNING: DO NOT MERGE INTO MAIN!
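Transport version ids must stay unique and strictly increasing down this list, because nodes compare them numerically when negotiating the wire format; each of the four features added here therefore claims the next free 8_8NN_00_0 slot. TRACK_INDEX_FAILED_DUE_TO_VERSION_CONFLICT_METRIC is the constant gating serialization of the new version-conflict counter exercised in the test changes above.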

View file

@@ -14,7 +14,6 @@ import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.MasterNodeRequest;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -44,9 +43,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest<RestoreSnapshotReq
     private String snapshot;
     private String repository;
     private String[] indices = Strings.EMPTY_ARRAY;
-    private IndicesOptions indicesOptions = DataStream.isFailureStoreFeatureFlagEnabled()
-        ? IndicesOptions.strictExpandOpenIncludeFailureStore()
-        : IndicesOptions.strictExpandOpen();
+    private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
     private String[] featureStates = Strings.EMPTY_ARRAY;
     private String renamePattern;
     private String renameReplacement;

View file

@@ -74,7 +74,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
                 .allowAliasToMultipleIndices(true)
                 .allowClosedIndices(true)
                 .ignoreThrottled(false)
-                .allowFailureIndices(true)
+                .allowSelectors(false)
                 .build()
         )
         .build();

View file

@@ -23,7 +23,7 @@ import java.util.Map;
 public class GetAliasesRequest extends LocalClusterStateRequest implements AliasesRequest {

-    public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandHidden();
+    public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandHiddenNoSelectors();

     private String[] aliases;
     private String[] originalAliases;

View file

@@ -43,7 +43,7 @@ public class DeleteIndexRequest extends AcknowledgedRequest<DeleteIndexRequest>
                 .allowAliasToMultipleIndices(false)
                 .allowClosedIndices(true)
                 .ignoreThrottled(false)
-                .allowFailureIndices(true)
+                .allowSelectors(false)
                 .build()
         )
         .build();
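The last three files share one theme, matching REPLACE_FAILURE_STORE_OPTIONS_WITH_SELECTOR_SYNTAX above: the per-request failure-store flag allowFailureIndices(true) gives way to the selector gatekeeper allowSelectors(...), and GetAliasesRequest switches to strictExpandHiddenNoSelectors(). A request built with allowSelectors(false) rejects selector-suffixed index expressions such as logs::failures. A minimal sketch of the builder nesting the call chain implies; the GatekeeperOptions naming is an assumption inferred from the shape of the chain, not shown in this diff:

// Sketch of DeleteIndexRequest's default options as changed here; the
// IndicesOptions.GatekeeperOptions builder name is assumed, not shown.
IndicesOptions defaultOptions = IndicesOptions.builder()
    .gatekeeperOptions(
        IndicesOptions.GatekeeperOptions.builder()
            .allowAliasToMultipleIndices(false)
            .allowClosedIndices(true)
            .ignoreThrottled(false)
            .allowSelectors(false)   // selector syntax (::data / ::failures) rejected
            .build()
    )
    .build();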

Some files were not shown because too many files have changed in this diff.