Merge remote-tracking branch 'upstream/main' into lucene_snapshot_9_11

Benjamin Trent 2024-06-11 06:54:23 -04:00
commit 29288d6590
449 changed files with 10305 additions and 3216 deletions

View file

@@ -91,8 +91,8 @@ if (USE_ARTIFACTORY) {
   }
   gradle.settingsEvaluated { settings ->
-    settings.pluginManager.withPlugin("com.gradle.enterprise") {
-      settings.gradleEnterprise {
+    settings.pluginManager.withPlugin("com.gradle.develocity") {
+      settings.develocity {
         server = 'https://gradle-enterprise.elastic.co'
       }
     }

View file

@@ -1,5 +1,5 @@
 Elasticsearch
-Copyright 2009-2021 Elasticsearch
+Copyright 2009-2024 Elasticsearch

 This product includes software developed by The Apache Software
 Foundation (http://www.apache.org/).

View file

@@ -15,6 +15,8 @@ import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BooleanBlock;
 import org.elasticsearch.compute.data.BooleanVector;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.DoubleVector;
 import org.elasticsearch.compute.data.LongBlock;
 import org.elasticsearch.compute.data.LongVector;
 import org.elasticsearch.compute.data.Page;
@@ -84,7 +86,17 @@ public class EvalBenchmark {
     }

     @Param(
-        { "abs", "add", "date_trunc", "equal_to_const", "long_equal_to_long", "long_equal_to_int", "mv_min", "mv_min_ascending", "rlike" }
+        {
+            "abs",
+            "add",
+            "add_double",
+            "date_trunc",
+            "equal_to_const",
+            "long_equal_to_long",
+            "long_equal_to_int",
+            "mv_min",
+            "mv_min_ascending",
+            "rlike" }
     )
     public String operation;
@@ -105,6 +117,13 @@ public class EvalBenchmark {
                     layout(longField)
                 ).get(driverContext);
             }
+            case "add_double" -> {
+                FieldAttribute doubleField = doubleField();
+                yield EvalMapper.toEvaluator(
+                    new Add(Source.EMPTY, doubleField, new Literal(Source.EMPTY, 1D, DataType.DOUBLE)),
+                    layout(doubleField)
+                ).get(driverContext);
+            }
             case "date_trunc" -> {
                 FieldAttribute timestamp = new FieldAttribute(
                     Source.EMPTY,
@@ -150,6 +169,10 @@ public class EvalBenchmark {
         return new FieldAttribute(Source.EMPTY, "long", new EsField("long", DataType.LONG, Map.of(), true));
     }

+    private static FieldAttribute doubleField() {
+        return new FieldAttribute(Source.EMPTY, "double", new EsField("double", DataType.DOUBLE, Map.of(), true));
+    }
+
     private static FieldAttribute intField() {
         return new FieldAttribute(Source.EMPTY, "int", new EsField("int", DataType.INTEGER, Map.of(), true));
     }
@@ -182,6 +205,16 @@ public class EvalBenchmark {
                     }
                 }
             }
+            case "add_double" -> {
+                DoubleVector v = actual.<DoubleBlock>getBlock(1).asVector();
+                for (int i = 0; i < BLOCK_LENGTH; i++) {
+                    if (v.getDouble(i) != i * 100_000 + 1D) {
+                        throw new AssertionError(
+                            "[" + operation + "] expected [" + (i * 100_000 + 1D) + "] but was [" + v.getDouble(i) + "]"
+                        );
+                    }
+                }
+            }
             case "date_trunc" -> {
                 LongVector v = actual.<LongBlock>getBlock(1).asVector();
                 long oneDay = TimeValue.timeValueHours(24).millis();
@@ -239,6 +272,13 @@ public class EvalBenchmark {
                 }
                 yield new Page(builder.build());
             }
+            case "add_double" -> {
+                var builder = blockFactory.newDoubleBlockBuilder(BLOCK_LENGTH);
+                for (int i = 0; i < BLOCK_LENGTH; i++) {
+                    builder.appendDouble(i * 100_000D);
+                }
+                yield new Page(builder.build());
+            }
             case "long_equal_to_long" -> {
                 var lhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
                 var rhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH);
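
Read together, the three add_double hunks form a simple pipeline: the page builder fills a double block with i * 100_000, the evaluator applies Add(doubleField, 1.0), and the check expects i * 100_000 + 1 at every position. A minimal stand-alone sketch of that contract in plain Java, outside the benchmark harness (the BLOCK_LENGTH value here is an assumption, not the benchmark's actual constant):

public class AddDoubleExpectation {
    private static final int BLOCK_LENGTH = 1024; // assumed block size, for illustration only

    public static void main(String[] args) {
        // Build the input the same way the add_double page builder does.
        double[] input = new double[BLOCK_LENGTH];
        for (int i = 0; i < BLOCK_LENGTH; i++) {
            input[i] = i * 100_000D;
        }
        // Apply the Add(doubleField, 1.0) expression and verify the checkExpected formula.
        for (int i = 0; i < BLOCK_LENGTH; i++) {
            double result = input[i] + 1D;
            if (result != i * 100_000 + 1D) {
                throw new AssertionError("expected [" + (i * 100_000 + 1D) + "] but was [" + result + "]");
            }
        }
    }
}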

View file

@@ -117,7 +117,7 @@ Copyright 2009-2018 Acme Coorp"""
         result.task(":darwin-tar:checkNotice").outcome == TaskOutcome.FAILED
         result.output.contains("> expected line [2] in " +
             "[./darwin-tar/build/tar-extracted/elasticsearch-${VersionProperties.getElasticsearch()}/NOTICE.txt] " +
-            "to be [Copyright 2009-2021 Elasticsearch] but was [Copyright 2009-2018 Acme Coorp]")
+            "to be [Copyright 2009-2024 Elasticsearch] but was [Copyright 2009-2018 Acme Coorp]")
     }

     def "fails on unexpected ml notice content"() {
@@ -125,7 +125,7 @@ Copyright 2009-2018 Acme Coorp"""
         elasticLicense()
         elasticLicense(file("LICENSE.txt"))
         file("NOTICE.txt").text = """Elasticsearch
-Copyright 2009-2021 Elasticsearch"""
+Copyright 2009-2024 Elasticsearch"""
         file("ml/NOTICE.txt").text = "Boost Software License - Version 1.0 - August 17th, 2003"
         file('darwin-tar/build.gradle') << """

View file

@@ -12,18 +12,23 @@ import java.time.LocalDateTime;
 import org.elasticsearch.gradle.Architecture
 import org.elasticsearch.gradle.OS
 import org.elasticsearch.gradle.internal.info.BuildParams
 import org.gradle.initialization.BuildRequestMetaData

-buildScan {
+import java.lang.management.ManagementFactory
+import java.time.LocalDateTime
+
+develocity {
+  buildScan {
     URL jenkinsUrl = System.getenv('JENKINS_URL') ? new URL(System.getenv('JENKINS_URL')) : null
     String buildKiteUrl = System.getenv('BUILDKITE_BUILD_URL') ? System.getenv('BUILDKITE_BUILD_URL') : null

     // Automatically publish scans from Elasticsearch CI
     if (jenkinsUrl?.host?.endsWith('elastic.co') || jenkinsUrl?.host?.endsWith('elastic.dev') || System.getenv('BUILDKITE') == 'true') {
-      publishAlways()
-      buildScan.server = 'https://gradle-enterprise.elastic.co'
+      publishing.onlyIf { true }
+      server = 'https://gradle-enterprise.elastic.co'
     }

     background {
       tag OS.current().name()
       tag Architecture.current().name()
@@ -71,7 +76,8 @@ buildScan {
       tag 'CI'
       link 'CI Build', buildUrl
-      link 'GCP Upload', "https://console.cloud.google.com/storage/browser/_details/elasticsearch-ci-artifacts/jobs/${URLEncoder.encode(jobName, "UTF-8")}/build/${buildNumber}.tar.bz2"
+      link 'GCP Upload',
+        "https://console.cloud.google.com/storage/browser/_details/elasticsearch-ci-artifacts/jobs/${URLEncoder.encode(jobName, "UTF-8")}/build/${buildNumber}.tar.bz2"
       value 'Job Number', buildNumber
       if (jobBranch) {
         tag jobBranch
@@ -116,7 +122,7 @@ buildScan {
       value 'Job Name', jobName
       tag jobName
       if (jobLabel.contains("/")) {
-        jobLabel.split("/").collect {safeName(it) }.each {matrix ->
+        jobLabel.split("/").collect { safeName(it) }.each { matrix ->
           tag matrix
         }
       }
@@ -125,8 +131,10 @@ buildScan {
       def metricsStartTime = LocalDateTime.now().minusSeconds(uptime.longValue()).minusMinutes(15).toString()
       def metricsEndTime = LocalDateTime.now().plusMinutes(15).toString()
-      link 'Agent Metrics', "https://es-buildkite-agents.elastic.dev/app/metrics/detail/host/${System.getenv('BUILDKITE_AGENT_NAME')}?_a=(time:(from:%27${metricsStartTime}Z%27,interval:%3E%3D1m,to:%27${metricsEndTime}Z%27))"
-      link 'Agent Logs', "https://es-buildkite-agents.elastic.dev/app/logs/stream?logFilter=(filters:!(),query:(language:kuery,query:%27host.name:%20${System.getenv('BUILDKITE_AGENT_NAME')}%27),timeRange:(from:%27${metricsStartTime}Z%27,to:%27${metricsEndTime}Z%27))"
+      link 'Agent Metrics',
+        "https://es-buildkite-agents.elastic.dev/app/metrics/detail/host/${System.getenv('BUILDKITE_AGENT_NAME')}?_a=(time:(from:%27${metricsStartTime}Z%27,interval:%3E%3D1m,to:%27${metricsEndTime}Z%27))"
+      link 'Agent Logs',
+        "https://es-buildkite-agents.elastic.dev/app/logs/stream?logFilter=(filters:!(),query:(language:kuery,query:%27host.name:%20${System.getenv('BUILDKITE_AGENT_NAME')}%27),timeRange:(from:%27${metricsStartTime}Z%27,to:%27${metricsEndTime}Z%27))"

       if (branch) {
         tag branch
@@ -149,7 +157,8 @@ buildScan {
     buildFinished { result ->
-      buildScanPublished { scan ->
+      buildScanPublished { scan
+        ->
         // Attach build scan link as build metadata
         // See: https://buildkite.com/docs/pipelines/build-meta-data
         new ProcessBuilder('buildkite-agent', 'meta-data', 'set', "build-scan-${System.getenv('BUILDKITE_JOB_ID')}", "${scan.buildScanUri}")
@@ -158,17 +167,18 @@ buildScan {
         // Add a build annotation
         // See: https://buildkite.com/docs/agent/v3/cli-annotate
-        def body = """<div class="mb3"><span class="p1 border rounded">${System.getenv('BUILDKITE_LABEL')}</span> :gradle: ${result.failure ? 'failed' : 'successful'} build: <a href="${scan.buildScanUri}"><code>gradle ${gradle.startParameter.taskNames.join(' ')}</code></a></div>"""
+        def body = """<div class="mb3"><span class="p1 border rounded">${System.getenv('BUILDKITE_LABEL')}</span> :gradle: ${result.failures ? 'failed' : 'successful'} build: <a href="${scan.buildScanUri}"><code>gradle ${gradle.startParameter.taskNames.join(' ')}</code></a></div>"""
         def process = [
           'buildkite-agent',
           'annotate',
           '--context',
-          result.failure ? 'gradle-build-scans-failed' : 'gradle-build-scans',
+          result.failures ? 'gradle-build-scans-failed' : 'gradle-build-scans',
           '--append',
           '--style',
-          result.failure ? 'error' : 'info'
+          result.failures ? 'error' : 'info'
         ].execute()
-        process.withWriter { it.write(body) } // passing the body in as an argument has issues on Windows, so let's use stdin of the process instead
+        process.withWriter { it.write(body) }
+        // passing the body in as an argument has issues on Windows, so let's use stdin of the process instead
         process.waitFor()
       }
     }
@@ -176,6 +186,7 @@ buildScan {
       tag 'LOCAL'
     }
   }
+  }
 }

 static def safeName(String string) {

View file

@@ -135,7 +135,7 @@ public class InternalDistributionArchiveCheckPlugin implements Plugin<Project> {
             task.doLast(new Action<Task>() {
                 @Override
                 public void execute(Task task) {
-                    final List<String> noticeLines = Arrays.asList("Elasticsearch", "Copyright 2009-2021 Elasticsearch");
+                    final List<String> noticeLines = Arrays.asList("Elasticsearch", "Copyright 2009-2024 Elasticsearch");
                     final Path noticePath = checkExtraction.get()
                         .getDestinationDir()
                         .toPath()

View file

@@ -7,7 +7,6 @@
  */
 package org.elasticsearch.gradle.internal.test;

-import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin;
 import org.gradle.api.internal.tasks.testing.logging.FullExceptionFormatter;
 import org.gradle.api.internal.tasks.testing.logging.TestExceptionFormatter;
 import org.gradle.api.logging.Logger;
@@ -39,21 +38,24 @@ import java.util.concurrent.ConcurrentHashMap;
 public class ErrorReportingTestListener implements TestOutputListener, TestListener {
     private static final String REPRODUCE_WITH_PREFIX = "REPRODUCE WITH";

-    private final Test testTask;
     private final TestExceptionFormatter formatter;
     private final File outputDirectory;
     private final Logger taskLogger;
     private Map<Descriptor, EventWriter> eventWriters = new ConcurrentHashMap<>();
     private Map<Descriptor, Deque<String>> reproductionLines = new ConcurrentHashMap<>();
     private Set<Descriptor> failedTests = new LinkedHashSet<>();
+    private boolean dumpOutputOnFailure = true;

     public ErrorReportingTestListener(Test testTask, File outputDirectory) {
-        this.testTask = testTask;
         this.formatter = new FullExceptionFormatter(testTask.getTestLogging());
         this.taskLogger = testTask.getLogger();
         this.outputDirectory = outputDirectory;
     }

+    public void setDumpOutputOnFailure(boolean dumpOutputOnFailure) {
+        this.dumpOutputOnFailure = dumpOutputOnFailure;
+    }
+
     @Override
     public void onOutput(TestDescriptor testDescriptor, TestOutputEvent outputEvent) {
         TestDescriptor suite = testDescriptor.getParent();
@@ -83,7 +85,7 @@ public class ErrorReportingTestListener implements TestOutputListener, TestListe
         Descriptor descriptor = Descriptor.of(suite);

         try {
-            if (isDumpOutputEnabled()) {
+            if (dumpOutputOnFailure) {
                 // if the test suite failed, report all captured output
                 if (result.getResultType().equals(TestResult.ResultType.FAILURE)) {
                     EventWriter eventWriter = eventWriters.get(descriptor);
@@ -256,11 +258,4 @@ public class ErrorReportingTestListener implements TestOutputListener, TestListe
             outputFile.delete();
         }
     }
-
-    private boolean isDumpOutputEnabled() {
-        return (Boolean) testTask.getExtensions()
-            .getExtraProperties()
-            .getProperties()
-            .getOrDefault(ElasticsearchTestBasePlugin.DUMP_OUTPUT_ON_FAILURE_PROP_NAME, true);
-    }
 }

View file

@@ -18,9 +18,9 @@ import org.elasticsearch.gradle.Version;
 import org.elasticsearch.gradle.VersionProperties;
 import org.elasticsearch.gradle.distribution.ElasticsearchDistributionTypes;
 import org.elasticsearch.gradle.internal.ElasticsearchJavaPlugin;
-import org.elasticsearch.gradle.internal.ElasticsearchTestBasePlugin;
 import org.elasticsearch.gradle.internal.InternalDistributionDownloadPlugin;
 import org.elasticsearch.gradle.internal.info.BuildParams;
+import org.elasticsearch.gradle.internal.test.ErrorReportingTestListener;
 import org.elasticsearch.gradle.internal.test.HistoricalFeaturesMetadataPlugin;
 import org.elasticsearch.gradle.plugin.BasePluginBuildPlugin;
 import org.elasticsearch.gradle.plugin.PluginBuildPlugin;
@@ -167,7 +167,7 @@ public class RestTestBasePlugin implements Plugin<Project> {
         nonInputSystemProperties.systemProperty(TESTS_MAX_PARALLEL_FORKS_SYSPROP, () -> String.valueOf(task.getMaxParallelForks()));

         // Disable test failure reporting since this stuff is now captured in build scans
-        task.getExtensions().getExtraProperties().set(ElasticsearchTestBasePlugin.DUMP_OUTPUT_ON_FAILURE_PROP_NAME, false);
+        task.getExtensions().getByType(ErrorReportingTestListener.class).setDumpOutputOnFailure(false);

         // Disable the security manager and syscall filter since the test framework needs to fork processes
         task.systemProperty("tests.security.manager", "false");

View file

@@ -495,7 +495,7 @@ subprojects {
         (project.name.contains('deb') && dpkgExists.call(it)) || (project.name.contains('rpm') && rpmExists.call(it))
       }
       doLast {
-        final List<String> noticeLines = Arrays.asList("Elasticsearch", "Copyright 2009-2021 Elasticsearch")
+        final List<String> noticeLines = Arrays.asList("Elasticsearch", "Copyright 2009-2024 Elasticsearch")
        final Path noticePath = packageExtractionDir.toPath().resolve("usr/share/elasticsearch/NOTICE.txt")
        assertLinesInFile(noticePath, noticeLines)
      }

View file

@@ -0,0 +1,5 @@
pr: 106591
summary: Make dense vector field type updatable
area: Search
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 107279
summary: Introduce _transform/_node_stats API
area: Transform
type: feature
issues: []

View file

@@ -0,0 +1,5 @@
pr: 108895
summary: Add permission to secure access to certain config files specified by settings
area: "Security"
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 108947
summary: Provide the `DocumentSizeReporter` with index mode
area: Infra/Metrics
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 109042
summary: Add Create or update query rule API call
area: Application
type: enhancement
issues: [ ]

View file

@@ -0,0 +1,6 @@
pr: 109185
summary: Handle unmatching remote cluster wildcards properly for `IndicesRequest.SingleIndexNoWildcards`
requests
area: Authorization
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 109204
summary: Detect long-running tasks on network threads
area: Network
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 109233
summary: Fix trappy timeouts in security settings APIs
area: Security
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 109332
summary: "ES|QL: vectorize eval"
area: ES|QL
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 109341
summary: Re-define `index.mapper.dynamic` setting in 8.x for a better 7.x to 8.x upgrade if this setting is used.
area: Mapping
type: bug
issues: []

View file

@@ -0,0 +1,6 @@
pr: 109370
summary: Enable fallback synthetic source by default
area: Mapping
type: feature
issues:
- 106460

View file

@@ -0,0 +1,5 @@
pr: 109410
summary: Support synthetic source for date fields when `ignore_malformed` is used
area: Mapping
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 109440
summary: Fix task cancellation on remote cluster when original request fails
area: Network
type: bug
issues: []

View file

@@ -0,0 +1,6 @@
pr: 109449
summary: Reset max page size to settings value
area: Transform
type: bug
issues:
- 109308

View file

@@ -0,0 +1,5 @@
pr: 109470
summary: Enabling profiling for `RankBuilders` and adding tests for RRF
area: Ranking
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 109481
summary: Fork freeing search/scroll contexts to GENERIC pool
area: Search
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 109500
summary: Guard file settings readiness on file settings support
area: Infra/Settings
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 109533
summary: Fix IndexOutOfBoundsException during inference
area: Machine Learning
type: bug
issues: []

View file

@@ -0,0 +1,6 @@
pr: 109540
summary: Add metrics@custom component template to metrics-*-* index template
area: Data streams
type: enhancement
issues:
- 109475

View file

@@ -123,6 +123,10 @@ alias. For example, `remote1:my_index` indicates that you want to execute
 the painless script against the "my_index" index on the "remote1" cluster. This
 request will be forwarded to the "remote1" cluster if you have
 {ref}/remote-clusters-connect.html[configured a connection] to that remote cluster.
+
+NOTE: Wildcards are not accepted in the index expression for this endpoint. The
+expression `*:myindex` will return the error "No such remote cluster" and the
+expression `logs*` or `remote1:logs*` will return the error "index not found".
 ====

 `params`:: (`Map`, read-only)

View file

@@ -13,9 +13,16 @@ To set up a data stream, follow these steps:
 You can also <<convert-index-alias-to-data-stream,convert an index alias to
 a data stream>>.

-IMPORTANT: If you use {fleet} or {agent}, skip this tutorial. {fleet} and
-{agent} set up data streams for you. See {fleet}'s
-{fleet-guide}/data-streams.html[data streams] documentation.
+[IMPORTANT]
+--
+If you use {fleet}, {agent}, or {ls}, skip this tutorial.
+They all set up data streams for you.
+For {fleet} and {agent}, check out this {fleet-guide}/data-streams.html[data streams documentation].
+For {ls}, check out the
+{logstash-ref}/plugins-outputs-elasticsearch.html#plugins-outputs-elasticsearch-data_stream[data streams settings]
+for the `elasticsearch output` plugin.
+--

 [discrete]
 [[create-index-lifecycle-policy]]

View file

@@ -285,7 +285,7 @@ field values that are older than 2 hours (the `index.look_back_time` default).
 A TSDS is designed to ingest current metrics data. When the TSDS is first
 created the initial backing index has:

-* an `index.time_series.start_time` value set to `now - index.look_ahead_time`
+* an `index.time_series.start_time` value set to `now - index.look_back_time`
 * an `index.time_series.end_time` value set to `now + index.look_ahead_time`

 Only data that falls inside that range can be indexed.

View file

@@ -431,7 +431,7 @@ The update by query operation skips updating the document and increments the `n
 Set `ctx.op = "delete"` if your script decides that the document should be deleted.
 The update by query operation deletes the document and increments the `deleted` counter.

-Update by query only supports `update`, `noop`, and `delete`.
+Update by query only supports `index`, `noop`, and `delete`.
 Setting `ctx.op` to anything else is an error. Setting any other field in `ctx` is an error.

 This API only enables you to modify the source of matching documents, you cannot move them.

View file

@@ -15,10 +15,9 @@ This getting started is also available as an https://github.com/elastic/elastics
 [[esql-getting-started-prerequisites]]
 === Prerequisites

-To follow along with the queries in this guide, you can either set up your own
-deployment, or use Elastic's public {esql} demo environment.
+To follow along with the queries in this guide, you'll need an {es} deployment with our sample data.

-include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-widget-sample-data.asciidoc[]
+include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-sample-data.asciidoc[tag=own-deployment]

 [discrete]
 [[esql-getting-started-running-queries]]
@@ -269,7 +268,8 @@ Before you can use `ENRICH`, you first need to
 <<esql-create-enrich-policy,create>> and <<esql-execute-enrich-policy,execute>>
 an <<esql-enrich-policy,enrich policy>>.

-include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-widget-enrich-policy.asciidoc[]
+include::{es-ref-dir}/tab-widgets/esql/esql-getting-started-enrich-policy.asciidoc[tag=own-deployment]

 After creating and executing a policy, you can use it with the `ENRICH`
 command:

View file

@@ -261,6 +261,24 @@
       ],
       "variadic" : true,
       "returnType" : "text"
+    },
+    {
+      "params" : [
+        {
+          "name" : "first",
+          "type" : "version",
+          "optional" : false,
+          "description" : "Expression to evaluate."
+        },
+        {
+          "name" : "rest",
+          "type" : "version",
+          "optional" : true,
+          "description" : "Other expression to evaluate."
+        }
+      ],
+      "variadic" : true,
+      "returnType" : "version"
     }
   ],
   "examples" : [

View file

@@ -21,4 +21,5 @@ long | long | long
 long | | long
 text | text | text
 text | | text
+version | version | version
 |===

View file

@@ -242,8 +242,7 @@ of official GA features.
 `date` fields support <<synthetic-source,synthetic `_source`>> in their
 default configuration. Synthetic `_source` cannot be used together with
-<<copy-to,`copy_to`>>, <<ignore-malformed,`ignore_malformed`>> set to true
-or with <<doc-values,`doc_values`>> disabled.
+<<copy-to,`copy_to`>> or with <<doc-values,`doc_values`>> disabled.

 Synthetic source always sorts `date` fields. For example:

 [source,console,id=synthetic-source-date-example]

View file

@@ -52,6 +52,8 @@ Use the <<put-inference-api>> to create the endpoint.
 The `inference_id` will not be validated when the mapping is created, but when documents are ingested into the index.
 When the first document is indexed, the `inference_id` will be used to generate underlying indexing structures for the field.

+WARNING: Removing an inference endpoint will cause ingestion of documents and semantic queries to fail on indices that define `semantic_text` fields with that inference endpoint as their `inference_id`.
+Please check that inference endpoints are not used in `semantic_text` fields before removal.

 [discrete]
 [[auto-text-chunking]]

View file

@@ -109,10 +109,49 @@ the `transport_worker` threads are too busy. It is more reliable to use
 profiling trace. These tools are independent of any work the JVM is performing.

 It may also be possible to identify some reasons for delays from the server
-logs, particularly looking at warnings from
-`org.elasticsearch.transport.InboundHandler` and
-`org.elasticsearch.transport.OutboundHandler`. Warnings about long processing
-times from the `InboundHandler` are particularly indicative of incorrect
-threading behaviour, whereas the transmission time reported by the
-`OutboundHandler` includes time spent waiting for network congestion and the
-`transport_worker` thread is free to do other work during this time.
+logs. See for instance the following loggers:
+
+`org.elasticsearch.transport.InboundHandler`:: This logger reports a warning if
+processing an inbound message occupies a network thread for unreasonably long,
+which is almost certainly a bug. The warning includes some information which
+can be used to identify the message that took unreasonably long to process.
+
+`org.elasticsearch.transport.OutboundHandler`:: This logger reports a warning
+if sending an outbound message takes longer than expected. This duration
+includes time spent waiting for network congestion to clear, and time spent
+processing other work on the same network thread, so does not always indicate
+the presence of a bug related to the outbound message specified in the log
+entry.
+
+`org.elasticsearch.common.network.ThreadWatchdog`:: This logger reports a
+warning and a thread dump when it notices that a network thread has not made
+progress between two consecutive checks, which is almost certainly a bug:
++
+--
+[source,text]
+----
+[WARN ][o.e.c.n.ThreadWatchdog ] the following threads are active but did not make progress in the preceding [5s]: [elasticsearch[instance-0000000004][transport_worker][T#1]]]
+[WARN ][o.e.c.n.ThreadWatchdog ] hot threads dump due to active threads not making progress [part 1]: H4sIAAAAAAAA/+1aa2/bOBb93l8hYLUYFWgYvWw5AQbYpEkn6STZbJyiwAwGA1qiY8US6ZJUHvPr90qk/JJly41TtDMuUIci...
+[WARN ][o.e.c.n.ThreadWatchdog ] hot threads dump due to active threads not making progress [part 2]: LfXL/x70a3eL8ve6Ral74ZBrp5x7HmUD9KXQz1MaXUNfFC6SeEysxSw1cNXL9JXYl3AigAE7ywbm/AZ+ll3Ox4qXJHNjVr6h...
+[WARN ][o.e.c.n.ThreadWatchdog ] hot threads dump due to active threads not making progress (gzip compressed, base64-encoded, and split into 2 parts on preceding log lines; ...
+----
+
+To reconstruct the thread dump, base64-decode the data and decompress it using `gzip`. For instance, on Unix-like systems:
+
+[source,sh]
+----
+cat watchdog.log | sed -e 's/.*://' | base64 --decode | gzip --decompress
+----
+
+This mechanism can be controlled with the following settings:
+
+`network.thread.watchdog.interval`:::
+(<<static-cluster-setting,Static>>, <<time-units,time value>>)
+Defines the interval between watchdog checks. Defaults to `5s`. Set to `0` to
+disable the network thread watchdog.
+
+`network.thread.watchdog.quiet_time`:::
+(<<static-cluster-setting,Static>>, <<time-units,time value>>)
+Defines the interval between watchdog warnings. Defaults to `10m`.
+--
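
For environments without the Unix tools shown above, the same reconstruction can be done with standard-library Java. This is only a sketch under the same assumptions as the shell one-liner: the relevant watchdog log lines have been saved to a file named watchdog.log, and everything after the last colon on each line is part of the base64 payload.

[source,java]
----
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Base64;
import java.util.zip.GZIPInputStream;

public class DecodeWatchdogDump {
    public static void main(String[] args) throws IOException {
        // Concatenate the base64 fragments, stripping the log prefix up to the last ':' on each line.
        StringBuilder base64 = new StringBuilder();
        for (String line : Files.readAllLines(Path.of("watchdog.log"))) {
            base64.append(line.substring(line.lastIndexOf(':') + 1).trim());
        }
        // Base64-decode, then gzip-decompress, to recover the plain-text thread dump.
        byte[] compressed = Base64.getDecoder().decode(base64.toString());
        try (GZIPInputStream gzip = new GZIPInputStream(new ByteArrayInputStream(compressed))) {
            System.out.println(new String(gzip.readAllBytes(), StandardCharsets.UTF_8));
        }
    }
}
----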

View file

@@ -101,12 +101,14 @@ GET my-index/_search
         },
         {
           "standard": {
+            "query": {
               "semantic": {
                 "field": "semantic_field",
                 "query": "shoes"
               }
+            }
           }
         }
       ],
       "rank_window_size": 50,
       "rank_constant": 20

View file

@@ -539,7 +539,7 @@ VPC's internet gateway and not be bandwidth limited by the VPC's NAT instance.
 ==== S3-compatible services

 There are a number of storage systems that provide an S3-compatible API, and
-the `repository-s3` type allows you to use these systems in place of AWS S3.
+the `s3` repository type allows you to use these systems in place of AWS S3.
 To do so, you should set the `s3.client.CLIENT_NAME.endpoint` setting to the
 system's endpoint. This setting accepts IP addresses and hostnames and may
 include a port. For example, the endpoint may be `172.17.0.2` or
@@ -552,7 +552,7 @@ you wish to use unsecured HTTP communication instead of HTTPS, set
 `s3.client.CLIENT_NAME.protocol` to `http`.

 https://minio.io[MinIO] is an example of a storage system that provides an
-S3-compatible API. The `repository-s3` type allows {es} to work with
+S3-compatible API. The `s3` repository type allows {es} to work with
 MinIO-backed repositories as well as repositories stored on AWS S3. Other
 S3-compatible storage systems may also work with {es}, but these are not
 covered by the {es} test suite.
@@ -562,7 +562,7 @@ which claim to offer an S3-compatible API despite failing to emulate S3's
 behaviour in full. If you are using such a system for your snapshots, consider
 using a <<snapshots-filesystem-repository,shared filesystem repository>> based
 on a standardized protocol such as NFS to access your storage system instead.
-The `repository-s3` type requires full compatibility with S3. In particular it
+The `s3` repository type requires full compatibility with S3. In particular it
 must support the same set of API endpoints, with the same parameters, return
 the same errors in case of failures, and offer consistency and performance at
 least as good as S3 even when accessed concurrently by multiple nodes. You will

View file

@@ -1,6 +1,6 @@
 // tag::own-deployment[]

-First ingest some sample data. In {kib}, open the main menu and select *Dev
+First, you'll need to ingest the sample data. In {kib}, open the main menu and select *Dev
 Tools*. Run the following two requests:

 [source,console]

View file

@@ -17,7 +17,7 @@ commons-codec = "commons-codec:commons-codec:1.11"
 commmons-io = "commons-io:commons-io:2.2"
 docker-compose = "com.avast.gradle:gradle-docker-compose-plugin:0.17.5"
 forbiddenApis = "de.thetaphi:forbiddenapis:3.6"
-gradle-enterprise = "com.gradle:gradle-enterprise-gradle-plugin:3.16.2"
+gradle-enterprise = "com.gradle:develocity-gradle-plugin:3.17.4"
 hamcrest = "org.hamcrest:hamcrest:2.1"
 httpcore = "org.apache.httpcomponents:httpcore:4.4.12"
 httpclient = "org.apache.httpcomponents:httpclient:4.5.14"

View file

@@ -741,9 +741,9 @@
          <sha256 value="48234cd74e35d91a31a683820a35b5b6d11b55527f32a5b162c6757408b95d7a" origin="Generated by Gradle"/>
       </artifact>
    </component>
-   <component group="com.gradle" name="gradle-enterprise-gradle-plugin" version="3.16.2">
-      <artifact name="gradle-enterprise-gradle-plugin-3.16.2.jar">
-         <sha256 value="e06ca9b1bf0fef710dc74ec969e5c0b4553b92a46224326165ceac0e5c37e0d3" origin="Generated by Gradle"/>
+   <component group="com.gradle" name="develocity-gradle-plugin" version="3.17.4">
+      <artifact name="develocity-gradle-plugin-3.17.4.jar">
+         <sha256 value="e2b3f8a191b0b401b75c2c4542d3d1719814a4212e6920fae4f2f940678bfd99" origin="Generated by Gradle"/>
       </artifact>
    </component>
    <component group="com.h2database" name="h2" version="1.4.197">

View file

@@ -0,0 +1,406 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.datastreams;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.datastreams.CreateDataStreamAction;
import org.elasticsearch.action.datastreams.GetDataStreamAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.time.DateFormatter;
import org.elasticsearch.common.time.FormatNames;
import org.elasticsearch.core.Strings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.indices.InvalidIndexTemplateException;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.xcontent.XContentType;
import org.hamcrest.Matchers;
import java.io.IOException;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.is;
public class LogsDataStreamIT extends ESSingleNodeTestCase {
private static final String LOGS_OR_STANDARD_MAPPING = """
{
"properties": {
"@timestamp" : {
"type": "date"
},
"hostname": {
"type": "keyword"
},
"pid": {
"type": "long"
},
"method": {
"type": "keyword"
},
"message": {
"type": "text"
},
"ip_address": {
"type": "ip"
}
}
}""";
private static final String TIME_SERIES_MAPPING = """
{
"properties": {
"@timestamp" : {
"type": "date"
},
"hostname": {
"type": "keyword",
"time_series_dimension": "true"
},
"pid": {
"type": "long",
"time_series_dimension": "true"
},
"method": {
"type": "keyword"
},
"ip_address": {
"type": "ip"
},
"cpu_usage": {
"type": "float",
"time_series_metric": "gauge"
}
}
}""";
private static final String LOG_DOC_TEMPLATE = """
{
"@timestamp": "%s",
"hostname": "%s",
"pid": "%d",
"method": "%s",
"message": "%s",
"ip_address": "%s"
}
""";
private static final String TIME_SERIES_DOC_TEMPLATE = """
{
"@timestamp": "%s",
"hostname": "%s",
"pid": "%d",
"method": "%s",
"ip_address": "%s",
"cpu_usage": "%f"
}
""";
private static String toIsoTimestamp(final Instant instant) {
return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant);
}
private static String createLogDocument(
final Instant timestamp,
final String hostname,
long pid,
final String method,
final String message,
final String ipAddress
) {
return Strings.format(LOG_DOC_TEMPLATE, toIsoTimestamp(timestamp), hostname, pid, method, message, ipAddress);
}
private static String createTimeSeriesDocument(
final Instant timestamp,
final String hostname,
long pid,
final String method,
final String ipAddress,
double cpuUsage
) {
return Strings.format(TIME_SERIES_DOC_TEMPLATE, toIsoTimestamp(timestamp), hostname, pid, method, ipAddress, cpuUsage);
}
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return List.of(DataStreamsPlugin.class, InternalSettingsPlugin.class);
}
public void testLogsIndexModeDataStreamIndexing() throws IOException, ExecutionException, InterruptedException {
putComposableIndexTemplate(
client(),
"logs-composable-template",
LOGS_OR_STANDARD_MAPPING,
Map.of("index.mode", "logs"),
List.of("logs-*-*")
);
final String dataStreamName = generateDataStreamName("logs");
createDataStream(client(), dataStreamName);
indexLogOrStandardDocuments(client(), randomIntBetween(10, 20), randomIntBetween(32, 64), dataStreamName);
rolloverDataStream(dataStreamName);
indexLogOrStandardDocuments(client(), randomIntBetween(10, 20), randomIntBetween(32, 64), dataStreamName);
}
public void testIndexModeLogsAndStandardSwitching() throws IOException, ExecutionException, InterruptedException {
final List<IndexMode> indexModes = new ArrayList<>();
final String dataStreamName = generateDataStreamName("logs");
indexModes.add(IndexMode.STANDARD);
putComposableIndexTemplate(
client(),
"logs-composable-template",
LOGS_OR_STANDARD_MAPPING,
Map.of("index.mode", "standard"),
List.of("logs-*-*")
);
createDataStream(client(), dataStreamName);
for (int i = 0; i < randomIntBetween(5, 10); i++) {
final IndexMode indexMode = i % 2 == 0 ? IndexMode.LOGS : IndexMode.STANDARD;
indexModes.add(indexMode);
updateComposableIndexTemplate(
client(),
"logs-composable-template",
LOGS_OR_STANDARD_MAPPING,
Map.of("index.mode", indexMode.getName()),
List.of("logs-*-*")
);
indexLogOrStandardDocuments(client(), randomIntBetween(10, 20), randomIntBetween(32, 64), dataStreamName);
rolloverDataStream(dataStreamName);
}
assertDataStreamBackingIndicesModes(dataStreamName, indexModes);
}
public void testIndexModeLogsAndTimeSeriesSwitching() throws IOException, ExecutionException, InterruptedException {
final String dataStreamName = generateDataStreamName("custom");
final List<String> indexPatterns = List.of("custom-*-*");
final Map<String, String> logsSettings = Map.of("index.mode", "logs");
final Map<String, String> timeSeriesSettings = Map.of("index.mode", "time_series", "index.routing_path", "hostname");
putComposableIndexTemplate(client(), "custom-composable-template", LOGS_OR_STANDARD_MAPPING, logsSettings, indexPatterns);
createDataStream(client(), dataStreamName);
indexLogOrStandardDocuments(client(), randomIntBetween(10, 20), randomIntBetween(32, 64), dataStreamName);
updateComposableIndexTemplate(client(), "custom-composable-template", TIME_SERIES_MAPPING, timeSeriesSettings, indexPatterns);
rolloverDataStream(dataStreamName);
indexTimeSeriesDocuments(client(), randomIntBetween(10, 20), randomIntBetween(32, 64), dataStreamName);
updateComposableIndexTemplate(client(), "custom-composable-template", LOGS_OR_STANDARD_MAPPING, logsSettings, indexPatterns);
rolloverDataStream(dataStreamName);
indexLogOrStandardDocuments(client(), randomIntBetween(10, 20), randomIntBetween(32, 64), dataStreamName);
assertDataStreamBackingIndicesModes(dataStreamName, List.of(IndexMode.LOGS, IndexMode.TIME_SERIES, IndexMode.LOGS));
}
public void testInvalidIndexModeTimeSeriesSwitchWithoutROutingPath() throws IOException, ExecutionException, InterruptedException {
final String dataStreamName = generateDataStreamName("custom");
final List<String> indexPatterns = List.of("custom-*-*");
final Map<String, String> logsSettings = Map.of("index.mode", "logs");
final Map<String, String> timeSeriesSettings = Map.of("index.mode", "time_series");
putComposableIndexTemplate(client(), "custom-composable-template", LOGS_OR_STANDARD_MAPPING, logsSettings, indexPatterns);
createDataStream(client(), dataStreamName);
indexLogOrStandardDocuments(client(), randomIntBetween(10, 20), randomIntBetween(32, 64), dataStreamName);
expectThrows(
InvalidIndexTemplateException.class,
() -> updateComposableIndexTemplate(
client(),
"custom-composable-template",
LOGS_OR_STANDARD_MAPPING,
timeSeriesSettings,
indexPatterns
)
);
}
public void testInvalidIndexModeTimeSeriesSwitchWithoutDimensions() throws IOException, ExecutionException, InterruptedException {
final String dataStreamName = generateDataStreamName("custom");
final List<String> indexPatterns = List.of("custom-*-*");
final Map<String, String> logsSettings = Map.of("index.mode", "logs");
final Map<String, String> timeSeriesSettings = Map.of("index.mode", "time_series", "index.routing_path", "hostname");
putComposableIndexTemplate(client(), "custom-composable-template", LOGS_OR_STANDARD_MAPPING, logsSettings, indexPatterns);
createDataStream(client(), dataStreamName);
indexLogOrStandardDocuments(client(), randomIntBetween(10, 20), randomIntBetween(32, 64), dataStreamName);
final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> {
updateComposableIndexTemplate(
client(),
"custom-composable-template",
LOGS_OR_STANDARD_MAPPING,
timeSeriesSettings,
indexPatterns
);
});
assertThat(
exception.getCause().getCause().getMessage(),
Matchers.equalTo(
"All fields that match routing_path must be configured with [time_series_dimension: true] or flattened fields with "
+ "a list of dimensions in [time_series_dimensions] and without the [script] parameter. [hostname] was not a dimension."
)
);
}
private void assertDataStreamBackingIndicesModes(final String dataStreamName, final List<IndexMode> modes) {
final GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName });
final GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest)
.actionGet();
final DataStream dataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream();
final DataStream.DataStreamIndices backingIndices = dataStream.getBackingIndices();
final Iterator<IndexMode> indexModesIterator = modes.iterator();
assertThat(backingIndices.getIndices().size(), Matchers.equalTo(modes.size()));
for (final Index index : backingIndices.getIndices()) {
final GetSettingsResponse getSettingsResponse = indicesAdmin().getSettings(
new GetSettingsRequest().indices(index.getName()).includeDefaults(true)
).actionGet();
final Settings settings = getSettingsResponse.getIndexToSettings().get(index.getName());
assertThat(settings.get("index.mode"), Matchers.equalTo(indexModesIterator.next().getName()));
}
}
final String generateDataStreamName(final String prefix) {
return String.format(Locale.ROOT, "%s-%s-%s", prefix, randomFrom("apache", "nginx", "system"), randomFrom("dev", "qa", "prod"));
}
private void rolloverDataStream(final String dataStreamName) {
assertAcked(indicesAdmin().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet());
}
private void indexLogOrStandardDocuments(
final Client client,
int numBulkRequests,
int numDocsPerBulkRequest,
final String dataStreamName
) {
{
for (int i = 0; i < numBulkRequests; i++) {
BulkRequest bulkRequest = new BulkRequest(dataStreamName);
for (int j = 0; j < numDocsPerBulkRequest; j++) {
var indexRequest = new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE);
final String doc = createLogDocument(
Instant.now(),
randomAlphaOfLength(7),
randomIntBetween(100, 200),
randomFrom("POST", "PUT", "GET"),
randomAlphaOfLengthBetween(256, 512),
InetAddresses.toAddrString(randomIp(randomBoolean()))
);
indexRequest.source(doc, XContentType.JSON);
bulkRequest.add(indexRequest);
}
final BulkResponse bulkResponse = client.bulk(bulkRequest).actionGet();
assertThat(bulkResponse.hasFailures(), is(false));
}
final BroadcastResponse refreshResponse = client.admin().indices().refresh(new RefreshRequest(dataStreamName)).actionGet();
assertThat(refreshResponse.getStatus(), is(RestStatus.OK));
}
}
private void indexTimeSeriesDocuments(
final Client client,
int numBulkRequests,
int numDocsPerBulkRequest,
final String dataStreamName
) {
{
for (int i = 0; i < numBulkRequests; i++) {
BulkRequest bulkRequest = new BulkRequest(dataStreamName);
for (int j = 0; j < numDocsPerBulkRequest; j++) {
var indexRequest = new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE);
final String doc = createTimeSeriesDocument(
Instant.now(),
randomAlphaOfLength(12),
randomIntBetween(100, 200),
randomFrom("POST", "PUT", "GET"),
InetAddresses.toAddrString(randomIp(randomBoolean())),
randomDoubleBetween(0.0D, 1.0D, false)
);
indexRequest.source(doc, XContentType.JSON);
bulkRequest.add(indexRequest);
}
final BulkResponse bulkResponse = client.bulk(bulkRequest).actionGet();
assertThat(bulkResponse.hasFailures(), is(false));
}
final BroadcastResponse refreshResponse = client.admin().indices().refresh(new RefreshRequest(dataStreamName)).actionGet();
assertThat(refreshResponse.getStatus(), is(RestStatus.OK));
}
}
private void createDataStream(final Client client, final String dataStreamName) throws InterruptedException, ExecutionException {
final CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName);
final AcknowledgedResponse createDataStreamResponse = client.execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest)
.get();
assertThat(createDataStreamResponse.isAcknowledged(), is(true));
}
private static void updateComposableIndexTemplate(
final Client client,
final String templateName,
final String mapping,
final Map<String, String> settings,
final List<String> indexPatterns
) throws IOException {
putComposableIndexTemplate(client, templateName, mapping, settings, indexPatterns);
}
private static void putComposableIndexTemplate(
final Client client,
final String templateName,
final String mapping,
final Map<String, String> settings,
final List<String> indexPatterns
) throws IOException {
final Settings.Builder templateSettings = Settings.builder();
for (Map.Entry<String, String> setting : settings.entrySet()) {
templateSettings.put(setting.getKey(), setting.getValue());
}
final TransportPutComposableIndexTemplateAction.Request putComposableTemplateRequest =
new TransportPutComposableIndexTemplateAction.Request(templateName);
putComposableTemplateRequest.indexTemplate(
ComposableIndexTemplate.builder()
.indexPatterns(indexPatterns)
.template(new Template(templateSettings.build(), new CompressedXContent(mapping), null))
.dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false))
.build()
);
final AcknowledgedResponse putComposableTemplateResponse = client.execute(
TransportPutComposableIndexTemplateAction.TYPE,
putComposableTemplateRequest
).actionGet();
assertThat(putComposableTemplateResponse.isAcknowledged(), is(true));
}
}

View file

@@ -0,0 +1,169 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.datastreams;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
* This base class provides the boilerplate to simplify the development of integration tests.
* Aside from providing useful helper methods and disabling unnecessary plugins,
* it waits until an {@linkplain #indexTemplateName() index template} is installed, which happens asynchronously in StackTemplateRegistry.
* This avoids race conditions leading to flaky tests by ensuring the template has been installed before executing the tests.
*/
public abstract class AbstractDataStreamIT extends ESRestTestCase {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
.setting("xpack.security.enabled", "false")
.setting("xpack.watcher.enabled", "false")
// Disable apm-data so the index templates it installs do not impact
// tests such as testIgnoreDynamicBeyondLimit.
.setting("xpack.apm_data.enabled", "false")
.build();
protected RestClient client;
static void waitForIndexTemplate(RestClient client, String indexTemplate) throws Exception {
assertBusy(() -> {
try {
Request request = new Request("GET", "_index_template/" + indexTemplate);
assertOK(client.performRequest(request));
} catch (ResponseException e) {
fail(e.getMessage());
}
});
}
static void createDataStream(RestClient client, String name) throws IOException {
Request request = new Request("PUT", "_data_stream/" + name);
assertOK(client.performRequest(request));
}
@SuppressWarnings("unchecked")
static String getWriteBackingIndex(RestClient client, String name) throws IOException {
Request request = new Request("GET", "_data_stream/" + name);
List<Object> dataStreams = (List<Object>) entityAsMap(client.performRequest(request)).get("data_streams");
Map<String, Object> dataStream = (Map<String, Object>) dataStreams.get(0);
List<Map<String, String>> indices = (List<Map<String, String>>) dataStream.get("indices");
return indices.get(0).get("index_name");
}
@SuppressWarnings("unchecked")
static Map<String, Object> getSettings(RestClient client, String indexName) throws IOException {
Request request = new Request("GET", "/" + indexName + "/_settings?flat_settings");
return ((Map<String, Map<String, Object>>) entityAsMap(client.performRequest(request)).get(indexName)).get("settings");
}
static void putMapping(RestClient client, String indexName) throws IOException {
Request request = new Request("PUT", "/" + indexName + "/_mapping");
request.setJsonEntity("""
{
"properties": {
"numeric_field": {
"type": "integer"
}
}
}
""");
assertOK(client.performRequest(request));
}
@SuppressWarnings("unchecked")
static Map<String, Object> getMappingProperties(RestClient client, String indexName) throws IOException {
Request request = new Request("GET", "/" + indexName + "/_mapping");
Map<String, Object> map = (Map<String, Object>) entityAsMap(client.performRequest(request)).get(indexName);
Map<String, Object> mappings = (Map<String, Object>) map.get("mappings");
return (Map<String, Object>) mappings.get("properties");
}
static void indexDoc(RestClient client, String dataStreamName, String doc) throws IOException {
Request request = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true");
request.setJsonEntity(doc);
assertOK(client.performRequest(request));
}
@SuppressWarnings("unchecked")
static List<Object> searchDocs(RestClient client, String dataStreamName, String query) throws IOException {
Request request = new Request("GET", "/" + dataStreamName + "/_search");
request.setJsonEntity(query);
Map<String, Object> hits = (Map<String, Object>) entityAsMap(client.performRequest(request)).get("hits");
return (List<Object>) hits.get("hits");
}
@SuppressWarnings("unchecked")
static Object getValueFromPath(Map<String, Object> map, List<String> path) {
Map<String, Object> current = map;
for (int i = 0; i < path.size(); i++) {
Object value = current.get(path.get(i));
if (i == path.size() - 1) {
return value;
}
if (value == null) {
throw new IllegalStateException("Path " + String.join(".", path) + " was not found in " + map);
}
if (value instanceof Map<?, ?> next) {
current = (Map<String, Object>) next;
} else {
throw new IllegalStateException(
"Failed to reach the end of the path "
+ String.join(".", path)
+ " last reachable field was "
+ path.get(i)
+ " in "
+ map
);
}
}
return current;
}
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@Override
protected Settings restAdminSettings() {
if (super.restAdminSettings().keySet().contains(ThreadContext.PREFIX + ".Authorization")) {
return super.restAdminSettings();
} else {
String token = basicAuthHeaderValue("admin", new SecureString("admin-password".toCharArray()));
return Settings.builder().put(super.restAdminSettings()).put(ThreadContext.PREFIX + ".Authorization", token).build();
}
}
@Before
public void setup() throws Exception {
client = client();
AbstractDataStreamIT.waitForIndexTemplate(client, indexTemplateName());
}
protected abstract String indexTemplateName();
@After
public void cleanUp() throws IOException {
adminClient().performRequest(new Request("DELETE", "_data_stream/*"));
}
}
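For reference, a minimal concrete subclass might look like the following sketch. The class name, test name, data stream name, and document are hypothetical and not part of this change; they only illustrate the contract: supply the template the base class waits for, then use the inherited helpers.
package org.elasticsearch.datastreams;
import java.util.List;
public class HypotheticalDataStreamIT extends AbstractDataStreamIT {
    @Override
    protected String indexTemplateName() {
        // setup() in the base class blocks until this template is installed, avoiding the race described above.
        return "logs";
    }
    public void testIndexAndSearch() throws Exception {
        // Matches the logs-*-* pattern, so the "logs" template the base class waited for applies.
        String dataStreamName = "logs-hypothetical-default";
        createDataStream(client, dataStreamName);
        // indexDoc refreshes the data stream, so the document is immediately searchable.
        indexDoc(client, dataStreamName, """
            { "@timestamp": "2024-06-10", "message": "hello" }
            """);
        List<Object> hits = searchDocs(client, dataStreamName, """
            { "query": { "match_all": {} } }
            """);
        assertEquals(1, hits.size());
    }
}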

View file

@ -26,7 +26,7 @@ import static org.elasticsearch.datastreams.LogsDataStreamIT.getValueFromPath;
import static org.elasticsearch.datastreams.LogsDataStreamIT.getWriteBackingIndex; import static org.elasticsearch.datastreams.LogsDataStreamIT.getWriteBackingIndex;
import static org.elasticsearch.datastreams.LogsDataStreamIT.indexDoc; import static org.elasticsearch.datastreams.LogsDataStreamIT.indexDoc;
import static org.elasticsearch.datastreams.LogsDataStreamIT.searchDocs; import static org.elasticsearch.datastreams.LogsDataStreamIT.searchDocs;
import static org.elasticsearch.datastreams.LogsDataStreamIT.waitForLogs; import static org.elasticsearch.datastreams.LogsDataStreamIT.waitForIndexTemplate;
import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.is;
public class EcsLogsDataStreamIT extends DisabledSecurityDataStreamTestCase { public class EcsLogsDataStreamIT extends DisabledSecurityDataStreamTestCase {
@ -38,7 +38,7 @@ public class EcsLogsDataStreamIT extends DisabledSecurityDataStreamTestCase {
@Before @Before
public void setup() throws Exception { public void setup() throws Exception {
client = client(); client = client();
waitForLogs(client); waitForIndexTemplate(client, "logs");
{ {
Request request = new Request("PUT", "/_ingest/pipeline/logs@custom"); Request request = new Request("PUT", "/_ingest/pipeline/logs@custom");

View file

@ -9,20 +9,7 @@
package org.elasticsearch.datastreams; package org.elasticsearch.datastreams;
import org.elasticsearch.client.Request; import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -35,46 +22,7 @@ import static org.hamcrest.Matchers.matchesRegex;
import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.nullValue;
public class LogsDataStreamIT extends ESRestTestCase { public class LogsDataStreamIT extends AbstractDataStreamIT {
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
.setting("xpack.security.enabled", "false")
.setting("xpack.watcher.enabled", "false")
// Disable apm-data so the index templates it installs do not impact
// tests such as testIgnoreDynamicBeyondLimit.
.setting("xpack.apm_data.enabled", "false")
.build();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@Override
protected Settings restAdminSettings() {
if (super.restAdminSettings().keySet().contains(ThreadContext.PREFIX + ".Authorization")) {
return super.restAdminSettings();
} else {
String token = basicAuthHeaderValue("admin", new SecureString("admin-password".toCharArray()));
return Settings.builder().put(super.restAdminSettings()).put(ThreadContext.PREFIX + ".Authorization", token).build();
}
}
private RestClient client;
@Before
public void setup() throws Exception {
client = client();
waitForLogs(client);
}
@After
public void cleanUp() throws IOException {
adminClient().performRequest(new Request("DELETE", "_data_stream/*"));
}
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public void testDefaultLogsSettingAndMapping() throws Exception { public void testDefaultLogsSettingAndMapping() throws Exception {
@ -791,97 +739,8 @@ public class LogsDataStreamIT extends ESRestTestCase {
assertThat(ignored.stream().filter(i -> i.startsWith("field") == false).toList(), empty()); assertThat(ignored.stream().filter(i -> i.startsWith("field") == false).toList(), empty());
} }
static void waitForLogs(RestClient client) throws Exception { @Override
assertBusy(() -> { protected String indexTemplateName() {
try { return "logs";
Request request = new Request("GET", "_index_template/logs");
assertOK(client.performRequest(request));
} catch (ResponseException e) {
fail(e.getMessage());
}
});
}
static void createDataStream(RestClient client, String name) throws IOException {
Request request = new Request("PUT", "_data_stream/" + name);
assertOK(client.performRequest(request));
}
@SuppressWarnings("unchecked")
static String getWriteBackingIndex(RestClient client, String name) throws IOException {
Request request = new Request("GET", "_data_stream/" + name);
List<Object> dataStreams = (List<Object>) entityAsMap(client.performRequest(request)).get("data_streams");
Map<String, Object> dataStream = (Map<String, Object>) dataStreams.get(0);
List<Map<String, String>> indices = (List<Map<String, String>>) dataStream.get("indices");
return indices.get(0).get("index_name");
}
@SuppressWarnings("unchecked")
static Map<String, Object> getSettings(RestClient client, String indexName) throws IOException {
Request request = new Request("GET", "/" + indexName + "/_settings?flat_settings");
return ((Map<String, Map<String, Object>>) entityAsMap(client.performRequest(request)).get(indexName)).get("settings");
}
static void putMapping(RestClient client, String indexName) throws IOException {
Request request = new Request("PUT", "/" + indexName + "/_mapping");
request.setJsonEntity("""
{
"properties": {
"numeric_field": {
"type": "integer"
}
}
}
""");
assertOK(client.performRequest(request));
}
@SuppressWarnings("unchecked")
static Map<String, Object> getMappingProperties(RestClient client, String indexName) throws IOException {
Request request = new Request("GET", "/" + indexName + "/_mapping");
Map<String, Object> map = (Map<String, Object>) entityAsMap(client.performRequest(request)).get(indexName);
Map<String, Object> mappings = (Map<String, Object>) map.get("mappings");
return (Map<String, Object>) mappings.get("properties");
}
static void indexDoc(RestClient client, String dataStreamName, String doc) throws IOException {
Request request = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true");
request.setJsonEntity(doc);
assertOK(client.performRequest(request));
}
@SuppressWarnings("unchecked")
static List<Object> searchDocs(RestClient client, String dataStreamName, String query) throws IOException {
Request request = new Request("GET", "/" + dataStreamName + "/_search");
request.setJsonEntity(query);
Map<String, Object> hits = (Map<String, Object>) entityAsMap(client.performRequest(request)).get("hits");
return (List<Object>) hits.get("hits");
}
@SuppressWarnings("unchecked")
static Object getValueFromPath(Map<String, Object> map, List<String> path) {
Map<String, Object> current = map;
for (int i = 0; i < path.size(); i++) {
Object value = current.get(path.get(i));
if (i == path.size() - 1) {
return value;
}
if (value == null) {
throw new IllegalStateException("Path " + String.join(".", path) + " was not found in " + map);
}
if (value instanceof Map<?, ?> next) {
current = (Map<String, Object>) next;
} else {
throw new IllegalStateException(
"Failed to reach the end of the path "
+ String.join(".", path)
+ " last reachable field was "
+ path.get(i)
+ " in "
+ map
);
}
}
return current;
} }
} }

View file

@ -0,0 +1,293 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.datastreams;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.time.DateFormatter;
import org.elasticsearch.common.time.FormatNames;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.Before;
import org.junit.ClassRule;
import java.io.IOException;
import java.net.InetAddress;
import java.time.Instant;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import static org.hamcrest.Matchers.is;
public class LogsDataStreamRestIT extends ESRestTestCase {
private static final String DATA_STREAM_NAME = "logs-apache-dev";
private RestClient client;
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.setting("xpack.security.enabled", "false")
.build();
@Override
protected String getTestRestCluster() {
return cluster.getHttpAddresses();
}
@Before
public void setup() throws Exception {
client = client();
waitForLogs(client);
}
private static void waitForLogs(RestClient client) throws Exception {
assertBusy(() -> {
try {
Request request = new Request("GET", "_index_template/logs");
assertOK(client.performRequest(request));
} catch (ResponseException e) {
fail(e.getMessage());
}
});
}
private static final String LOGS_TEMPLATE = """
{
"index_patterns": [ "logs-*-*" ],
"data_stream": {},
"priority": 201,
"composed_of": [ "logs@mappings", "logs@settings" ],
"template": {
"settings": {
"index": {
"mode": "logs"
}
},
"mappings": {
"properties": {
"@timestamp" : {
"type": "date"
},
"hostname": {
"type": "keyword"
},
"pid": {
"type": "long"
},
"method": {
"type": "keyword"
},
"message": {
"type": "text"
},
"ip_address": {
"type": "ip"
}
}
}
}
}""";
private static final String STANDARD_TEMPLATE = """
{
"index_patterns": [ "logs-*-*" ],
"data_stream": {},
"priority": 201,
"template": {
"settings": {
"index": {
"mode": "standard"
}
},
"mappings": {
"properties": {
"@timestamp" : {
"type": "date"
},
"hostname": {
"type": "keyword",
"time_series_dimension": "true"
},
"pid": {
"type": "long",
"time_series_dimension": "true"
},
"method": {
"type": "keyword"
},
"ip_address": {
"type": "ip"
}
}
}
}
}""";
private static final String DOC_TEMPLATE = """
{
"@timestamp": "%s",
"hostname": "%s",
"pid": "%d",
"method": "%s",
"message": "%s",
"ip_address": "%s"
}
""";
public void testLogsIndexing() throws IOException {
putTemplate(client, "custom-template", LOGS_TEMPLATE);
createDataStream(client, DATA_STREAM_NAME);
indexDocument(
client,
DATA_STREAM_NAME,
document(
Instant.now(),
randomAlphaOfLength(10),
randomNonNegativeLong(),
randomFrom("PUT", "POST", "GET"),
randomAlphaOfLength(32),
randomIp(randomBoolean())
)
);
assertDataStreamBackingIndexMode("logs", 0);
rolloverDataStream(client, DATA_STREAM_NAME);
indexDocument(
client,
DATA_STREAM_NAME,
document(
Instant.now(),
randomAlphaOfLength(10),
randomNonNegativeLong(),
randomFrom("PUT", "POST", "GET"),
randomAlphaOfLength(32),
randomIp(randomBoolean())
)
);
assertDataStreamBackingIndexMode("logs", 1);
}
public void testLogsStandardIndexModeSwitch() throws IOException {
putTemplate(client, "custom-template", LOGS_TEMPLATE);
createDataStream(client, DATA_STREAM_NAME);
indexDocument(
client,
DATA_STREAM_NAME,
document(
Instant.now(),
randomAlphaOfLength(10),
randomNonNegativeLong(),
randomFrom("PUT", "POST", "GET"),
randomAlphaOfLength(32),
randomIp(randomBoolean())
)
);
assertDataStreamBackingIndexMode("logs", 0);
putTemplate(client, "custom-template", STANDARD_TEMPLATE);
rolloverDataStream(client, DATA_STREAM_NAME);
indexDocument(
client,
DATA_STREAM_NAME,
document(
Instant.now(),
randomAlphaOfLength(10),
randomNonNegativeLong(),
randomFrom("PUT", "POST", "GET"),
randomAlphaOfLength(64),
randomIp(randomBoolean())
)
);
assertDataStreamBackingIndexMode("standard", 1);
putTemplate(client, "custom-template", LOGS_TEMPLATE);
rolloverDataStream(client, DATA_STREAM_NAME);
indexDocument(
client,
DATA_STREAM_NAME,
document(
Instant.now(),
randomAlphaOfLength(10),
randomNonNegativeLong(),
randomFrom("PUT", "POST", "GET"),
randomAlphaOfLength(32),
randomIp(randomBoolean())
)
);
assertDataStreamBackingIndexMode("logs", 2);
}
private void assertDataStreamBackingIndexMode(final String indexMode, int backingIndex) throws IOException {
assertThat(getSettings(client, getWriteBackingIndex(client, DATA_STREAM_NAME, backingIndex)).get("index.mode"), is(indexMode));
}
private String document(
final Instant timestamp,
final String hostname,
long pid,
final String method,
final String message,
final InetAddress ipAddress
) {
return String.format(
Locale.ROOT,
DOC_TEMPLATE,
DateFormatter.forPattern(FormatNames.DATE.getName()).format(timestamp),
hostname,
pid,
method,
message,
InetAddresses.toAddrString(ipAddress)
);
}
private static void createDataStream(final RestClient client, final String dataStreamName) throws IOException {
Request request = new Request("PUT", "_data_stream/" + dataStreamName);
assertOK(client.performRequest(request));
}
private static void putTemplate(final RestClient client, final String templateName, final String mappings) throws IOException {
final Request request = new Request("PUT", "/_index_template/" + templateName);
request.setJsonEntity(mappings);
assertOK(client.performRequest(request));
}
private static void indexDocument(final RestClient client, String dataStreamName, String doc) throws IOException {
final Request request = new Request("POST", "/" + dataStreamName + "/_doc?refresh=true");
request.setJsonEntity(doc);
assertOK(client.performRequest(request));
}
private static void rolloverDataStream(final RestClient client, final String dataStreamName) throws IOException {
final Request request = new Request("POST", "/" + dataStreamName + "/_rollover");
final Response response = client.performRequest(request);
assertOK(response);
assertThat(entityAsMap(response).get("rolled_over"), is(true));
}
@SuppressWarnings("unchecked")
private static String getWriteBackingIndex(final RestClient client, final String dataStreamName, int backingIndex) throws IOException {
final Request request = new Request("GET", "_data_stream/" + dataStreamName);
final List<Object> dataStreams = (List<Object>) entityAsMap(client.performRequest(request)).get("data_streams");
final Map<String, Object> dataStream = (Map<String, Object>) dataStreams.get(0);
final List<Map<String, String>> backingIndices = (List<Map<String, String>>) dataStream.get("indices");
return backingIndices.get(backingIndex).get("index_name");
}
@SuppressWarnings("unchecked")
private static Map<String, Object> getSettings(final RestClient client, final String indexName) throws IOException {
final Request request = new Request("GET", "/" + indexName + "/_settings?flat_settings");
return ((Map<String, Map<String, Object>>) entityAsMap(client.performRequest(request)).get(indexName)).get("settings");
}
}

View file

@ -0,0 +1,101 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.datastreams;
import org.elasticsearch.client.Request;
import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
public class MetricsDataStreamIT extends AbstractDataStreamIT {
@SuppressWarnings("unchecked")
public void testCustomMapping() throws Exception {
{
Request request = new Request("POST", "/_component_template/metrics@custom");
request.setJsonEntity("""
{
"template": {
"settings": {
"index": {
"query": {
"default_field": ["custom-message"]
}
}
},
"mappings": {
"properties": {
"numeric_field": {
"type": "integer"
},
"socket": {
"properties": {
"ip": {
"type": "keyword"
}
}
}
}
}
}
}
""");
assertOK(client.performRequest(request));
}
String dataStreamName = "metrics-generic-default";
createDataStream(client, dataStreamName);
String backingIndex = getWriteBackingIndex(client, dataStreamName);
// Verify that the custom settings.index.query.default_field overrides the default query field - "message"
Map<String, Object> settings = getSettings(client, backingIndex);
assertThat(settings.get("index.query.default_field"), is(List.of("custom-message")));
// Verify that the new field from the custom component template is applied
putMapping(client, backingIndex);
Map<String, Object> mappingProperties = getMappingProperties(client, backingIndex);
assertThat(getValueFromPath(mappingProperties, List.of("numeric_field", "type")), equalTo("integer"));
assertThat(getValueFromPath(mappingProperties, List.of("socket", "properties", "ip", "type")), is("keyword"));
// Insert valid doc and verify successful indexing
{
indexDoc(client, dataStreamName, """
{
"@timestamp": "2024-06-10",
"test": "doc-with-ip",
"socket": {
"ip": "127.0.0.1"
}
}
""");
List<Object> results = searchDocs(client, dataStreamName, """
{
"query": {
"term": {
"test": {
"value": "doc-with-ip"
}
}
},
"fields": ["socket.ip"]
}
""");
Map<String, Object> fields = ((Map<String, Map<String, Object>>) results.get(0)).get("_source");
assertThat(fields.get("socket"), is(Map.of("ip", "127.0.0.1")));
}
}
@Override
protected String indexTemplateName() {
return "metrics";
}
}

View file

@ -12,6 +12,7 @@ import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.ingest.PutPipelineRequest; import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.ingest.common.IngestCommonPlugin; import org.elasticsearch.ingest.common.IngestCommonPlugin;
import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.IngestPlugin;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
@ -99,7 +100,11 @@ public class DocumentSizeObserverWithPipelinesIT extends ESIntegTestCase {
} }
@Override @Override
public DocumentSizeReporter newDocumentSizeReporter(String indexName, DocumentSizeAccumulator documentSizeAccumulator) { public DocumentSizeReporter newDocumentSizeReporter(
String indexName,
IndexMode indexMode,
DocumentSizeAccumulator documentSizeAccumulator
) {
return DocumentSizeReporter.EMPTY_INSTANCE; return DocumentSizeReporter.EMPTY_INSTANCE;
} }
}; };

View file

@ -188,7 +188,6 @@ public class IngestGeoIpPlugin extends Plugin implements IngestPlugin, SystemInd
.setSettings( .setSettings(
Settings.builder() Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1")
.build() .build()
) )

View file

@ -762,11 +762,6 @@ public class SearchAsYouTypeFieldMapper extends FieldMapper {
return subfieldsAndMultifieldsIterator(); return subfieldsAndMultifieldsIterator();
} }
@Override
protected SyntheticSourceMode syntheticSourceMode() {
return SyntheticSourceMode.FALLBACK;
}
/** /**
* An analyzer wrapper to add a shingle token filter, an edge ngram token filter or both to its wrapped analyzer. When adding an edge * An analyzer wrapper to add a shingle token filter, an edge ngram token filter or both to its wrapped analyzer. When adding an edge
* ngrams token filter, it also adds a {@link TrailingShingleTokenFilter} to add extra position increments at the end of the stream * ngrams token filter, it also adds a {@link TrailingShingleTokenFilter} to add extra position increments at the end of the stream

View file

@ -215,9 +215,4 @@ public class TokenCountFieldMapper extends FieldMapper {
public FieldMapper.Builder getMergeBuilder() { public FieldMapper.Builder getMergeBuilder() {
return new Builder(simpleName()).init(this); return new Builder(simpleName()).init(this);
} }
@Override
protected SyntheticSourceMode syntheticSourceMode() {
return SyntheticSourceMode.FALLBACK;
}
} }

View file

@ -0,0 +1,49 @@
setup:
- requires:
cluster_features: ["mapper.source.synthetic_source_fallback"]
reason: introduced in 8.15.0
- do:
indices.create:
index: test
body:
mappings:
_source:
mode: synthetic
properties:
pagerank:
type: rank_feature
---
"synthetic source sanity test":
- do:
index:
index: test
id: "1"
body:
pagerank: 10
- do:
index:
index: test
id: "2"
body:
pagerank: null
- do:
indices.refresh: {}
- do:
get:
index: test
id: "1"
- match: { _source.pagerank: 10 }
- do:
get:
index: test
id: "2"
- match: { _source.pagerank: null }

View file

@ -0,0 +1,56 @@
setup:
- requires:
cluster_features: ["mapper.source.synthetic_source_fallback"]
reason: introduced in 8.15.0
- do:
indices.create:
index: test
body:
mappings:
_source:
mode: synthetic
properties:
tags:
type: rank_features
---
"synthetic source sanity test":
- do:
index:
index: test
id: "1"
body:
tags:
foo: 3
bar: 5
- do:
index:
index: test
id: "2"
body:
tags: []
- do:
indices.refresh: {}
- do:
get:
index: test
id: "1"
- match:
_source:
tags:
foo: 3
bar: 5
- do:
get:
index: test
id: "2"
- match: { _source.tags: [] }

View file

@ -16,7 +16,7 @@ esplugin {
restResources { restResources {
restApi { restApi {
include '_common', 'bulk', 'cluster', 'nodes', 'indices', 'index', 'search' include '_common', 'bulk', 'cluster', 'get', 'nodes', 'indices', 'index', 'search'
} }
} }

View file

@ -1,10 +1,9 @@
unsupported: supported:
- requires: - requires:
cluster_features: ["gte_v8.3.0"] cluster_features: ["mapper.source.synthetic_source_fallback"]
reason: introduced in 8.3.0 reason: introduced in 8.15.0
- do: - do:
catch: bad_request
indices.create: indices.create:
index: test index: test
body: body:
@ -16,3 +15,42 @@ unsupported:
type: join type: join
relations: relations:
parent: child parent: child
- do:
index:
index: test
id: "1"
body: {"foo": "bar", "join_field": {"name" : "parent"} }
- do:
index:
index: test
id: "2"
routing: "1"
body: {"zab": "baz", "join_field": { "name" : "child", "parent": "1"} }
- do:
indices.refresh: {}
- do:
get:
index: test
id: "1"
- match:
_source:
foo: "bar"
join_field:
name: "parent"
- do:
get:
index: test
id: "2"
- match:
_source:
join_field:
name: "child"
parent: "1"
zab: "baz"

View file

@ -20,7 +20,7 @@ dependencies {
restResources { restResources {
restApi { restApi {
include '_common', 'indices', 'index', 'search', 'msearch' include '_common', 'get', 'indices', 'index', 'search', 'msearch'
} }
} }

View file

@ -126,3 +126,41 @@
document: document:
foo.bar: value foo.bar: value
- match: { hits.total.value: 1 } - match: { hits.total.value: 1 }
---
"Synthetic source":
- requires:
cluster_features: ["mapper.source.synthetic_source_fallback"]
reason: introduced in 8.15.0
- do:
indices.create:
index: queries_index
body:
mappings:
_source:
mode: synthetic
properties:
query:
type: percolator
- do:
index:
index: queries_index
id: test_percolator
body:
query:
match_all: {}
- do:
indices.refresh: {}
- do:
get:
index: queries_index
id: "test_percolator"
- match:
_source:
query:
match_all: {}

View file

@ -31,6 +31,7 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.network.ThreadWatchdog;
import org.elasticsearch.core.Booleans; import org.elasticsearch.core.Booleans;
import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Tuple; import org.elasticsearch.core.Tuple;
@ -56,6 +57,7 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
private static final Logger logger = LogManager.getLogger(Netty4HttpPipeliningHandler.class); private static final Logger logger = LogManager.getLogger(Netty4HttpPipeliningHandler.class);
private final int maxEventsHeld; private final int maxEventsHeld;
private final ThreadWatchdog.ActivityTracker activityTracker;
private final PriorityQueue<Tuple<? extends Netty4HttpResponse, ChannelPromise>> outboundHoldingQueue; private final PriorityQueue<Tuple<? extends Netty4HttpResponse, ChannelPromise>> outboundHoldingQueue;
private record ChunkedWrite(PromiseCombiner combiner, ChannelPromise onDone, ChunkedRestResponseBodyPart responseBodyPart) {} private record ChunkedWrite(PromiseCombiner combiner, ChannelPromise onDone, ChunkedRestResponseBodyPart responseBodyPart) {}
@ -90,14 +92,21 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
* @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is
* required as events cannot queue up indefinitely * required as events cannot queue up indefinitely
*/ */
public Netty4HttpPipeliningHandler(final int maxEventsHeld, final Netty4HttpServerTransport serverTransport) { public Netty4HttpPipeliningHandler(
final int maxEventsHeld,
final Netty4HttpServerTransport serverTransport,
final ThreadWatchdog.ActivityTracker activityTracker
) {
this.maxEventsHeld = maxEventsHeld; this.maxEventsHeld = maxEventsHeld;
this.activityTracker = activityTracker;
this.outboundHoldingQueue = new PriorityQueue<>(1, Comparator.comparingInt(t -> t.v1().getSequence())); this.outboundHoldingQueue = new PriorityQueue<>(1, Comparator.comparingInt(t -> t.v1().getSequence()));
this.serverTransport = serverTransport; this.serverTransport = serverTransport;
} }
@Override @Override
public void channelRead(final ChannelHandlerContext ctx, final Object msg) { public void channelRead(final ChannelHandlerContext ctx, final Object msg) {
activityTracker.startActivity();
try {
assert msg instanceof FullHttpRequest : "Should have fully aggregated message already but saw [" + msg + "]"; assert msg instanceof FullHttpRequest : "Should have fully aggregated message already but saw [" + msg + "]";
final FullHttpRequest fullHttpRequest = (FullHttpRequest) msg; final FullHttpRequest fullHttpRequest = (FullHttpRequest) msg;
final Netty4HttpRequest netty4HttpRequest; final Netty4HttpRequest netty4HttpRequest;
@ -115,6 +124,9 @@ public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {
netty4HttpRequest = new Netty4HttpRequest(readSequence++, fullHttpRequest); netty4HttpRequest = new Netty4HttpRequest(readSequence++, fullHttpRequest);
} }
handlePipelinedRequest(ctx, netty4HttpRequest); handlePipelinedRequest(ctx, netty4HttpRequest);
} finally {
activityTracker.stopActivity();
}
} }
// protected so tests can override it // protected so tests can override it

View file

@ -38,6 +38,7 @@ import org.apache.logging.log4j.Logger;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.network.CloseableChannel; import org.elasticsearch.common.network.CloseableChannel;
import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.ThreadWatchdog;
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
@ -94,6 +95,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
private final TLSConfig tlsConfig; private final TLSConfig tlsConfig;
private final AcceptChannelHandler.AcceptPredicate acceptChannelPredicate; private final AcceptChannelHandler.AcceptPredicate acceptChannelPredicate;
private final HttpValidator httpValidator; private final HttpValidator httpValidator;
private final ThreadWatchdog threadWatchdog;
private final int readTimeoutMillis; private final int readTimeoutMillis;
private final int maxCompositeBufferComponents; private final int maxCompositeBufferComponents;
@ -130,6 +132,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
this.tlsConfig = tlsConfig; this.tlsConfig = tlsConfig;
this.acceptChannelPredicate = acceptChannelPredicate; this.acceptChannelPredicate = acceptChannelPredicate;
this.httpValidator = httpValidator; this.httpValidator = httpValidator;
this.threadWatchdog = networkService.getThreadWatchdog();
this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
@ -381,7 +384,15 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
if (handlingSettings.compression()) { if (handlingSettings.compression()) {
ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.compressionLevel())); ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.compressionLevel()));
} }
ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(transport.pipeliningMaxEvents, transport)); ch.pipeline()
.addLast(
"pipelining",
new Netty4HttpPipeliningHandler(
transport.pipeliningMaxEvents,
transport,
transport.threadWatchdog.getActivityTrackerForCurrentThread()
)
);
transport.serverAcceptedChannel(nettyHttpChannel); transport.serverAcceptedChannel(nettyHttpChannel);
} }

View file

@ -15,6 +15,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.network.ThreadWatchdog;
import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.RefCounted;
import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Releasables;
import org.elasticsearch.transport.InboundPipeline; import org.elasticsearch.transport.InboundPipeline;
@ -30,9 +31,16 @@ public class Netty4MessageInboundHandler extends ChannelInboundHandlerAdapter {
private final InboundPipeline pipeline; private final InboundPipeline pipeline;
public Netty4MessageInboundHandler(Netty4Transport transport, InboundPipeline inboundPipeline) { private final ThreadWatchdog.ActivityTracker activityTracker;
public Netty4MessageInboundHandler(
Netty4Transport transport,
InboundPipeline inboundPipeline,
ThreadWatchdog.ActivityTracker activityTracker
) {
this.transport = transport; this.transport = transport;
this.pipeline = inboundPipeline; this.pipeline = inboundPipeline;
this.activityTracker = activityTracker;
} }
@Override @Override
@ -44,8 +52,11 @@ public class Netty4MessageInboundHandler extends ChannelInboundHandlerAdapter {
final ByteBuf buffer = (ByteBuf) msg; final ByteBuf buffer = (ByteBuf) msg;
Netty4TcpChannel channel = ctx.channel().attr(Netty4Transport.CHANNEL_KEY).get(); Netty4TcpChannel channel = ctx.channel().attr(Netty4Transport.CHANNEL_KEY).get();
final BytesReference wrapped = Netty4Utils.toBytesReference(buffer); final BytesReference wrapped = Netty4Utils.toBytesReference(buffer);
activityTracker.startActivity();
try (ReleasableBytesReference reference = new ReleasableBytesReference(wrapped, new ByteBufRefCounted(buffer))) { try (ReleasableBytesReference reference = new ReleasableBytesReference(wrapped, new ByteBufRefCounted(buffer))) {
pipeline.handleBytes(channel, reference); pipeline.handleBytes(channel, reference);
} finally {
activityTracker.stopActivity();
} }
} }

View file

@ -30,6 +30,7 @@ import org.elasticsearch.TransportVersion;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.ThreadWatchdog;
import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
@ -78,6 +79,8 @@ public class Netty4Transport extends TcpTransport {
private volatile SharedGroupFactory.SharedGroup sharedGroup; private volatile SharedGroupFactory.SharedGroup sharedGroup;
protected final boolean remoteClusterPortEnabled; protected final boolean remoteClusterPortEnabled;
private final ThreadWatchdog threadWatchdog;
public Netty4Transport( public Netty4Transport(
Settings settings, Settings settings,
TransportVersion version, TransportVersion version,
@ -92,6 +95,7 @@ public class Netty4Transport extends TcpTransport {
Netty4Utils.setAvailableProcessors(EsExecutors.allocatedProcessors(settings)); Netty4Utils.setAvailableProcessors(EsExecutors.allocatedProcessors(settings));
NettyAllocator.logAllocatorDescriptionIfNeeded(); NettyAllocator.logAllocatorDescriptionIfNeeded();
this.sharedGroupFactory = sharedGroupFactory; this.sharedGroupFactory = sharedGroupFactory;
this.threadWatchdog = networkService.getThreadWatchdog();
// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
this.receivePredictorMin = Netty4Plugin.NETTY_RECEIVE_PREDICTOR_MIN.get(settings); this.receivePredictorMin = Netty4Plugin.NETTY_RECEIVE_PREDICTOR_MIN.get(settings);
@ -125,6 +129,7 @@ public class Netty4Transport extends TcpTransport {
bindServer(profileSettings); bindServer(profileSettings);
} }
} }
threadWatchdog.run(settings, threadPool, lifecycle);
success = true; success = true;
} finally { } finally {
if (success == false) { if (success == false) {
@ -354,7 +359,14 @@ public class Netty4Transport extends TcpTransport {
pipeline.addLast("logging", ESLoggingHandler.INSTANCE); pipeline.addLast("logging", ESLoggingHandler.INSTANCE);
} }
pipeline.addLast("chunked_writer", new Netty4WriteThrottlingHandler(getThreadPool().getThreadContext())); pipeline.addLast("chunked_writer", new Netty4WriteThrottlingHandler(getThreadPool().getThreadContext()));
pipeline.addLast("dispatcher", new Netty4MessageInboundHandler(this, getInboundPipeline(ch, isRemoteClusterServerChannel))); pipeline.addLast(
"dispatcher",
new Netty4MessageInboundHandler(
this,
getInboundPipeline(ch, isRemoteClusterServerChannel),
threadWatchdog.getActivityTrackerForCurrentThread()
)
);
} }
protected InboundPipeline getInboundPipeline(Channel ch, boolean isRemoteClusterServerChannel) { protected InboundPipeline getInboundPipeline(Channel ch, boolean isRemoteClusterServerChannel) {

View file

@ -34,6 +34,8 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.bytes.ZeroBytesReference; import org.elasticsearch.common.bytes.ZeroBytesReference;
import org.elasticsearch.common.network.ThreadWatchdog;
import org.elasticsearch.common.network.ThreadWatchdogHelper;
import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.http.HttpResponse; import org.elasticsearch.http.HttpResponse;
import org.elasticsearch.rest.ChunkedRestResponseBodyPart; import org.elasticsearch.rest.ChunkedRestResponseBodyPart;
@ -53,11 +55,14 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.IntStream; import java.util.stream.IntStream;
import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.instanceOf;
@ -120,7 +125,7 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
} }
private EmbeddedChannel makeEmbeddedChannelWithSimulatedWork(int numberOfRequests) { private EmbeddedChannel makeEmbeddedChannelWithSimulatedWork(int numberOfRequests) {
return new EmbeddedChannel(new Netty4HttpPipeliningHandler(numberOfRequests, null) { return new EmbeddedChannel(new Netty4HttpPipeliningHandler(numberOfRequests, null, new ThreadWatchdog.ActivityTracker()) {
@Override @Override
protected void handlePipelinedRequest(ChannelHandlerContext ctx, Netty4HttpRequest pipelinedRequest) { protected void handlePipelinedRequest(ChannelHandlerContext ctx, Netty4HttpRequest pipelinedRequest) {
ctx.fireChannelRead(pipelinedRequest); ctx.fireChannelRead(pipelinedRequest);
@ -186,7 +191,9 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
public void testPipeliningRequestsAreReleased() { public void testPipeliningRequestsAreReleased() {
final int numberOfRequests = 10; final int numberOfRequests = 10;
final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(numberOfRequests + 1, null)); final EmbeddedChannel embeddedChannel = new EmbeddedChannel(
new Netty4HttpPipeliningHandler(numberOfRequests + 1, null, new ThreadWatchdog.ActivityTracker())
);
for (int i = 0; i < numberOfRequests; i++) { for (int i = 0; i < numberOfRequests; i++) {
embeddedChannel.writeInbound(createHttpRequest("/" + i)); embeddedChannel.writeInbound(createHttpRequest("/" + i));
@ -473,6 +480,30 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
assertThat(messagesSeen.get(1), instanceOf(DefaultHttpContent.class)); assertThat(messagesSeen.get(1), instanceOf(DefaultHttpContent.class));
} }
public void testActivityTracking() {
final var watchdog = new ThreadWatchdog();
final var activityTracker = watchdog.getActivityTrackerForCurrentThread();
final var requestHandled = new AtomicBoolean();
final var handler = new Netty4HttpPipeliningHandler(Integer.MAX_VALUE, mock(Netty4HttpServerTransport.class), activityTracker) {
@Override
protected void handlePipelinedRequest(ChannelHandlerContext ctx, Netty4HttpRequest pipelinedRequest) {
// thread is not idle while handling the request
assertThat(ThreadWatchdogHelper.getStuckThreadNames(watchdog), empty());
assertThat(ThreadWatchdogHelper.getStuckThreadNames(watchdog), equalTo(List.of(Thread.currentThread().getName())));
ctx.fireChannelRead(pipelinedRequest);
assertTrue(requestHandled.compareAndSet(false, true));
}
};
final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new ChannelDuplexHandler(), handler);
embeddedChannel.writeInbound(createHttpRequest("/test"));
assertTrue(requestHandled.get());
// thread is now idle
assertThat(ThreadWatchdogHelper.getStuckThreadNames(watchdog), empty());
assertThat(ThreadWatchdogHelper.getStuckThreadNames(watchdog), empty());
}
// assert that a message of the given number of repeated chunks is found at the given index in the list and each chunk is equal to // assert that a message of the given number of repeated chunks is found at the given index in the list and each chunk is equal to
// the given BytesReference // the given BytesReference
private static void assertChunkedMessageAtIndex(List<Object> messagesSeen, int index, int chunks, BytesReference chunkBytes) { private static void assertChunkedMessageAtIndex(List<Object> messagesSeen, int index, int chunks, BytesReference chunkBytes) {
@ -494,7 +525,11 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
} }
private Netty4HttpPipeliningHandler getTestHttpHandler() { private Netty4HttpPipeliningHandler getTestHttpHandler() {
return new Netty4HttpPipeliningHandler(Integer.MAX_VALUE, mock(Netty4HttpServerTransport.class)) { return new Netty4HttpPipeliningHandler(
Integer.MAX_VALUE,
mock(Netty4HttpServerTransport.class),
new ThreadWatchdog.ActivityTracker()
) {
@Override @Override
protected void handlePipelinedRequest(ChannelHandlerContext ctx, Netty4HttpRequest pipelinedRequest) { protected void handlePipelinedRequest(ChannelHandlerContext ctx, Netty4HttpRequest pipelinedRequest) {
ctx.fireChannelRead(pipelinedRequest); ctx.fireChannelRead(pipelinedRequest);

View file

@ -5,9 +5,6 @@ tests:
- class: "org.elasticsearch.cluster.coordination.CoordinatorVotingConfigurationTests" - class: "org.elasticsearch.cluster.coordination.CoordinatorVotingConfigurationTests"
issue: "https://github.com/elastic/elasticsearch/issues/108729" issue: "https://github.com/elastic/elasticsearch/issues/108729"
method: "testClusterUUIDLogging" method: "testClusterUUIDLogging"
- class: "org.elasticsearch.xpack.core.ssl.SSLConfigurationReloaderTests"
issue: "https://github.com/elastic/elasticsearch/issues/108774"
method: "testReloadingKeyStore"
- class: "org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT" - class: "org.elasticsearch.xpack.esql.ccq.MultiClusterSpecIT"
issue: "https://github.com/elastic/elasticsearch/issues/108808" issue: "https://github.com/elastic/elasticsearch/issues/108808"
method: "test {k8s-metrics.MetricsWithAggs}" method: "test {k8s-metrics.MetricsWithAggs}"
@ -19,7 +16,8 @@ tests:
method: "testGuessIsDayFirstFromLocale" method: "testGuessIsDayFirstFromLocale"
- class: "org.elasticsearch.test.rest.ClientYamlTestSuiteIT" - class: "org.elasticsearch.test.rest.ClientYamlTestSuiteIT"
issue: "https://github.com/elastic/elasticsearch/issues/108857" issue: "https://github.com/elastic/elasticsearch/issues/108857"
method: "test {yaml=search/180_locale_dependent_mapping/Test Index and Search locale dependent mappings / dates}" method: "test {yaml=search/180_locale_dependent_mapping/Test Index and Search locale\
\ dependent mappings / dates}"
- class: "org.elasticsearch.upgrades.SearchStatesIT" - class: "org.elasticsearch.upgrades.SearchStatesIT"
issue: "https://github.com/elastic/elasticsearch/issues/108991" issue: "https://github.com/elastic/elasticsearch/issues/108991"
method: "testCanMatch" method: "testCanMatch"
@ -28,7 +26,8 @@ tests:
method: "testTrainedModelInference" method: "testTrainedModelInference"
- class: "org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT" - class: "org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT"
issue: "https://github.com/elastic/elasticsearch/issues/109188" issue: "https://github.com/elastic/elasticsearch/issues/109188"
method: "test {yaml=search/180_locale_dependent_mapping/Test Index and Search locale dependent mappings / dates}" method: "test {yaml=search/180_locale_dependent_mapping/Test Index and Search locale\
\ dependent mappings / dates}"
- class: "org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT" - class: "org.elasticsearch.xpack.esql.qa.mixed.EsqlClientYamlIT"
issue: "https://github.com/elastic/elasticsearch/issues/109189" issue: "https://github.com/elastic/elasticsearch/issues/109189"
method: "test {p0=esql/70_locale/Date format with Italian locale}" method: "test {p0=esql/70_locale/Date format with Italian locale}"
@ -43,7 +42,8 @@ tests:
method: "testTimestampFieldTypeExposedByAllIndicesServices" method: "testTimestampFieldTypeExposedByAllIndicesServices"
- class: "org.elasticsearch.analysis.common.CommonAnalysisClientYamlTestSuiteIT" - class: "org.elasticsearch.analysis.common.CommonAnalysisClientYamlTestSuiteIT"
issue: "https://github.com/elastic/elasticsearch/issues/109318" issue: "https://github.com/elastic/elasticsearch/issues/109318"
method: "test {yaml=analysis-common/50_char_filters/pattern_replace error handling (too complex pattern)}" method: "test {yaml=analysis-common/50_char_filters/pattern_replace error handling\
\ (too complex pattern)}"
- class: "org.elasticsearch.xpack.ml.integration.ClassificationHousePricingIT" - class: "org.elasticsearch.xpack.ml.integration.ClassificationHousePricingIT"
issue: "https://github.com/elastic/elasticsearch/issues/101598" issue: "https://github.com/elastic/elasticsearch/issues/101598"
method: "testFeatureImportanceValues" method: "testFeatureImportanceValues"
@ -59,6 +59,9 @@ tests:
- class: org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAppendTests - class: org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvAppendTests
method: testEvaluateBlockWithoutNulls {TestCase=<cartesian_shape>, <cartesian_shape>} method: testEvaluateBlockWithoutNulls {TestCase=<cartesian_shape>, <cartesian_shape>}
issue: https://github.com/elastic/elasticsearch/issues/109409 issue: https://github.com/elastic/elasticsearch/issues/109409
- class: "org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT"
issue: "https://github.com/elastic/elasticsearch/issues/109478"
method: "test {yaml=reference/esql/processing-commands/lookup/line_31}"
# Examples: # Examples:
# #

View file

@ -7,7 +7,7 @@
*/ */
plugins { plugins {
id "com.gradle.enterprise" version "3.16.2" id "com.gradle.develocity" version "3.17.4"
} }
// Include all subdirectories as example projects // Include all subdirectories as example projects

View file

@ -10,7 +10,7 @@ apply plugin: 'elasticsearch.internal-yaml-rest-test'
restResources { restResources {
restApi { restApi {
include '_common', 'bulk', 'count', 'cluster', 'field_caps', 'knn_search', 'index', 'indices', 'msearch', include '_common', 'bulk', 'count', 'cluster', 'field_caps', 'get', 'knn_search', 'index', 'indices', 'msearch',
'search', 'async_search', 'graph', '*_point_in_time', 'info', 'scroll', 'clear_scroll', 'search_mvt', 'eql', 'sql' 'search', 'async_search', 'graph', '*_point_in_time', 'info', 'scroll', 'clear_scroll', 'search_mvt', 'eql', 'sql'
} }
restTests { restTests {

View file

@ -0,0 +1,82 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.upgrades;
import com.carrotsearch.randomizedtesting.annotations.Name;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.junit.ClassRule;
import java.io.IOException;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
public class UpgradeWithOldIndexSettingsIT extends ParameterizedFullClusterRestartTestCase {
protected static LocalClusterConfigProvider clusterConfig = c -> {};
@ClassRule
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
.distribution(DistributionType.DEFAULT)
.version(getOldClusterTestVersion())
.nodes(2)
.setting("xpack.security.enabled", "false")
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
.apply(() -> clusterConfig)
.build();
@Override
protected ElasticsearchCluster getUpgradeCluster() {
return cluster;
}
public UpgradeWithOldIndexSettingsIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) {
super(upgradeStatus);
}
public void testMapperDynamicIndexSetting() throws IOException {
assumeTrue(
"Setting deprecated in 6.x, but remained in 7.x and is no longer defined in 8.x",
getOldClusterTestVersion().before("8.0.0")
);
String indexName = "my-index";
if (isRunningAgainstOldCluster()) {
createIndex(indexName);
var request = new Request("PUT", "/my-index/_settings");
request.setJsonEntity(org.elasticsearch.common.Strings.toString(Settings.builder().put("index.mapper.dynamic", true).build()));
request.setOptions(
expectWarnings(
"[index.mapper.dynamic] setting was deprecated in Elasticsearch and will be removed in a future release! "
+ "See the breaking changes documentation for the next major version."
)
);
assertOK(client().performRequest(request));
} else {
var indexSettings = getIndexSettings(indexName);
assertThat(XContentMapValues.extractValue(indexName + ".settings.index.mapper.dynamic", indexSettings), equalTo("true"));
ensureGreen(indexName);
// New indices can never define the index.mapper.dynamic setting.
Exception e = expectThrows(
ResponseException.class,
() -> createIndex("my-index2", Settings.builder().put("index.mapper.dynamic", true).build())
);
assertThat(e.getMessage(), containsString("unknown setting [index.mapper.dynamic]"));
}
}
}

View file

@ -0,0 +1,160 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.upgrades;
import com.carrotsearch.randomizedtesting.annotations.Name;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.Strings;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentType;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.function.Predicate;
import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM;
/**
 * This IT indexes some dense vectors on an old node, then updates their mapping and, once upgraded, checks that kNN search still
 * works before and after indexing further data. A sketch of what such a kNN check could look like follows the class.
*/
public class DenseVectorMappingUpdateIT extends AbstractRollingUpgradeTestCase {
private static final String BULK1 = """
{"index": {"_id": "1"}}
{"embedding": [1, 1, 1, 1]}
{"index": {"_id": "2"}}
{"embedding": [1, 1, 1, 2]}
{"index": {"_id": "3"}}
{"embedding": [1, 1, 1, 3]}
{"index": {"_id": "4"}}
{"embedding": [1, 1, 1, 4]}
{"index": {"_id": "5"}}
{"embedding": [1, 1, 1, 5]}
{"index": {"_id": "6"}}
{"embedding": [1, 1, 1, 6]}
{"index": {"_id": "7"}}
{"embedding": [1, 1, 1, 7]}
{"index": {"_id": "8"}}
{"embedding": [1, 1, 1, 8]}
{"index": {"_id": "9"}}
{"embedding": [1, 1, 1, 9]}
{"index": {"_id": "10"}}
{"embedding": [1, 1, 1, 10]}
""";
private static final String BULK2 = """
{"index": {"_id": "11"}}
{"embedding": [1, 0, 1, 1]}
{"index": {"_id": "12"}}
{"embedding": [1, 2, 1, 1]}
{"index": {"_id": "13"}}
{"embedding": [1, 3, 1, 1]}
{"index": {"_id": "14"}}
{"embedding": [1, 4, 1, 1]}
{"index": {"_id": "15"}}
{"embedding": [1, 5, 1, 1]}
{"index": {"_id": "16"}}
{"embedding": [1, 6, 1, 1]}
{"index": {"_id": "17"}}
{"embedding": [1, 7, 1, 1]}
{"index": {"_id": "18"}}
{"embedding": [1, 8, 1, 1]}
{"index": {"_id": "19"}}
{"embedding": [1, 9, 1, 1]}
{"index": {"_id": "20"}}
{"embedding": [1, 10, 1, 1]}
""";
public DenseVectorMappingUpdateIT(@Name("upgradedNodes") int upgradedNodes) {
super(upgradedNodes);
}
public void testDenseVectorMappingUpdateOnOldCluster() throws IOException {
if (getOldClusterTestVersion().after(Version.V_8_7_0.toString())) {
String indexName = "test_index";
if (isOldCluster()) {
Request createIndex = new Request("PUT", "/" + indexName);
XContentBuilder mappings = XContentBuilder.builder(XContentType.JSON.xContent())
.startObject()
.startObject("mappings")
.startObject("properties")
.startObject("embedding")
.field("type", "dense_vector")
.field("dims", 4)
.startObject("index_options")
.field("type", "hnsw")
.endObject()
.endObject()
.endObject()
.endObject()
.endObject();
createIndex.setJsonEntity(Strings.toString(mappings));
client().performRequest(createIndex);
Request index = new Request("POST", "/" + indexName + "/_bulk/");
index.addParameter("refresh", "true");
index.setJsonEntity(BULK1);
client().performRequest(index);
}
int expectedCount = 10;
assertCount("test_index", expectedCount);
if (isUpgradedCluster() && clusterSupportsDenseVectorTypeUpdate()) {
Request updateMapping = new Request("PUT", "/" + indexName + "/_mapping");
XContentBuilder mappings = XContentBuilder.builder(XContentType.JSON.xContent())
.startObject()
.startObject("properties")
.startObject("embedding")
.field("type", "dense_vector")
.field("dims", 4)
.startObject("index_options")
.field("type", "int8_hnsw")
.endObject()
.endObject()
.endObject()
.endObject();
updateMapping.setJsonEntity(Strings.toString(mappings));
assertOK(client().performRequest(updateMapping));
Request index = new Request("POST", "/" + indexName + "/_bulk/");
index.addParameter("refresh", "true");
index.setJsonEntity(BULK2);
assertOK(client().performRequest(index));
expectedCount = 20;
assertCount("test_index", expectedCount);
}
}
}
private void assertCount(String index, int count) throws IOException {
Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search");
searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true");
searchTestIndexRequest.addParameter("filter_path", "hits.total");
Response searchTestIndexResponse = client().performRequest(searchTestIndexRequest);
assertEquals(
"{\"hits\":{\"total\":" + count + "}}",
EntityUtils.toString(searchTestIndexResponse.getEntity(), StandardCharsets.UTF_8)
);
}
private boolean clusterSupportsDenseVectorTypeUpdate() throws IOException {
Map<?, ?> response = entityAsMap(client().performRequest(new Request("GET", "_nodes")));
Map<?, ?> nodes = (Map<?, ?>) response.get("nodes");
Predicate<Map<?, ?>> nodeSupportsDenseVectorTypeUpdate = n -> Version.fromString(n.get("version").toString()).onOrAfter(Version.V_8_14_0);
return nodes.values().stream().map(o -> (Map<?, ?>) o).allMatch(nodeSupportsDenseVectorTypeUpdate);
}
}
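For reference (not part of the test above, which only verifies document counts), a KNN search against this mapping would look roughly like the following; the query vector, k and num_candidates values are arbitrary:
POST /test_index/_search
{
  "knn": {
    "field": "embedding",
    "query_vector": [1, 1, 1, 1],
    "k": 5,
    "num_candidates": 20
  }
}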

View file

@ -22,6 +22,8 @@ import java.io.IOException;
import java.util.Map; import java.util.Map;
import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.is;
public class UpgradeWithOldIndexSettingsIT extends AbstractRollingUpgradeTestCase { public class UpgradeWithOldIndexSettingsIT extends AbstractRollingUpgradeTestCase {
@ -102,6 +104,38 @@ public class UpgradeWithOldIndexSettingsIT extends AbstractRollingUpgradeTestCas
} }
} }
public void testMapperDynamicIndexSetting() throws IOException {
assumeTrue(
"Setting deprecated in 6.x, but remained in 7.x and is no longer defined in 8.x",
getOldClusterTestVersion().before("8.0.0")
);
String indexName = "my-index";
if (isOldCluster()) {
createIndex(indexName);
Request request = new Request("PUT", "/" + indexName + "/_settings");
request.setJsonEntity(org.elasticsearch.common.Strings.toString(Settings.builder().put("index.mapper.dynamic", true).build()));
request.setOptions(
expectWarnings(
"[index.mapper.dynamic] setting was deprecated in Elasticsearch and will be removed in a future release! "
+ "See the breaking changes documentation for the next major version."
)
);
assertOK(client().performRequest(request));
} else {
if (isUpgradedCluster()) {
var indexSettings = getIndexSettings(indexName);
assertThat(XContentMapValues.extractValue(indexName + ".settings.index.mapper.dynamic", indexSettings), equalTo("true"));
ensureGreen(indexName);
// New indices can never define the index.mapper.dynamic setting.
Exception e = expectThrows(
ResponseException.class,
() -> createIndex("my-index2", Settings.builder().put("index.mapper.dynamic", true).build())
);
assertThat(e.getMessage(), containsString("unknown setting [index.mapper.dynamic]"));
}
}
}
private void assertCount(String index, int countAtLeast) throws IOException { private void assertCount(String index, int countAtLeast) throws IOException {
Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search");
searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true");

View file

@ -72,7 +72,6 @@ public class SystemIndicesQA extends Plugin implements SystemIndexPlugin, Action
.setSettings( .setSettings(
Settings.builder() Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1")
.build() .build()
) )
@ -95,7 +94,6 @@ public class SystemIndicesQA extends Plugin implements SystemIndexPlugin, Action
.setSettings( .setSettings(
Settings.builder() Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1")
.build() .build()
) )

View file

@ -1,16 +1,18 @@
{ {
"inference.delete":{ "inference.delete": {
"documentation":{ "documentation": {
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html", "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-inference-api.html",
"description":"Delete an inference endpoint" "description": "Delete an inference endpoint"
}, },
"stability":"experimental", "stability": "experimental",
"visibility":"public", "visibility": "public",
"headers":{ "headers": {
"accept": [ "application/json"] "accept": [
"application/json"
]
}, },
"url":{ "url": {
"paths":[ "paths": [
{ {
"path": "/_inference/{inference_id}", "path": "/_inference/{inference_id}",
"methods": [ "methods": [
@ -24,22 +26,34 @@
} }
}, },
{ {
"path":"/_inference/{task_type}/{inference_id}", "path": "/_inference/{task_type}/{inference_id}",
"methods":[ "methods": [
"DELETE" "DELETE"
], ],
"parts":{ "parts": {
"task_type":{ "task_type": {
"type":"string", "type": "string",
"description":"The task type" "description": "The task type"
}, },
"inference_id":{ "inference_id": {
"type":"string", "type": "string",
"description":"The inference Id" "description": "The inference Id"
} }
} }
} }
] ]
},
"params": {
"dry_run": {
"type": "boolean",
"description": "If true the endpoint will not be deleted and a list of ingest processors which reference this endpoint will be returned.",
"required": false
},
"force": {
"type": "boolean",
"description": "If true the endpoint will be forcefully stopped (regardless of whether or not it is referenced by any ingest processors or semantic text fields).",
"required": false
}
} }
} }
} }
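For illustration only (the task type and endpoint name below are made up), a dry run of this API looks like:
DELETE /_inference/sparse_embedding/my-elser-endpoint?dry_run=true
Passing force=true instead deletes the endpoint even if ingest processors or semantic_text fields still reference it.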

View file

@ -0,0 +1,42 @@
{
"query_rule.put": {
"documentation": {
"url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/put-query-rule.html",
"description": "Creates or updates a query rule within a ruleset."
},
"stability": "experimental",
"visibility": "public",
"headers": {
"accept": [
"application/json"
],
"content_type": [
"application/json"
]
},
"url": {
"paths": [
{
"path": "/_query_rules/{ruleset_id}/{rule_id}",
"methods": [
"PUT"
],
"parts": {
"ruleset_id": {
"type": "string",
"description": "The unique identifier of the ruleset this rule should be added to. The ruleset will be created if it does not exist."
},
"rule_id": {
"type": "string",
"description": "The unique identifier of the rule to be created or updated."
}
}
}
]
},
"body": {
"description": "The query rule configuration, including the type of rule, the criteria to match the rule, and the action that should be taken if the rule matches.",
"required": true
}
}
}
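As a sketch only (the ruleset id, rule id, criteria and action values below are made up; the authoritative body schema is in the documentation linked above), a request to this endpoint might look like:
PUT /_query_rules/my-ruleset/my-rule-1
{
  "type": "pinned",
  "criteria": [
    {
      "type": "exact",
      "metadata": "user_query",
      "values": ["pugs"]
    }
  ],
  "actions": {
    "ids": ["id1", "id2"]
  }
}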

View file

@ -18,6 +18,11 @@
} }
] ]
}, },
"params":{} "params":{
"master_timeout":{
"type":"time",
"description":"Timeout for connection to master"
}
}
} }
} }

View file

@ -18,7 +18,16 @@
} }
] ]
}, },
"params":{}, "params":{
"master_timeout":{
"type":"time",
"description":"Timeout for connection to master"
},
"timeout":{
"type":"time",
"description":"Timeout for acknowledgements from all nodes"
}
},
"body":{ "body":{
"description": "An object with the new settings for each index, if any", "description": "An object with the new settings for each index, if any",
"required": true "required": true

View file

@ -0,0 +1,23 @@
{
"transform.get_node_stats":{
"documentation":{
"url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/get-transform-node-stats.html",
"description":"Retrieves transform usage information for transform nodes."
},
"stability":"stable",
"visibility":"public",
"headers":{
"accept": [ "application/json"]
},
"url":{
"paths":[
{
"path":"/_transform/_node_stats",
"methods":[
"GET"
]
}
]
}
}
}
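Usage is a plain GET with no path parameters or body:
GET /_transform/_node_stats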

View file

@ -1108,3 +1108,35 @@ flattened field with ignore_above:
key7: "key7" key7: "key7"
- is_false: fields - is_false: fields
---
completion:
- requires:
cluster_features: ["mapper.source.synthetic_source_fallback"]
reason: introduced in 8.15.0
- do:
indices.create:
index: test
body:
mappings:
_source:
mode: synthetic
properties:
completion:
type: completion
- do:
index:
index: test
id: 1
refresh: true
body:
completion: "the quick brown fox"
- do:
get:
index: test
id: 1
- match: { _source.completion: "the quick brown fox" }

View file

@ -382,3 +382,92 @@
query: query:
exists: exists:
field: ml.tokens field: ml.tokens
---
"sparse_vector synthetic source":
- requires:
cluster_features: ["mapper.source.synthetic_source_fallback"]
reason: introduced in 8.15.0
- do:
indices.create:
index: test
body:
mappings:
_source:
mode: synthetic
properties:
ml.tokens:
type: sparse_vector
- match: { acknowledged: true }
- do:
index:
index: test
id: "1"
body:
ml:
tokens:
running: 2.4097164
good: 2.170997
run: 2.052153
race: 1.4575411
for: 1.1908325
- match: { result: "created" }
- do:
index:
index: test
id: "2"
body:
ml:
tokens: []
- match: { result: "created" }
- do:
index:
index: test
id: "3"
body:
ml:
tokens: {}
- match: { result: "created" }
- do:
indices.refresh: { }
- do:
get:
index: test
id: "1"
- match:
_source:
ml:
tokens:
running: 2.4097164
good: 2.170997
run: 2.052153
race: 1.4575411
for: 1.1908325
- do:
get:
index: test
id: "2"
- match:
_source.ml.tokens: []
- do:
get:
index: test
id: "3"
- match:
_source.ml.tokens: {}

View file

@ -24,6 +24,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import java.util.HashSet; import java.util.HashSet;
import java.util.List;
import java.util.Set; import java.util.Set;
import java.util.stream.Collectors; import java.util.stream.Collectors;
@ -70,6 +71,8 @@ public class PrevalidateShardPathIT extends ESIntegTestCase {
} }
// Check that after relocation the source node doesn't have the shard path // Check that after relocation the source node doesn't have the shard path
String node3 = internalCluster().startDataOnlyNode(); String node3 = internalCluster().startDataOnlyNode();
ensureStableCluster(4);
logger.info("Relocating shards from the node {}", node2);
updateIndexSettings(Settings.builder().put("index.routing.allocation.exclude._name", node2), indexName); updateIndexSettings(Settings.builder().put("index.routing.allocation.exclude._name", node2), indexName);
ensureGreen(indexName); ensureGreen(indexName);
assertBusy(() -> { assertBusy(() -> {
@ -82,13 +85,20 @@ public class PrevalidateShardPathIT extends ESIntegTestCase {
assertTrue("There should be no failures in the response", resp.failures().isEmpty()); assertTrue("There should be no failures in the response", resp.failures().isEmpty());
Set<ShardId> node2ShardIds = resp2.getNodes().get(0).getShardIds(); Set<ShardId> node2ShardIds = resp2.getNodes().get(0).getShardIds();
if (node2ShardIds.size() > 0) { if (node2ShardIds.size() > 0) {
for (var node2Shard : clusterService().state() logger.info(
"Relocation source node {} should have no shards after the relocation, but still got {}",
node2Id,
node2ShardIds
);
List<ShardRouting> node2Shards = clusterService().state()
.routingTable() .routingTable()
.allShards() .allShards()
.filter(s -> s.getIndexName().equals(indexName)) .filter(s -> s.getIndexName().equals(indexName))
.filter(s -> node2ShardIds.contains(s.shardId())) .filter(s -> node2ShardIds.contains(s.shardId()))
.filter(s -> s.currentNodeId().equals(node2Id)) .filter(s -> s.currentNodeId().equals(node2Id))
.toList()) { .toList();
logger.info("Found {} shards on the relocation source node {} in the cluster state", node2Shards, node2Id);
for (var node2Shard : node2Shards) {
var explanation = ClusterAllocationExplanationUtils.getClusterAllocationExplanation( var explanation = ClusterAllocationExplanationUtils.getClusterAllocationExplanation(
client(), client(),
node2Shard.getIndexName(), node2Shard.getIndexName(),
@ -109,6 +119,7 @@ public class PrevalidateShardPathIT extends ESIntegTestCase {
// If for whatever reason the removal is not triggered (e.g. not enough nodes reported that the shards are active) or it // If for whatever reason the removal is not triggered (e.g. not enough nodes reported that the shards are active) or it
// temporarily failed to clean up the shard folder, we need to trigger another cluster state change for this removal to // temporarily failed to clean up the shard folder, we need to trigger another cluster state change for this removal to
// finally succeed. // finally succeed.
logger.info("Triggering an extra cluster state update");
updateIndexSettings( updateIndexSettings(
Settings.builder().put("index.routing.allocation.exclude.name", "non-existent" + randomAlphaOfLength(5)), Settings.builder().put("index.routing.allocation.exclude.name", "non-existent" + randomAlphaOfLength(5)),
indexName indexName

View file

@ -0,0 +1,163 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.common.network;
import org.apache.logging.log4j.core.LogEvent;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.support.SubscribableListener;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.RunOnce;
import org.elasticsearch.features.NodeFeature;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestToXContentListener;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.MockLog;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.function.Predicate;
import java.util.function.Supplier;
public class ThreadWatchdogIT extends ESIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(ThreadWatchdog.NETWORK_THREAD_WATCHDOG_INTERVAL.getKey(), "100ms")
.put(ThreadWatchdog.NETWORK_THREAD_WATCHDOG_QUIET_TIME.getKey(), "0")
.build();
}
@SuppressWarnings("unchecked")
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return CollectionUtils.appendToCopyNoNullElements(
super.nodePlugins(),
SlowRequestProcessingPlugin.class,
MockTransportService.TestPlugin.class
);
}
@Override
protected boolean addMockHttpTransport() {
return false;
}
public static class SlowRequestProcessingPlugin extends Plugin implements ActionPlugin {
@Override
public Collection<RestHandler> getRestHandlers(
Settings settings,
NamedWriteableRegistry namedWriteableRegistry,
RestController restController,
ClusterSettings clusterSettings,
IndexScopedSettings indexScopedSettings,
SettingsFilter settingsFilter,
IndexNameExpressionResolver indexNameExpressionResolver,
Supplier<DiscoveryNodes> nodesInCluster,
Predicate<NodeFeature> clusterSupportsFeature
) {
return List.of(new RestHandler() {
@Override
public List<Route> routes() {
return List.of(Route.builder(RestRequest.Method.POST, "_slow").build());
}
@Override
public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) {
blockAndWaitForWatchdogLogs();
new RestToXContentListener<>(channel).onResponse((b, p) -> b.startObject().endObject());
}
});
}
}
private static void blockAndWaitForWatchdogLogs() {
final var threadName = Thread.currentThread().getName();
final var logsSeenLatch = new CountDownLatch(2);
final var warningSeen = new RunOnce(logsSeenLatch::countDown);
final var threadDumpSeen = new RunOnce(logsSeenLatch::countDown);
MockLog.assertThatLogger(() -> safeAwait(logsSeenLatch), ThreadWatchdog.class, new MockLog.LoggingExpectation() {
@Override
public void match(LogEvent event) {
final var formattedMessage = event.getMessage().getFormattedMessage();
if (formattedMessage.contains("the following threads are active but did not make progress in the preceding [100ms]:")
&& formattedMessage.contains(threadName)) {
warningSeen.run();
}
if (formattedMessage.contains("hot threads dump due to active threads not making progress")) {
threadDumpSeen.run();
}
}
@Override
public void assertMatched() {}
});
}
public void testThreadWatchdogHttpLogging() throws IOException {
ESRestTestCase.assertOK(getRestClient().performRequest(new Request("POST", "_slow")));
}
public void testThreadWatchdogTransportLogging() {
internalCluster().ensureAtLeastNumDataNodes(2);
final var transportServiceIterator = internalCluster().getInstances(TransportService.class).iterator();
final var sourceTransportService = transportServiceIterator.next();
final var targetTransportService = transportServiceIterator.next();
targetTransportService.registerRequestHandler(
"internal:slow",
EsExecutors.DIRECT_EXECUTOR_SERVICE,
TransportRequest.Empty::new,
(request, channel, task) -> {
blockAndWaitForWatchdogLogs();
channel.sendResponse(TransportResponse.Empty.INSTANCE);
}
);
safeAwait(
SubscribableListener.newForked(
l -> sourceTransportService.sendRequest(
targetTransportService.getLocalNode(),
"internal:slow",
new TransportRequest.Empty(),
new ActionListenerResponseHandler<TransportResponse>(
l,
in -> TransportResponse.Empty.INSTANCE,
EsExecutors.DIRECT_EXECUTOR_SERVICE
)
)
)
);
}
}

View file

@ -9,6 +9,7 @@
package org.elasticsearch.plugins.internal; package org.elasticsearch.plugins.internal;
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngine;
@ -103,6 +104,7 @@ public class DocumentSizeObserverIT extends ESIntegTestCase {
DocumentSizeReporter documentParsingReporter = documentParsingProvider.newDocumentSizeReporter( DocumentSizeReporter documentParsingReporter = documentParsingProvider.newDocumentSizeReporter(
shardId.getIndexName(), shardId.getIndexName(),
IndexMode.STANDARD,
DocumentSizeAccumulator.EMPTY_INSTANCE DocumentSizeAccumulator.EMPTY_INSTANCE
); );
documentParsingReporter.onIndexingCompleted(index.parsedDoc()); documentParsingReporter.onIndexingCompleted(index.parsedDoc());
@ -132,7 +134,11 @@ public class DocumentSizeObserverIT extends ESIntegTestCase {
} }
@Override @Override
public DocumentSizeReporter newDocumentSizeReporter(String indexName, DocumentSizeAccumulator documentSizeAccumulator) { public DocumentSizeReporter newDocumentSizeReporter(
String indexName,
IndexMode indexMode,
DocumentSizeAccumulator documentSizeAccumulator
) {
return new TestDocumentSizeReporter(indexName); return new TestDocumentSizeReporter(indexName);
} }
}; };

View file

@ -25,8 +25,8 @@ import org.elasticsearch.client.Cancellable;
import org.elasticsearch.client.Request; import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
@ -187,6 +187,14 @@ public class FieldCapabilitiesIT extends ESIntegTestCase {
return List.of(TestMapperPlugin.class, ExceptionOnRewriteQueryPlugin.class, BlockingOnRewriteQueryPlugin.class); return List.of(TestMapperPlugin.class, ExceptionOnRewriteQueryPlugin.class, BlockingOnRewriteQueryPlugin.class);
} }
@Override
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal, otherSettings))
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
.build();
}
@Override @Override
protected boolean addMockHttpTransport() { protected boolean addMockHttpTransport() {
return false; // enable http return false; // enable http
@ -529,23 +537,31 @@ public class FieldCapabilitiesIT extends ESIntegTestCase {
closeShardNoCheck(indexShard, randomBoolean()); closeShardNoCheck(indexShard, randomBoolean());
} else if (randomBoolean()) { } else if (randomBoolean()) {
final ShardId shardId = indexShard.shardId(); final ShardId shardId = indexShard.shardId();
final String[] nodeNames = internalCluster().getNodeNames();
final String newNodeName = randomValueOtherThanMany(n -> nodeName.equals(n) == false, () -> randomFrom(nodeNames)); final var targetNodes = new ArrayList<String>();
DiscoveryNode fromNode = null; for (final var targetIndicesService : internalCluster().getInstances(IndicesService.class)) {
DiscoveryNode toNode = null; final var targetNode = targetIndicesService.clusterService().localNode();
for (DiscoveryNode node : clusterService().state().nodes()) { if (targetNode.canContainData() && targetIndicesService.getShardOrNull(shardId) == null) {
if (node.getName().equals(nodeName)) { targetNodes.add(targetNode.getId());
fromNode = node;
}
if (node.getName().equals(newNodeName)) {
toNode = node;
} }
} }
assertNotNull(fromNode);
assertNotNull(toNode); if (targetNodes.isEmpty()) {
continue;
}
safeGet(
clusterAdmin().prepareReroute() clusterAdmin().prepareReroute()
.add(new MoveAllocationCommand(shardId.getIndexName(), shardId.id(), fromNode.getId(), toNode.getId())) .add(
.get(); new MoveAllocationCommand(
shardId.getIndexName(),
shardId.id(),
indicesService.clusterService().localNode().getId(),
randomFrom(targetNodes)
)
)
.execute()
);
} }
} }
} }
@ -570,7 +586,7 @@ public class FieldCapabilitiesIT extends ESIntegTestCase {
if (randomBoolean()) { if (randomBoolean()) {
request.indexFilter(QueryBuilders.rangeQuery("timestamp").gte("2020-01-01")); request.indexFilter(QueryBuilders.rangeQuery("timestamp").gte("2020-01-01"));
} }
final FieldCapabilitiesResponse response = client().execute(TransportFieldCapabilitiesAction.TYPE, request).actionGet(); final FieldCapabilitiesResponse response = safeGet(client().execute(TransportFieldCapabilitiesAction.TYPE, request));
assertThat(response.getIndices(), arrayContainingInAnyOrder("log-index-1", "log-index-2")); assertThat(response.getIndices(), arrayContainingInAnyOrder("log-index-1", "log-index-2"));
assertThat(response.getField("field1"), aMapWithSize(2)); assertThat(response.getField("field1"), aMapWithSize(2));
assertThat(response.getField("field1"), hasKey("long")); assertThat(response.getField("field1"), hasKey("long"));

View file

@ -81,7 +81,9 @@ import java.util.stream.Stream;
import static org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS; import static org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS;
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.notNullValue;
@LuceneTestCase.SuppressFileSystems(value = "HandleLimitFS") // we sometimes have >2048 open files @LuceneTestCase.SuppressFileSystems(value = "HandleLimitFS") // we sometimes have >2048 open files
@ -468,17 +470,20 @@ public class SnapshotStressTestsIT extends AbstractSnapshotIntegTestCase {
restoreSpecificIndicesTmp = true; restoreSpecificIndicesTmp = true;
continue; continue;
} }
if (randomBoolean() && localReleasables.add(tryAcquireAllPermits(indices.get(indexName).permits)) != null) { final var trackedIndex = indices.get(indexName);
if (randomBoolean() && localReleasables.add(tryAcquireAllPermits(trackedIndex.permits)) != null) {
indicesToRestoreList.add(indexName); indicesToRestoreList.add(indexName);
final int snapshotShardCount = snapshotInfo.indexSnapshotDetails().get(indexName).getShardCount(); final int snapshotShardCount = snapshotInfo.indexSnapshotDetails().get(indexName).getShardCount();
final int indexShardCount = indices.get(indexName).shardCount; final int indexShardCount = trackedIndex.shardCount;
if (snapshotShardCount == indexShardCount && randomBoolean()) { if (snapshotShardCount == indexShardCount
&& randomBoolean()
&& localReleasables.add(trackedIndex.tryAcquireClosingPermit()) != null) {
indicesToCloseList.add(indexName); indicesToCloseList.add(indexName);
} else { } else {
indicesToDeleteList.add(indexName); indicesToDeleteList.add(indexName);
indices.get(indexName).shardCount = snapshotShardCount; trackedIndex.shardCount = snapshotShardCount;
} }
} else { } else {
restoreSpecificIndicesTmp = true; restoreSpecificIndicesTmp = true;
@ -994,7 +999,9 @@ public class SnapshotStressTestsIT extends AbstractSnapshotIntegTestCase {
boolean snapshotSpecificIndicesTmp = randomBoolean(); boolean snapshotSpecificIndicesTmp = randomBoolean();
final List<String> targetIndexNames = new ArrayList<>(indices.size()); final List<String> targetIndexNames = new ArrayList<>(indices.size());
for (TrackedIndex trackedIndex : indices.values()) { for (TrackedIndex trackedIndex : indices.values()) {
if (usually() && releasableAfterStart.add(tryAcquirePermit(trackedIndex.permits)) != null) { if (usually()
&& releasableAfterStart.add(tryAcquirePermit(trackedIndex.permits)) != null
&& localReleasables.add(trackedIndex.tryAcquirePartialSnapshottingPermit()) != null) {
targetIndexNames.add(trackedIndex.indexName); targetIndexNames.add(trackedIndex.indexName);
} else { } else {
snapshotSpecificIndicesTmp = true; snapshotSpecificIndicesTmp = true;
@ -1550,6 +1557,40 @@ public class SnapshotStressTestsIT extends AbstractSnapshotIntegTestCase {
}); });
} }
/**
* We must not close an index while it's being partially snapshotted; this counter tracks the number of ongoing
* close operations (positive) or partial snapshot operations (negative) in order to avoid them happening concurrently.
* <p>
* This is only a problem for partial snapshots because we release the index permit once a partial snapshot has started. With
* non-partial snapshots we retain the index permit until it completes which blocks other operations.
*/
private final AtomicInteger closingOrPartialSnapshottingCount = new AtomicInteger();
private static boolean closingPermitAvailable(int value) {
return value >= 0 && value != Integer.MAX_VALUE;
}
private static boolean partialSnapshottingPermitAvailable(int value) {
return value <= 0 && value != Integer.MIN_VALUE;
}
Releasable tryAcquireClosingPermit() {
final var previous = closingOrPartialSnapshottingCount.getAndUpdate(c -> closingPermitAvailable(c) ? c + 1 : c);
if (closingPermitAvailable(previous)) {
return () -> assertThat(closingOrPartialSnapshottingCount.getAndDecrement(), greaterThan(0));
} else {
return null;
}
}
Releasable tryAcquirePartialSnapshottingPermit() {
final var previous = closingOrPartialSnapshottingCount.getAndUpdate(c -> partialSnapshottingPermitAvailable(c) ? c - 1 : c);
if (partialSnapshottingPermitAvailable(previous)) {
return () -> assertThat(closingOrPartialSnapshottingCount.getAndIncrement(), lessThan(0));
} else {
return null;
}
}
} }
} }

View file

@ -430,7 +430,8 @@ module org.elasticsearch.server {
org.elasticsearch.indices.IndicesFeatures, org.elasticsearch.indices.IndicesFeatures,
org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures, org.elasticsearch.action.admin.cluster.allocation.AllocationStatsFeatures,
org.elasticsearch.index.mapper.MapperFeatures, org.elasticsearch.index.mapper.MapperFeatures,
org.elasticsearch.search.retriever.RetrieversFeatures; org.elasticsearch.search.retriever.RetrieversFeatures,
org.elasticsearch.reservedstate.service.FileSettingsFeatures;
uses org.elasticsearch.plugins.internal.SettingsExtension; uses org.elasticsearch.plugins.internal.SettingsExtension;
uses RestExtension; uses RestExtension;

View file

@ -13,13 +13,14 @@ import java.security.BasicPermission;
/** /**
* A permission granted to ensure secured access to a file in the config directory. * A permission granted to ensure secured access to a file in the config directory.
* <p> * <p>
* By granting this permission, all code that does not have the same permission on the same file * By granting this permission with a file relative to the config directory,
* will be denied all read/write access to that file. * the file is secured from general access by Elasticsearch and other Elasticsearch plugins.
* Note that you also need to wrap any access to the secured files in an {@code AccessController.doPrivileged()} block * All code that does not have a secured permission on the same file will be denied all read/write access to that file.
* Note that you also need to wrap any access to secured files in an {@code AccessController.doPrivileged()} block
* as Elasticsearch itself is denied access to files secured by plugins. * as Elasticsearch itself is denied access to files secured by plugins.
*/ */
public class SecuredFileAccessPermission extends BasicPermission { public class SecuredConfigFileAccessPermission extends BasicPermission {
public SecuredFileAccessPermission(String path) { public SecuredConfigFileAccessPermission(String path) {
super(path, ""); super(path, "");
} }
} }

View file

@ -0,0 +1,26 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch;
import java.security.BasicPermission;
/**
* A permission granted to ensure secured access to a file specified by a setting in the config directory.
* <p>
* By granting this permission with a setting key (wildcards are supported),
* the files pointed to by the settings are secured from general access by Elasticsearch and other Elasticsearch plugins.
* All code that does not have a secured permission on the same file will be denied all read/write access to that file.
* Note that you also need to wrap any access to secured files in an {@code AccessController.doPrivileged()} block
* as Elasticsearch itself is denied access to files secured by plugins.
*/
public class SecuredConfigFileSettingAccessPermission extends BasicPermission {
public SecuredConfigFileSettingAccessPermission(String setting) {
super(setting, "");
}
}
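A minimal sketch of how a plugin might consume this permission (the setting key, class name and helper below are assumptions for illustration, not part of this change): the plugin grants itself SecuredConfigFileSettingAccessPermission on a setting key in its plugin-security.policy, then wraps access to the resolved file in AccessController.doPrivileged(), since, as the javadoc above notes, Elasticsearch itself is denied access to plugin-secured files.
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

public final class SecuredConfigFileReader {
    private SecuredConfigFileReader() {}

    // Reads a config file that this plugin has secured via SecuredConfigFileSettingAccessPermission.
    // The read must run inside doPrivileged because callers outside the plugin (including core) are denied access.
    public static byte[] read(Path configDir, String relativePath) throws IOException {
        Path secured = configDir.resolve(relativePath);
        try {
            return AccessController.doPrivileged((PrivilegedExceptionAction<byte[]>) () -> Files.readAllBytes(secured));
        } catch (PrivilegedActionException e) {
            throw new IOException("failed to read secured config file " + secured, e);
        }
    }
}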

View file

@ -186,6 +186,9 @@ public class TransportVersions {
public static final TransportVersion ML_CHUNK_INFERENCE_OPTION = def(8_677_00_0); public static final TransportVersion ML_CHUNK_INFERENCE_OPTION = def(8_677_00_0);
public static final TransportVersion RANK_FEATURE_PHASE_ADDED = def(8_678_00_0); public static final TransportVersion RANK_FEATURE_PHASE_ADDED = def(8_678_00_0);
public static final TransportVersion RANK_DOC_IN_SHARD_FETCH_REQUEST = def(8_679_00_0); public static final TransportVersion RANK_DOC_IN_SHARD_FETCH_REQUEST = def(8_679_00_0);
public static final TransportVersion SECURITY_SETTINGS_REQUEST_TIMEOUTS = def(8_680_00_0);
public static final TransportVersion QUERY_RULE_CRUD_API_PUT = def(8_681_00_0);
/* /*
* STOP! READ THIS FIRST! No, really, * STOP! READ THIS FIRST! No, really,
* ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _

View file

@ -223,6 +223,8 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
final long startBulkTime = System.nanoTime(); final long startBulkTime = System.nanoTime();
private final ActionListener<Void> onMappingUpdateDone = ActionListener.wrap(v -> executor.execute(this), this::onRejection);
@Override @Override
protected void doRun() throws Exception { protected void doRun() throws Exception {
while (context.hasMoreOperationsToExecute()) { while (context.hasMoreOperationsToExecute()) {
@ -232,8 +234,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
nowInMillisSupplier, nowInMillisSupplier,
mappingUpdater, mappingUpdater,
waitForMappingUpdate, waitForMappingUpdate,
onMappingUpdateDone,
ActionListener.wrap(v -> executor.execute(this), this::onRejection),
documentParsingProvider documentParsingProvider
) == false) { ) == false) {
// We are waiting for a mapping update on another thread, that will invoke this action again once its done // We are waiting for a mapping update on another thread, that will invoke this action again once its done

View file

@ -434,9 +434,6 @@ public class SearchRequest extends ActionRequest implements IndicesRequest.Repla
if (source.pointInTimeBuilder() != null) { if (source.pointInTimeBuilder() != null) {
validationException = addValidationError("[rank] cannot be used with [point in time]", validationException); validationException = addValidationError("[rank] cannot be used with [point in time]", validationException);
} }
if (source.profile()) {
validationException = addValidationError("[rank] requires [profile] is [false]", validationException);
}
} }
if (source.rescores() != null) { if (source.rescores() != null) {
for (@SuppressWarnings("rawtypes") for (@SuppressWarnings("rawtypes")

View file

@ -439,7 +439,7 @@ public class SearchTransportService {
}; };
transportService.registerRequestHandler( transportService.registerRequestHandler(
FREE_CONTEXT_SCROLL_ACTION_NAME, FREE_CONTEXT_SCROLL_ACTION_NAME,
EsExecutors.DIRECT_EXECUTOR_SERVICE, transportService.getThreadPool().generic(),
ScrollFreeContextRequest::new, ScrollFreeContextRequest::new,
instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler)
); );
@ -447,7 +447,7 @@ public class SearchTransportService {
transportService.registerRequestHandler( transportService.registerRequestHandler(
FREE_CONTEXT_ACTION_NAME, FREE_CONTEXT_ACTION_NAME,
EsExecutors.DIRECT_EXECUTOR_SERVICE, transportService.getThreadPool().generic(),
SearchFreeContextRequest::new, SearchFreeContextRequest::new,
instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler) instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, freeContextHandler)
); );
@ -455,7 +455,7 @@ public class SearchTransportService {
transportService.registerRequestHandler( transportService.registerRequestHandler(
CLEAR_SCROLL_CONTEXTS_ACTION_NAME, CLEAR_SCROLL_CONTEXTS_ACTION_NAME,
EsExecutors.DIRECT_EXECUTOR_SERVICE, transportService.getThreadPool().generic(),
TransportRequest.Empty::new, TransportRequest.Empty::new,
instrumentedHandler(CLEAR_SCROLL_CONTEXTS_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { instrumentedHandler(CLEAR_SCROLL_CONTEXTS_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> {
searchService.freeAllScrollContexts(); searchService.freeAllScrollContexts();

View file

@ -8,7 +8,8 @@
package org.elasticsearch.bootstrap; package org.elasticsearch.bootstrap;
import org.elasticsearch.SecuredFileAccessPermission; import org.elasticsearch.SecuredConfigFileAccessPermission;
import org.elasticsearch.SecuredConfigFileSettingAccessPermission;
import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.IOUtils;
import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtils;
import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.SuppressForbidden;
@ -169,7 +170,8 @@ public class PolicyUtil {
entry(PrivateCredentialPermission.class, ALLOW_ALL_NAMES), entry(PrivateCredentialPermission.class, ALLOW_ALL_NAMES),
entry(SQLPermission.class, List.of("callAbort", "setNetworkTimeout")), entry(SQLPermission.class, List.of("callAbort", "setNetworkTimeout")),
entry(ClassPermission.class, ALLOW_ALL_NAMES), entry(ClassPermission.class, ALLOW_ALL_NAMES),
entry(SecuredFileAccessPermission.class, ALLOW_ALL_NAMES) entry(SecuredConfigFileAccessPermission.class, ALLOW_ALL_NAMES),
entry(SecuredConfigFileSettingAccessPermission.class, ALLOW_ALL_NAMES)
).collect(Collectors.toMap(e -> e.getKey().getCanonicalName(), Map.Entry::getValue)); ).collect(Collectors.toMap(e -> e.getKey().getCanonicalName(), Map.Entry::getValue));
PermissionCollection pluginPermissionCollection = new Permissions(); PermissionCollection pluginPermissionCollection = new Permissions();
namedPermissions.forEach(pluginPermissionCollection::add); namedPermissions.forEach(pluginPermissionCollection::add);

View file

@ -9,13 +9,16 @@
package org.elasticsearch.bootstrap; package org.elasticsearch.bootstrap;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.SecuredFileAccessPermission; import org.elasticsearch.SecuredConfigFileAccessPermission;
import org.elasticsearch.SecuredConfigFileSettingAccessPermission;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtils;
import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.jdk.JarHell; import org.elasticsearch.jdk.JarHell;
import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger;
import org.elasticsearch.plugins.PluginsUtils; import org.elasticsearch.plugins.PluginsUtils;
import org.elasticsearch.secure_sm.SecureSM; import org.elasticsearch.secure_sm.SecureSM;
import org.elasticsearch.transport.TcpTransport; import org.elasticsearch.transport.TcpTransport;
@ -46,7 +49,7 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Set; import java.util.Set;
import java.util.function.Consumer; import java.util.function.Consumer;
import java.util.stream.Stream; import java.util.regex.Pattern;
import static java.lang.invoke.MethodType.methodType; import static java.lang.invoke.MethodType.methodType;
import static org.elasticsearch.bootstrap.ESPolicy.POLICY_RESOURCE; import static org.elasticsearch.bootstrap.ESPolicy.POLICY_RESOURCE;
@ -104,6 +107,8 @@ import static org.elasticsearch.reservedstate.service.FileSettingsService.SETTIN
*/ */
final class Security { final class Security {
private static Logger logger; // not init'd until configure call below
static { static {
prepopulateSecurityCaller(); prepopulateSecurityCaller();
} }
@ -122,6 +127,8 @@ final class Security {
* @param filterBadDefaults true if we should filter out bad java defaults in the system policy. * @param filterBadDefaults true if we should filter out bad java defaults in the system policy.
*/ */
static void configure(Environment environment, boolean filterBadDefaults, Path pidFile) throws IOException { static void configure(Environment environment, boolean filterBadDefaults, Path pidFile) throws IOException {
logger = LogManager.getLogger(Security.class);
// enable security policy: union of template and environment-based paths, and possibly plugin permissions // enable security policy: union of template and environment-based paths, and possibly plugin permissions
Map<String, URL> codebases = PolicyUtil.getCodebaseJarMap(JarHell.parseModulesAndClassPath()); Map<String, URL> codebases = PolicyUtil.getCodebaseJarMap(JarHell.parseModulesAndClassPath());
Policy mainPolicy = PolicyUtil.readPolicy(ESPolicy.class.getResource(POLICY_RESOURCE), codebases); Policy mainPolicy = PolicyUtil.readPolicy(ESPolicy.class.getResource(POLICY_RESOURCE), codebases);
@ -133,7 +140,7 @@ final class Security {
pluginPolicies, pluginPolicies,
filterBadDefaults, filterBadDefaults,
createRecursiveDataPathPermission(environment), createRecursiveDataPathPermission(environment),
readSecuredFiles(environment, mainPolicy, codebases.values(), pluginPolicies) readSecuredConfigFiles(environment, mainPolicy, codebases.values(), pluginPolicies)
) )
); );
@ -196,57 +203,103 @@ final class Security {
return toFilePermissions(policy); return toFilePermissions(policy);
} }
private static Map<String, Set<URL>> readSecuredFiles( private static Map<String, Set<URL>> readSecuredConfigFiles(
Environment environment, Environment environment,
Policy template, Policy template,
Collection<URL> mainCodebases, Collection<URL> mainCodebases,
Map<URL, Policy> pluginPolicies Map<URL, Policy> pluginPolicies
) throws IOException { ) throws IOException {
Map<String, Set<URL>> securedFiles = new HashMap<>(); Map<String, Set<URL>> securedConfigFiles = new HashMap<>();
Map<String, Set<URL>> securedSettingKeys = new HashMap<>();
for (URL url : mainCodebases) { for (URL url : mainCodebases) {
PolicyUtil.getPolicyPermissions(url, template, environment.tmpFile()) for (Permission p : PolicyUtil.getPolicyPermissions(url, template, environment.tmpFile())) {
.stream() readSecuredConfigFilePermissions(environment, url, p, securedConfigFiles, securedSettingKeys);
.flatMap(Security::extractSecuredFileName) }
.map(environment.configFile()::resolve)
.forEach(f -> securedFiles.computeIfAbsent(f.toString(), k -> new HashSet<>()).add(url));
} }
for (var pp : pluginPolicies.entrySet()) { for (var pp : pluginPolicies.entrySet()) {
PolicyUtil.getPolicyPermissions(pp.getKey(), pp.getValue(), environment.tmpFile()) for (Permission p : PolicyUtil.getPolicyPermissions(pp.getKey(), pp.getValue(), environment.tmpFile())) {
readSecuredConfigFilePermissions(environment, pp.getKey(), p, securedConfigFiles, securedSettingKeys);
}
}
// compile a Pattern for each setting key we'll be looking for
// the key could include a * wildcard
List<Map.Entry<Pattern, Set<URL>>> settingPatterns = securedSettingKeys.entrySet()
.stream() .stream()
.flatMap(Security::extractSecuredFileName) .map(e -> Map.entry(Pattern.compile(e.getKey()), e.getValue()))
.map(environment.configFile()::resolve) .toList();
.forEach(f -> securedFiles.computeIfAbsent(f.toString(), k -> new HashSet<>()).add(pp.getKey()));
for (String setting : environment.settings().keySet()) {
for (Map.Entry<Pattern, Set<URL>> ps : settingPatterns) {
if (ps.getKey().matcher(setting).matches()) {
// add the setting value to the secured files for these codebase URLs
Path file = environment.configFile().resolve(environment.settings().get(setting));
if (file.startsWith(environment.configFile()) == false) {
throw new IllegalStateException(ps.getValue() + " tried to grant access to file outside config directory " + file);
}
if (logger.isDebugEnabled()) {
ps.getValue()
.forEach(
url -> logger.debug("Jar {} securing access to config file {} through setting {}", url, file, setting)
);
}
securedConfigFiles.computeIfAbsent(file.toString(), k -> new HashSet<>()).addAll(ps.getValue());
}
}
} }
// always add some config files as exclusive files that no one can access // always add some config files as exclusive files that no one can access
// there's no reason for anyone to read these once the security manager is initialized // there's no reason for anyone to read these once the security manager is initialized
// so if something has tried to grant itself access, crash out with an error // so if something has tried to grant itself access, crash out with an error
addSpeciallySecuredFile(securedFiles, environment.configFile().resolve("elasticsearch.yml").toString()); addSpeciallySecuredConfigFile(securedConfigFiles, environment.configFile().resolve("elasticsearch.yml").toString());
addSpeciallySecuredFile(securedFiles, environment.configFile().resolve("jvm.options").toString()); addSpeciallySecuredConfigFile(securedConfigFiles, environment.configFile().resolve("jvm.options").toString());
addSpeciallySecuredFile(securedFiles, environment.configFile().resolve("jvm.options.d/-").toString()); addSpeciallySecuredConfigFile(securedConfigFiles, environment.configFile().resolve("jvm.options.d/-").toString());
return Collections.unmodifiableMap(securedFiles); return Collections.unmodifiableMap(securedConfigFiles);
} }
private static void addSpeciallySecuredFile(Map<String, Set<URL>> securedFiles, String path) { private static void readSecuredConfigFilePermissions(
Environment environment,
URL url,
Permission p,
Map<String, Set<URL>> securedFiles,
Map<String, Set<URL>> securedSettingKeys
) {
String securedFileName = extractSecuredName(p, SecuredConfigFileAccessPermission.class);
if (securedFileName != null) {
Path securedFile = environment.configFile().resolve(securedFileName);
if (securedFile.startsWith(environment.configFile()) == false) {
throw new IllegalStateException("[" + url + "] tried to grant access to file outside config directory " + securedFile);
}
logger.debug("Jar {} securing access to config file {}", url, securedFile);
securedFiles.computeIfAbsent(securedFile.toString(), k -> new HashSet<>()).add(url);
}
String securedKey = extractSecuredName(p, SecuredConfigFileSettingAccessPermission.class);
if (securedKey != null) {
securedSettingKeys.computeIfAbsent(securedKey, k -> new HashSet<>()).add(url);
}
}
private static String extractSecuredName(Permission p, Class<? extends Permission> permissionType) {
if (permissionType.isInstance(p)) {
return p.getName();
} else if (p instanceof UnresolvedPermission up && up.getUnresolvedType().equals(permissionType.getCanonicalName())) {
return up.getUnresolvedName();
} else {
return null;
}
}
private static void addSpeciallySecuredConfigFile(Map<String, Set<URL>> securedFiles, String path) {
Set<URL> attemptedToGrant = securedFiles.put(path, Set.of()); Set<URL> attemptedToGrant = securedFiles.put(path, Set.of());
if (attemptedToGrant != null) { if (attemptedToGrant != null) {
throw new IllegalStateException(attemptedToGrant + " tried to grant access to special config file " + path); throw new IllegalStateException(attemptedToGrant + " tried to grant access to special config file " + path);
} }
} }
private static Stream<String> extractSecuredFileName(Permission p) {
if (p instanceof SecuredFileAccessPermission) {
return Stream.of(p.getName());
}
if (p instanceof UnresolvedPermission up && up.getUnresolvedType().equals(SecuredFileAccessPermission.class.getCanonicalName())) {
return Stream.of(up.getUnresolvedName());
}
return Stream.empty();
}
/** Adds access to classpath jars/classes for jar hell scan, etc */ /** Adds access to classpath jars/classes for jar hell scan, etc */
@SuppressForbidden(reason = "accesses fully qualified URLs to configure security") @SuppressForbidden(reason = "accesses fully qualified URLs to configure security")
static void addClasspathPermissions(Permissions policy) throws IOException { static void addClasspathPermissions(Permissions policy) throws IOException {

View file

@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndSeqNo;
import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver.DocIdAndVersion;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.SeqNoFieldMapper; import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper;
@ -45,8 +46,6 @@ final class PerThreadIDVersionAndSeqNoLookup {
// TODO: do we really need to store all this stuff? some if it might not speed up anything. // TODO: do we really need to store all this stuff? some if it might not speed up anything.
// we keep it around for now, to reduce the amount of e.g. hash lookups by field and stuff // we keep it around for now, to reduce the amount of e.g. hash lookups by field and stuff
/** terms enum for uid field */
final String uidField;
private final TermsEnum termsEnum; private final TermsEnum termsEnum;
/** Reused for iteration (when the term exists) */ /** Reused for iteration (when the term exists) */
@ -62,10 +61,8 @@ final class PerThreadIDVersionAndSeqNoLookup {
/** /**
* Initialize lookup for the provided segment * Initialize lookup for the provided segment
*/ */
PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField, boolean trackReaderKey, boolean loadTimestampRange) PerThreadIDVersionAndSeqNoLookup(LeafReader reader, boolean trackReaderKey, boolean loadTimestampRange) throws IOException {
throws IOException { final Terms terms = reader.terms(IdFieldMapper.NAME);
this.uidField = uidField;
final Terms terms = reader.terms(uidField);
if (terms == null) { if (terms == null) {
// If a segment contains only no-ops, it does not have _uid but has both _soft_deletes and _tombstone fields. // If a segment contains only no-ops, it does not have _uid but has both _soft_deletes and _tombstone fields.
final NumericDocValues softDeletesDV = reader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD); final NumericDocValues softDeletesDV = reader.getNumericDocValues(Lucene.SOFT_DELETES_FIELD);
@ -107,8 +104,8 @@ final class PerThreadIDVersionAndSeqNoLookup {
} }
} }
PerThreadIDVersionAndSeqNoLookup(LeafReader reader, String uidField, boolean loadTimestampRange) throws IOException { PerThreadIDVersionAndSeqNoLookup(LeafReader reader, boolean loadTimestampRange) throws IOException {
this(reader, uidField, true, loadTimestampRange); this(reader, true, loadTimestampRange);
} }
/** Return null if id is not found. /** Return null if id is not found.

View file

@ -11,7 +11,7 @@ package org.elasticsearch.common.lucene.uid;
import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CloseableThreadLocal; import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.common.util.ByteUtils;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@ -20,7 +20,6 @@ import org.elasticsearch.core.Assertions;
import java.io.IOException; import java.io.IOException;
import java.util.Base64; import java.util.Base64;
import java.util.List; import java.util.List;
import java.util.Objects;
import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentMap;
/** Utility class to resolve the Lucene doc ID, version, seqNo and primaryTerms for a given uid. */ /** Utility class to resolve the Lucene doc ID, version, seqNo and primaryTerms for a given uid. */
@ -37,8 +36,7 @@ public final class VersionsAndSeqNoResolver {
} }
}; };
private static PerThreadIDVersionAndSeqNoLookup[] getLookupState(IndexReader reader, String uidField, boolean loadTimestampRange) private static PerThreadIDVersionAndSeqNoLookup[] getLookupState(IndexReader reader, boolean loadTimestampRange) throws IOException {
throws IOException {
// We cache on the top level // We cache on the top level
// This means cache entries have a shorter lifetime, maybe as low as 1s with the // This means cache entries have a shorter lifetime, maybe as low as 1s with the
// default refresh interval and a steady indexing rate, but on the other hand it // default refresh interval and a steady indexing rate, but on the other hand it
@ -63,7 +61,7 @@ public final class VersionsAndSeqNoResolver {
if (lookupState == null) { if (lookupState == null) {
lookupState = new PerThreadIDVersionAndSeqNoLookup[reader.leaves().size()]; lookupState = new PerThreadIDVersionAndSeqNoLookup[reader.leaves().size()];
for (LeafReaderContext leaf : reader.leaves()) { for (LeafReaderContext leaf : reader.leaves()) {
lookupState[leaf.ord] = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), uidField, loadTimestampRange); lookupState[leaf.ord] = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), loadTimestampRange);
} }
ctl.set(lookupState); ctl.set(lookupState);
} else { } else {
@ -87,12 +85,6 @@ public final class VersionsAndSeqNoResolver {
throw new AssertionError("Mismatched numbers of leaves: " + lookupState.length + " != " + reader.leaves().size()); throw new AssertionError("Mismatched numbers of leaves: " + lookupState.length + " != " + reader.leaves().size());
} }
if (lookupState.length > 0 && Objects.equals(lookupState[0].uidField, uidField) == false) {
throw new AssertionError(
"Index does not consistently use the same uid field: [" + uidField + "] != [" + lookupState[0].uidField + "]"
);
}
return lookupState; return lookupState;
} }
@ -136,15 +128,15 @@ public final class VersionsAndSeqNoResolver {
* <li>a doc ID and a version otherwise * <li>a doc ID and a version otherwise
* </ul> * </ul>
*/ */
public static DocIdAndVersion timeSeriesLoadDocIdAndVersion(IndexReader reader, Term term, boolean loadSeqNo) throws IOException { public static DocIdAndVersion timeSeriesLoadDocIdAndVersion(IndexReader reader, BytesRef term, boolean loadSeqNo) throws IOException {
PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field(), false); PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, false);
List<LeafReaderContext> leaves = reader.leaves(); List<LeafReaderContext> leaves = reader.leaves();
// iterate backwards to optimize for the frequently updated documents // iterate backwards to optimize for the frequently updated documents
// which are likely to be in the last segments // which are likely to be in the last segments
for (int i = leaves.size() - 1; i >= 0; i--) { for (int i = leaves.size() - 1; i >= 0; i--) {
final LeafReaderContext leaf = leaves.get(i); final LeafReaderContext leaf = leaves.get(i);
PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord]; PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord];
DocIdAndVersion result = lookup.lookupVersion(term.bytes(), loadSeqNo, leaf); DocIdAndVersion result = lookup.lookupVersion(term, loadSeqNo, leaf);
if (result != null) { if (result != null) {
return result; return result;
} }
@@ -168,7 +160,7 @@ public final class VersionsAndSeqNoResolver {
* returning <code>null</code> if no document was found for the specified id * returning <code>null</code> if no document was found for the specified id
* @throws IOException In case of an i/o related failure * @throws IOException In case of an i/o related failure
*/ */
public static DocIdAndVersion timeSeriesLoadDocIdAndVersion(IndexReader reader, Term uid, String id, boolean loadSeqNo) public static DocIdAndVersion timeSeriesLoadDocIdAndVersion(IndexReader reader, BytesRef uid, String id, boolean loadSeqNo)
throws IOException { throws IOException {
byte[] idAsBytes = Base64.getUrlDecoder().decode(id); byte[] idAsBytes = Base64.getUrlDecoder().decode(id);
assert idAsBytes.length == 20; assert idAsBytes.length == 20;
@@ -176,7 +168,7 @@ public final class VersionsAndSeqNoResolver {
// @timestamp) // @timestamp)
long timestamp = ByteUtils.readLongBE(idAsBytes, 12); long timestamp = ByteUtils.readLongBE(idAsBytes, 12);
PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, uid.field(), true); PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, true);
List<LeafReaderContext> leaves = reader.leaves(); List<LeafReaderContext> leaves = reader.leaves();
// iterate in default order, the segments should be sorted by DataStream#TIMESERIES_LEAF_READERS_SORTER // iterate in default order, the segments should be sorted by DataStream#TIMESERIES_LEAF_READERS_SORTER
long prevMaxTimestamp = Long.MAX_VALUE; long prevMaxTimestamp = Long.MAX_VALUE;
@@ -190,7 +182,7 @@ public final class VersionsAndSeqNoResolver {
if (timestamp > lookup.maxTimestamp) { if (timestamp > lookup.maxTimestamp) {
return null; return null;
} }
DocIdAndVersion result = lookup.lookupVersion(uid.bytes(), loadSeqNo, leaf); DocIdAndVersion result = lookup.lookupVersion(uid, loadSeqNo, leaf);
if (result != null) { if (result != null) {
return result; return result;
} }
@@ -199,12 +191,12 @@ public final class VersionsAndSeqNoResolver {
return null; return null;
} }
public static DocIdAndVersion loadDocIdAndVersionUncached(IndexReader reader, Term term, boolean loadSeqNo) throws IOException { public static DocIdAndVersion loadDocIdAndVersionUncached(IndexReader reader, BytesRef term, boolean loadSeqNo) throws IOException {
List<LeafReaderContext> leaves = reader.leaves(); List<LeafReaderContext> leaves = reader.leaves();
for (int i = leaves.size() - 1; i >= 0; i--) { for (int i = leaves.size() - 1; i >= 0; i--) {
final LeafReaderContext leaf = leaves.get(i); final LeafReaderContext leaf = leaves.get(i);
PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), term.field(), false, false); PerThreadIDVersionAndSeqNoLookup lookup = new PerThreadIDVersionAndSeqNoLookup(leaf.reader(), false, false);
DocIdAndVersion result = lookup.lookupVersion(term.bytes(), loadSeqNo, leaf); DocIdAndVersion result = lookup.lookupVersion(term, loadSeqNo, leaf);
if (result != null) { if (result != null) {
return result; return result;
} }
@@ -216,15 +208,15 @@ public final class VersionsAndSeqNoResolver {
* Loads the internal docId and sequence number of the latest copy for a given uid from the provided reader. * Loads the internal docId and sequence number of the latest copy for a given uid from the provided reader.
* The result is either null or the live and latest version of the given uid. * The result is either null or the live and latest version of the given uid.
*/ */
public static DocIdAndSeqNo loadDocIdAndSeqNo(IndexReader reader, Term term) throws IOException { public static DocIdAndSeqNo loadDocIdAndSeqNo(IndexReader reader, BytesRef term) throws IOException {
final PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, term.field(), false); final PerThreadIDVersionAndSeqNoLookup[] lookups = getLookupState(reader, false);
final List<LeafReaderContext> leaves = reader.leaves(); final List<LeafReaderContext> leaves = reader.leaves();
// iterate backwards to optimize for the frequently updated documents // iterate backwards to optimize for the frequently updated documents
// which are likely to be in the last segments // which are likely to be in the last segments
for (int i = leaves.size() - 1; i >= 0; i--) { for (int i = leaves.size() - 1; i >= 0; i--) {
final LeafReaderContext leaf = leaves.get(i); final LeafReaderContext leaf = leaves.get(i);
final PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord]; final PerThreadIDVersionAndSeqNoLookup lookup = lookups[leaf.ord];
final DocIdAndSeqNo result = lookup.lookupSeqNo(term.bytes(), leaf); final DocIdAndSeqNo result = lookup.lookupSeqNo(term, leaf);
if (result != null) { if (result != null) {
return result; return result;
} }
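The time-series variants above prune segments using the @timestamp that is encoded in the 20-byte, URL-safe Base64 _id (a big-endian long at byte offset 12, read via ByteUtils.readLongBE). A minimal sketch of just that decoding step, assuming only what the hunks above show; the class and method names are illustrative and not part of this change:

import java.util.Base64;

class TimeSeriesIdTimestampSketch {
    // Decode a time-series _id and extract its @timestamp; equivalent to ByteUtils.readLongBE(idAsBytes, 12).
    static long timestampFromId(String id) {
        byte[] idAsBytes = Base64.getUrlDecoder().decode(id);
        assert idAsBytes.length == 20 : "expected a 20-byte time-series _id";
        long timestamp = 0;
        for (int i = 12; i < 20; i++) {
            // accumulate the 8 trailing bytes in big-endian order
            timestamp = (timestamp << 8) | (idAsBytes[i] & 0xFFL);
        }
        return timestamp; // compared against each segment's [minTimestamp, maxTimestamp] range above
    }
}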

View file

@@ -85,6 +85,7 @@ public final class NetworkService {
private final List<CustomNameResolver> customNameResolvers; private final List<CustomNameResolver> customNameResolvers;
private final HandlingTimeTracker handlingTimeTracker = new HandlingTimeTracker(); private final HandlingTimeTracker handlingTimeTracker = new HandlingTimeTracker();
private final ThreadWatchdog threadWatchdog = new ThreadWatchdog();
public NetworkService(List<CustomNameResolver> customNameResolvers) { public NetworkService(List<CustomNameResolver> customNameResolvers) {
this.customNameResolvers = Objects.requireNonNull(customNameResolvers, "customNameResolvers must be non null"); this.customNameResolvers = Objects.requireNonNull(customNameResolvers, "customNameResolvers must be non null");
@@ -94,6 +95,10 @@ public final class NetworkService {
return handlingTimeTracker; return handlingTimeTracker;
} }
public ThreadWatchdog getThreadWatchdog() {
return threadWatchdog;
}
/** /**
* Resolves {@code bindHosts} to a list of internet addresses. The list will * Resolves {@code bindHosts} to a list of internet addresses. The list will
* not contain duplicate addresses. * not contain duplicate addresses.
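A hypothetical sketch of how a transport implementation might use the new getter together with the ThreadWatchdog introduced below: obtain the per-thread tracker once on the network thread and bracket each unit of read processing with it. The method and the Runnable parameter are illustrative stand-ins, not part of this change:

static void processReads(NetworkService networkService, Runnable readWork) {
    // getActivityTrackerForCurrentThread() lazily registers a tracker for the calling network thread
    ThreadWatchdog.ActivityTracker tracker = networkService.getThreadWatchdog().getActivityTrackerForCurrentThread();
    tracker.startActivity();
    try {
        readWork.run(); // the actual read handling, tracked between start and stop
    } finally {
        tracker.stopActivity();
    }
}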

View file

@@ -0,0 +1,280 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.common.network;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.ReferenceDocs;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.monitor.jvm.HotThreads;
import org.elasticsearch.threadpool.ThreadPool;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
/**
* Watchdog mechanism for making sure that no transport thread spends too long blocking the event loop.
*/
// Today we only use this to track activity while processing reads on network threads. Tracking the time spent processing writes is a little
// trickier because that code is more re-entrant, both within the network layer itself and because it may complete a listener from the wider
// codebase that ends up calling back into the network layer again. We also rarely see network threads blocking for long on the write path,
// so we focus on reads for now.
public class ThreadWatchdog {
public static final Setting<TimeValue> NETWORK_THREAD_WATCHDOG_INTERVAL = Setting.timeSetting(
"network.thread.watchdog.interval",
TimeValue.timeValueSeconds(5),
Setting.Property.NodeScope
);
public static final Setting<TimeValue> NETWORK_THREAD_WATCHDOG_QUIET_TIME = Setting.timeSetting(
"network.thread.watchdog.quiet_time",
TimeValue.timeValueMinutes(10),
Setting.Property.NodeScope
);
private static final Logger logger = LogManager.getLogger(ThreadWatchdog.class);
/**
* Activity tracker for the current thread. Thread-locals are only retained by the owning thread so these will be GCd after thread exit.
*/
private final ThreadLocal<ActivityTracker> activityTrackerThreadLocal = new ThreadLocal<>();
/**
* Collection of known activity trackers to be scanned for stuck threads. Uses {@link WeakReference} so that we don't prevent trackers
* from being GCd if a thread exits. There aren't many such trackers, O(#cpus), and they almost never change, so an {@link ArrayList}
* with explicit synchronization is fine.
*/
private final List<WeakReference<ActivityTracker>> knownTrackers = new ArrayList<>();
/**
* @return an activity tracker for activities on the current thread.
*/
public ActivityTracker getActivityTrackerForCurrentThread() {
var result = activityTrackerThreadLocal.get();
if (result == null) {
// this is a previously-untracked thread; thread creation is assumed to be very rare, no need to optimize this path at all
result = new ActivityTracker();
synchronized (knownTrackers) {
knownTrackers.add(new WeakReference<>(result));
}
activityTrackerThreadLocal.set(result);
}
return result;
}
// exposed for testing
List<String> getStuckThreadNames() {
List<String> stuckThreadNames = null;
// this is not called very often, and only on a single thread, with almost no contention on this mutex since thread creation is rare
synchronized (knownTrackers) {
final var iterator = knownTrackers.iterator();
while (iterator.hasNext()) {
final var tracker = iterator.next().get();
if (tracker == null) {
// tracker was GCd because its thread exited - very rare, no need to optimize this case
iterator.remove();
} else if (tracker.isIdleOrMakingProgress() == false) {
if (stuckThreadNames == null) {
stuckThreadNames = new ArrayList<>();
}
stuckThreadNames.add(tracker.getTrackedThreadName());
}
}
}
if (stuckThreadNames == null) {
return List.of();
} else {
stuckThreadNames.sort(Comparator.naturalOrder());
return stuckThreadNames;
}
}
/**
* Per-thread class which keeps track of activity on that thread, represented as a {@code long} which is incremented every time an
* activity starts or stops. Thus the parity of its value indicates whether the thread is idle or not. Crucially, the activity tracking
* is very lightweight (on the tracked thread).
*/
public static final class ActivityTracker extends AtomicLong {
private final Thread trackedThread;
private long lastObservedValue;
public ActivityTracker() {
this.trackedThread = Thread.currentThread();
}
String getTrackedThreadName() {
return trackedThread.getName();
}
public void startActivity() {
assert trackedThread == Thread.currentThread() : trackedThread.getName() + " vs " + Thread.currentThread().getName();
final var prevValue = getAndIncrement();
assert isIdle(prevValue) : "thread [" + trackedThread.getName() + "] was already active";
}
public void stopActivity() {
assert trackedThread == Thread.currentThread() : trackedThread.getName() + " vs " + Thread.currentThread().getName();
final var prevValue = getAndIncrement();
assert isIdle(prevValue) == false : "thread [" + trackedThread.getName() + "] was already idle";
}
boolean isIdleOrMakingProgress() {
final var value = get();
if (isIdle(value)) {
return true;
}
if (value == lastObservedValue) {
// no change since last check
return false;
} else {
// made progress since last check
lastObservedValue = value;
return true;
}
}
private static boolean isIdle(long value) {
// the parity of the value indicates the idle state: initially zero (idle), so active == odd
return (value & 1) == 0;
}
}
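// Editor's illustration, not part of the committed file: the counter's parity encodes the thread's state.
//   new ActivityTracker()  -> value 0 (even) : idle
//   startActivity()        -> value 1 (odd)  : active
//   stopActivity()         -> value 2 (even) : idle again
// The checker flags a thread as stuck only when it observes the same odd value on two consecutive scans:
// odd means an activity is still in flight, and an unchanged value means no progress since the last scan.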
public void run(Settings settings, ThreadPool threadPool, Lifecycle lifecycle) {
new Checker(threadPool, NETWORK_THREAD_WATCHDOG_INTERVAL.get(settings), NETWORK_THREAD_WATCHDOG_QUIET_TIME.get(settings), lifecycle)
.run();
}
/**
* Action which runs itself periodically, calling {@link #getStuckThreadNames} to check for active threads that didn't make progress
* since the last call, and if it finds any then it dispatches {@link #threadDumper} to log the current hot threads.
*/
private final class Checker extends AbstractRunnable {
private final ThreadPool threadPool;
private final TimeValue interval;
private final TimeValue quietTime;
private final Lifecycle lifecycle;
Checker(ThreadPool threadPool, TimeValue interval, TimeValue quietTime, Lifecycle lifecycle) {
this.threadPool = threadPool;
this.interval = interval;
this.quietTime = quietTime.compareTo(interval) <= 0 ? interval : quietTime;
this.lifecycle = lifecycle;
assert this.interval.millis() <= this.quietTime.millis();
}
@Override
protected void doRun() {
if (isRunning() == false) {
return;
}
boolean rescheduleImmediately = true;
try {
final var stuckThreadNames = getStuckThreadNames();
if (stuckThreadNames.isEmpty() == false) {
logger.warn(
"the following threads are active but did not make progress in the preceding [{}]: {}",
interval,
stuckThreadNames
);
rescheduleImmediately = false;
threadPool.generic().execute(threadDumper);
}
} finally {
if (rescheduleImmediately) {
scheduleNext(interval);
}
}
}
@Override
public boolean isForceExecution() {
return true;
}
private boolean isRunning() {
return 0 < interval.millis() && lifecycle.stoppedOrClosed() == false;
}
private void scheduleNext(TimeValue delay) {
if (isRunning()) {
threadPool.scheduleUnlessShuttingDown(delay, EsExecutors.DIRECT_EXECUTOR_SERVICE, Checker.this);
}
}
private final AbstractRunnable threadDumper = new AbstractRunnable() {
@Override
protected void doRun() {
assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC);
if (isRunning()) {
HotThreads.logLocalHotThreads(
logger,
Level.WARN,
"hot threads dump due to active threads not making progress",
ReferenceDocs.NETWORK_THREADING_MODEL
);
}
}
@Override
public boolean isForceExecution() {
return true;
}
@Override
public void onFailure(Exception e) {
Checker.this.onFailure(e);
}
@Override
public void onRejection(Exception e) {
Checker.this.onRejection(e);
}
@Override
public void onAfter() {
scheduleNext(quietTime);
}
@Override
public String toString() {
return "ThreadWatchDog$Checker#threadDumper";
}
};
@Override
public void onFailure(Exception e) {
logger.error("exception in ThreadWatchDog$Checker", e);
assert false : e;
}
@Override
public void onRejection(Exception e) {
logger.debug("ThreadWatchDog$Checker execution rejected", e);
assert e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() : e;
}
@Override
public String toString() {
return "ThreadWatchDog$Checker";
}
}
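// Editor's illustration, not part of the committed file: the scheduling behaviour of Checker over time.
//   every `interval`         doRun() calls getStuckThreadNames() and, if nothing is stuck, reschedules itself
//   when something is stuck  it logs a warning, dispatches threadDumper to the GENERIC pool, and pauses
//   threadDumper#onAfter     reschedules the checker after `quiet_time` (never shorter than `interval`)
//   a non-positive interval, or node shutdown, makes isRunning() false and ends the loop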
}
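The two node-scope settings registered in ClusterSettings below can be tuned at startup, and a non-positive interval disables the checker entirely (isRunning() requires a positive interval). A minimal sketch, assuming only the setting constants shown above and the standard Settings/TimeValue builders; the values chosen here are illustrative:

// Illustrative only: node settings that slow the watchdog down and lengthen its quiet period.
Settings nodeSettings = Settings.builder()
    .put(ThreadWatchdog.NETWORK_THREAD_WATCHDOG_INTERVAL.getKey(), TimeValue.timeValueSeconds(30))
    .put(ThreadWatchdog.NETWORK_THREAD_WATCHDOG_QUIET_TIME.getKey(), TimeValue.timeValueMinutes(15))
    .build();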

View file

@@ -60,6 +60,7 @@ import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.cluster.service.MasterService;
import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.ThreadWatchdog;
import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsExecutors;
@@ -421,6 +422,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
NetworkService.TCP_REUSE_ADDRESS, NetworkService.TCP_REUSE_ADDRESS,
NetworkService.TCP_SEND_BUFFER_SIZE, NetworkService.TCP_SEND_BUFFER_SIZE,
NetworkService.TCP_RECEIVE_BUFFER_SIZE, NetworkService.TCP_RECEIVE_BUFFER_SIZE,
ThreadWatchdog.NETWORK_THREAD_WATCHDOG_INTERVAL,
ThreadWatchdog.NETWORK_THREAD_WATCHDOG_QUIET_TIME,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD, IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD, IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
ScriptService.SCRIPT_CACHE_SIZE_SETTING, ScriptService.SCRIPT_CACHE_SIZE_SETTING,

View file

@@ -158,6 +158,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING, MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING,
MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING, MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING,
MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING, MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING,
MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING, BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING,
IndexModule.INDEX_STORE_TYPE_SETTING, IndexModule.INDEX_STORE_TYPE_SETTING,
IndexModule.INDEX_STORE_PRE_LOAD_SETTING, IndexModule.INDEX_STORE_PRE_LOAD_SETTING,

View file

@@ -149,6 +149,7 @@ public class Setting<T> implements ToXContentObject {
* Indicates that this index-level setting was deprecated in {@link Version#V_7_17_0} and is * Indicates that this index-level setting was deprecated in {@link Version#V_7_17_0} and is
* forbidden in indices created from {@link Version#V_8_0_0} onwards. * forbidden in indices created from {@link Version#V_8_0_0} onwards.
*/ */
@UpdateForV9 // introduce IndexSettingDeprecatedInV8AndRemovedInV9 to replace this constant
IndexSettingDeprecatedInV7AndRemovedInV8, IndexSettingDeprecatedInV7AndRemovedInV8,
/** /**

Some files were not shown because too many files have changed in this diff.