Merge branch 'main' into feature/multi-project

Tim Vernum 2025-02-27 12:09:08 +11:00 committed by GitHub
commit f7e80e7fd2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
138 changed files with 2024 additions and 1309 deletions


@ -0,0 +1,15 @@
steps:
- label: $FWC_VERSION / fwc
command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v$FWC_VERSION#fwcTest -Dtests.bwc.snapshot=false
timeout_in_minutes: 300
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
machineType: n1-standard-32
buildDirectory: /dev/shm/bk
preemptible: true
matrix:
setup:
FWC_VERSION: $FWC_LIST
env:
FWC_VERSION: $FWC_VERSION


@ -0,0 +1,16 @@
# This file is auto-generated. See .buildkite/pipelines/periodic-fwc.template.yml
steps:
- label: "{{matrix.FWC_VERSION}} / fwc"
command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v$$FWC_VERSION#fwcTest -Dtests.bwc.snapshot=false
timeout_in_minutes: 300
agents:
provider: gcp
image: family/elasticsearch-ubuntu-2004
machineType: n1-standard-32
buildDirectory: /dev/shm/bk
preemptible: true
matrix:
setup:
FWC_VERSION: []
env:
FWC_VERSION: "{{matrix.FWC_VERSION}}"


@ -2,3 +2,7 @@
# This determines which branches will have pipelines triggered periodically, for dra workflows.
BRANCHES=( $(cat branches.json | jq -r '.branches[].branch') )
# Sort them to make ordering predictable
IFS=$'\n' BRANCHES=($(sort <<<"${BRANCHES[*]}"))
unset IFS


@ -46,4 +46,15 @@ EOF
branch: "$BRANCH"
commit: "$LAST_GOOD_COMMIT"
EOF
# Include forward compatibility tests only for the bugfix branch
if [[ "${BRANCH}" == "${BRANCHES[2]}" ]]; then
cat <<EOF
- trigger: elasticsearch-periodic-fwc
label: Trigger periodic-fwc pipeline for $BRANCH
async: true
build:
branch: "$BRANCH"
commit: "$LAST_GOOD_COMMIT"
EOF
fi
done
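For illustration only, here is how the sort above makes "${BRANCHES[2]}" predictable; the branch names below are hypothetical and not taken from branches.json:

# Hypothetical input: four active branches, sorted lexicographically
BRANCHES=(main 8.17 8.18 9.0)
IFS=$'\n' BRANCHES=($(sort <<<"${BRANCHES[*]}"))
unset IFS
echo "${BRANCHES[2]}"   # -> 9.0, the entry that would get the extra periodic-fwc trigger here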


@ -1,21 +0,0 @@
---
name: Comment on PR for .asciidoc changes
on:
# We need to use pull_request_target to be able to comment on PRs from forks
pull_request_target:
types:
- synchronize
- opened
- reopened
branches:
- main
- master
- "9.0"
jobs:
comment-on-asciidoc-change:
permissions:
contents: read
pull-requests: write
uses: elastic/docs-builder/.github/workflows/comment-on-asciidoc-changes.yml@main

.github/workflows/docs-build.yml (new file, 19 lines)

@ -0,0 +1,19 @@
name: docs-build
on:
push:
branches:
- main
pull_request_target: ~
merge_group: ~
jobs:
docs-preview:
uses: elastic/docs-builder/.github/workflows/preview-build.yml@main
with:
path-pattern: docs/**
permissions:
deployments: write
id-token: write
contents: read
pull-requests: read

.github/workflows/docs-cleanup.yml (new file, 14 lines)

@ -0,0 +1,14 @@
name: docs-cleanup
on:
pull_request_target:
types:
- closed
jobs:
docs-preview:
uses: elastic/docs-builder/.github/workflows/preview-cleanup.yml@main
permissions:
contents: none
id-token: write
deployments: write


@ -0,0 +1,346 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.benchmark.search.query.range;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.SortedNumericDocValuesField;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.IndexSortSortedNumericDocValuesRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedNumericSortField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Fork;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Param;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.TearDown;
import org.openjdk.jmh.annotations.Threads;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.profile.AsyncProfiler;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.Options;
import org.openjdk.jmh.runner.options.OptionsBuilder;
import java.io.IOException;
import java.nio.file.Files;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
/**
* Benchmark for measuring query performance with and without doc values skipper in Elasticsearch.
*
* <p><b>Goal:</b> This benchmark is designed to mimic and benchmark the execution of a range query in LogsDB,
* with and without a sparse doc values index on the {@code host.name} and {@code @timestamp} fields.
*
* <p><b>Document Structure:</b>
* <ul>
* <li>{@code host.name}: A keyword field (sorted, non-stored).</li>
* <li>{@code @timestamp}: A numeric field, indexed for range queries and using doc values with or without a doc values sparse index.</li>
* </ul>
*
* <p><b>Index Sorting:</b>
* The index is sorted primarily by {@code host.name} (ascending) and secondarily by {@code @timestamp} (descending).
* Documents are grouped into batches, where each hostname gets a dedicated batch of timestamps.
* This is meant to simulate collecting logs from a set of hosts over a certain time interval.
*
* <p><b>Batched Data Behavior:</b>
* <ul>
* <li>The {@code host.name} value is generated in batches (e.g., "host-0", "host-1", ...).</li>
* <li>Each batch contains a fixed number of documents ({@code batchSize}).</li>
* <li>The {@code @timestamp} value resets to {@code BASE_TIMESTAMP} at the start of each batch.</li>
* <li>A random timestamp delta (0 to {@code deltaTime} ms) is added so that each document in a batch differs slightly.</li>
* </ul>
*
* <p><b>Example Output:</b>
* <pre>
* | Document # | host.name | @timestamp (ms since epoch) |
* |-----------|-----------|------------------------------|
* | 1 | host-0 | 1704067200005 |
* | 2 | host-0 | 1704067201053 |
* | 3 | host-0 | 1704067202091 |
* | ... | ... | ... |
* | 10000 | host-0 | 1704077199568 |
* | 10001 | host-1 | 1704067200042 |
* | 10002 | host-1 | 1704067201099 |
* | ... | ... | ... |
* </pre>
*
* <p>When running the range query, we retrieve only a fraction of the total data,
* simulating a real-world scenario where a dashboard only needs the most recent logs.
*/
@BenchmarkMode(Mode.SampleTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@State(Scope.Benchmark)
@Fork(1)
@Threads(1)
@Warmup(iterations = 3)
@Measurement(iterations = 5)
public class DateFieldMapperDocValuesSkipperBenchmark {
/**
* Total number of documents to index.
*/
@Param("1343120")
private int nDocs;
/**
* Number of documents per hostname batch.
*/
@Param({ "1340", "121300" })
private int batchSize;
/**
* Maximum random increment (in milliseconds) added to each doc's timestamp.
*/
@Param("1000")
private int deltaTime;
/**
* Fraction of the total time range (derived from {@code batchSize * deltaTime}) that the range query will cover.
*/
@Param({ "0.01", "0.2", "0.8" })
private double queryRange;
/**
* Number of docs to index before forcing a commit, thus creating multiple Lucene segments.
*/
@Param({ "7390", "398470" })
private int commitEvery;
/**
* Seed for random data generation.
*/
@Param("42")
private int seed;
private static final String TIMESTAMP_FIELD = "@timestamp";
private static final String HOSTNAME_FIELD = "host.name";
private static final long BASE_TIMESTAMP = 1704067200000L;
private IndexSearcher indexSearcherWithoutDocValuesSkipper;
private IndexSearcher indexSearcherWithDocValuesSkipper;
private ExecutorService executorService;
/**
* Main entry point for running this benchmark via JMH.
*
* @param args command line arguments (unused)
* @throws RunnerException if the benchmark fails to run
*/
public static void main(String[] args) throws RunnerException {
final Options options = new OptionsBuilder().include(DateFieldMapperDocValuesSkipperBenchmark.class.getSimpleName())
.addProfiler(AsyncProfiler.class)
.build();
new Runner(options).run();
}
/**
* Sets up the benchmark by creating Lucene indexes (with and without doc values skipper).
* Sets up a single-threaded executor for searching the indexes, to avoid concurrent search threads.
*
* @throws IOException if an error occurs while building the index
*/
@Setup(Level.Trial)
public void setup() throws IOException {
executorService = Executors.newSingleThreadExecutor();
final Directory tempDirectoryWithoutDocValuesSkipper = FSDirectory.open(Files.createTempDirectory("temp1-"));
final Directory tempDirectoryWithDocValuesSkipper = FSDirectory.open(Files.createTempDirectory("temp2-"));
indexSearcherWithoutDocValuesSkipper = createIndex(tempDirectoryWithoutDocValuesSkipper, false, commitEvery);
indexSearcherWithDocValuesSkipper = createIndex(tempDirectoryWithDocValuesSkipper, true, commitEvery);
}
/**
* Creates an {@link IndexSearcher} after indexing documents in batches.
* Each batch commit forces multiple segments to be created.
*
* @param directory the Lucene {@link Directory} for writing the index
* @param withDocValuesSkipper true if we should enable doc values skipper on certain fields
* @param commitEvery number of documents after which to commit (and thus segment)
* @return an {@link IndexSearcher} for querying the newly built index
* @throws IOException if an I/O error occurs during index writing
*/
private IndexSearcher createIndex(final Directory directory, final boolean withDocValuesSkipper, final int commitEvery)
throws IOException {
final IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
// NOTE: index sort config matching LogsDB's sort order
config.setIndexSort(
new Sort(
new SortField(HOSTNAME_FIELD, SortField.Type.STRING, false),
new SortedNumericSortField(TIMESTAMP_FIELD, SortField.Type.LONG, true)
)
);
final Random random = new Random(seed);
try (IndexWriter indexWriter = new IndexWriter(directory, config)) {
int docCountSinceLastCommit = 0;
for (int i = 0; i < nDocs; i++) {
final Document doc = new Document();
addFieldsToDocument(doc, i, withDocValuesSkipper, random);
indexWriter.addDocument(doc);
docCountSinceLastCommit++;
// Force commit periodically to create multiple Lucene segments
if (docCountSinceLastCommit >= commitEvery) {
indexWriter.commit();
docCountSinceLastCommit = 0;
}
}
indexWriter.commit();
// Open a reader and create a searcher on top of it using a single thread executor.
DirectoryReader reader = DirectoryReader.open(indexWriter);
return new IndexSearcher(reader, executorService);
}
}
/**
* Populates the given {@link Document} with fields, optionally using doc values skipper.
*
* @param doc the Lucene document to fill
* @param docIndex index of the document being added
* @param withDocValuesSkipper true if doc values skipper is enabled
* @param random seeded {@link Random} for data variation
*/
private void addFieldsToDocument(final Document doc, int docIndex, boolean withDocValuesSkipper, final Random random) {
final int batchIndex = docIndex / batchSize;
final String hostName = "host-" + batchIndex;
// Slightly vary the timestamp in each document
final long timestamp = BASE_TIMESTAMP + ((docIndex % batchSize) * deltaTime) + random.nextInt(0, deltaTime);
if (withDocValuesSkipper) {
// Sparse doc values index on `@timestamp` and `host.name`
doc.add(SortedNumericDocValuesField.indexedField(TIMESTAMP_FIELD, timestamp));
doc.add(SortedDocValuesField.indexedField(HOSTNAME_FIELD, new BytesRef(hostName)));
} else {
// Standard doc values, points and inverted index
doc.add(new StringField(HOSTNAME_FIELD, hostName, Field.Store.NO));
doc.add(new SortedDocValuesField(HOSTNAME_FIELD, new BytesRef(hostName)));
doc.add(new LongPoint(TIMESTAMP_FIELD, timestamp));
doc.add(new SortedNumericDocValuesField(TIMESTAMP_FIELD, timestamp));
}
}
/**
* Calculates the upper bound for the timestamp range query based on {@code batchSize},
* {@code deltaTime}, and {@code queryRange}.
*
* @return the computed upper bound for the timestamp range query
*/
private long rangeEndTimestamp() {
return BASE_TIMESTAMP + (long) (batchSize * deltaTime * queryRange);
}
/**
* Executes a range query without doc values skipper.
*
* @param bh the blackhole consuming the query result
* @throws IOException if a search error occurs
*/
@Benchmark
public void rangeQueryWithoutDocValuesSkipper(final Blackhole bh) throws IOException {
bh.consume(rangeQuery(indexSearcherWithoutDocValuesSkipper, BASE_TIMESTAMP, rangeEndTimestamp(), true));
}
/**
* Executes a range query with doc values skipper enabled.
*
* @param bh the blackhole consuming the query result
* @throws IOException if a search error occurs
*/
@Benchmark
public void rangeQueryWithDocValuesSkipper(final Blackhole bh) throws IOException {
bh.consume(rangeQuery(indexSearcherWithDocValuesSkipper, BASE_TIMESTAMP, rangeEndTimestamp(), false));
}
/**
* Runs the actual Lucene range query, optionally combining a {@link LongPoint} index query
* with doc values ({@link SortedNumericDocValuesField}) via {@link IndexOrDocValuesQuery},
* and then wrapping it with an {@link IndexSortSortedNumericDocValuesRangeQuery} to utilize the index sort.
*
* @param searcher the Lucene {@link IndexSearcher}
* @param rangeStartTimestamp lower bound of the timestamp range
* @param rangeEndTimestamp upper bound of the timestamp range
* @param isIndexed true if we should combine indexed and doc value queries
* @return the total number of matching documents
* @throws IOException if a search error occurs
*/
private long rangeQuery(final IndexSearcher searcher, long rangeStartTimestamp, long rangeEndTimestamp, boolean isIndexed)
throws IOException {
assert rangeEndTimestamp > rangeStartTimestamp;
final Query rangeQuery = isIndexed
? new IndexOrDocValuesQuery(
LongPoint.newRangeQuery(TIMESTAMP_FIELD, rangeStartTimestamp, rangeEndTimestamp),
SortedNumericDocValuesField.newSlowRangeQuery(TIMESTAMP_FIELD, rangeStartTimestamp, rangeEndTimestamp)
)
: SortedNumericDocValuesField.newSlowRangeQuery(TIMESTAMP_FIELD, rangeStartTimestamp, rangeEndTimestamp);
final Query query = new IndexSortSortedNumericDocValuesRangeQuery(
TIMESTAMP_FIELD,
rangeStartTimestamp,
rangeEndTimestamp,
rangeQuery
);
return searcher.count(query);
}
/**
* Shuts down the executor service after the trial completes.
*/
@TearDown(Level.Trial)
public void tearDown() {
if (executorService != null) {
executorService.shutdown();
try {
if (executorService.awaitTermination(30, TimeUnit.SECONDS) == false) {
executorService.shutdownNow();
}
} catch (InterruptedException e) {
executorService.shutdownNow();
Thread.currentThread().interrupt();
}
}
}
}
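A rough sketch of how a JMH benchmark like this is usually launched from the Elasticsearch benchmarks module; the Gradle invocation below is an assumption for illustration rather than part of this change, and the AsyncProfiler hook wired up in main() only works when async-profiler is installed and visible to the JVM:

# Hypothetical local run (module path and task name assumed)
./gradlew -p benchmarks run --args 'DateFieldMapperDocValuesSkipperBenchmark'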


@ -0,0 +1,34 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask
def fwcVersions = buildParams.bwcVersions.released.findAll { it.major == VersionProperties.elasticsearchVersion.major && it.minor == VersionProperties.elasticsearchVersion.minor }
def previousMinorSnapshot = buildParams.bwcVersions.unreleased.find { it.major == VersionProperties.elasticsearchVersion.major && it.minor == VersionProperties.elasticsearchVersion.minor - 1 }
fwcVersions.each { fwcVersion ->
tasks.register("v${fwcVersion}#fwcTest", StandaloneRestIntegTestTask) {
usesBwcDistribution(previousMinorSnapshot)
usesBwcDistribution(fwcVersion)
systemProperty("tests.old_cluster_version", previousMinorSnapshot)
systemProperty("tests.new_cluster_version", fwcVersion)
nonInputProperties.systemProperty 'tests.fwc', 'true'
}
}
gradle.taskGraph.whenReady { graph ->
if (graph.allTasks.any { it.name.endsWith('#fwcTest') } && Boolean.parseBoolean(System.getProperty("tests.bwc.snapshot", "true"))) {
throw new GradleException("Running forward compatibility tests requires passing `-Dtests.bwc.snapshot=false`.")
}
if (graph.allTasks.any { it.name.endsWith('#fwcTest') } && graph.allTasks.any { it.name.endsWith('#bwcTest') }) {
throw new GradleException("Backward compatibility and forward compatibility tests cannot be executed in the same build.")
}
}
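As a sketch of how these tasks line up with the Buildkite command above, a local run would look roughly like the following; the version number is illustrative, and the guard in this script fails the build unless the snapshot flag is disabled:

# Run the forward-compatibility suite for one released minor (version illustrative)
./gradlew -Dbwc.checkout.align=true v9.0.1#fwcTest -Dtests.bwc.snapshot=false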


@ -145,18 +145,23 @@ if (providers.systemProperty('idea.active').getOrNull() == 'true') {
}
}
-// modifies the idea module config to enable preview features on ':libs:native' module
+// modifies the idea module config to enable preview features on modules that need them
tasks.register("enablePreviewFeatures", EnablePreviewFeaturesTask) {
group = 'ide'
-description = 'Enables preview features on native library module'
+description = 'Enables preview features on modules that need them'
dependsOn tasks.named("enableExternalConfiguration")
doLast {
enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.main.iml', 'JDK_21_PREVIEW')
enablePreview('.idea/modules/libs/native/elasticsearch.libs.native.test.iml', 'JDK_21_PREVIEW')
// due to org.elasticsearch.plugins.PluginsLoader
enablePreview('.idea/modules/server/elasticsearch.server.main.iml', 'JDK_21_PREVIEW')
enablePreview('.idea/modules/server/elasticsearch.server.test.iml', 'JDK_21_PREVIEW')
enablePreview('.idea/modules/libs/entitlement/elasticsearch.libs.entitlement.main.iml', 'JDK_21_PREVIEW')
enablePreview('.idea/modules/libs/entitlement/elasticsearch.libs.entitlement.test.iml', 'JDK_21_PREVIEW')
enablePreview('.idea/modules/libs/entitlement/bridge/elasticsearch.libs.entitlement.bridge.main.iml', 'JDK_21_PREVIEW')
enablePreview('.idea/modules/libs/entitlement/bridge/elasticsearch.libs.entitlement.bridge.test.iml', 'JDK_21_PREVIEW')
enablePreview('.idea/modules/libs/entitlement/qa/entitlement-test-plugin/elasticsearch.libs.entitlement.qa.entitlement-test-plugin.main.iml', 'JDK_21_PREVIEW')
enablePreview('.idea/modules/libs/entitlement/qa/entitlement-test-plugin/elasticsearch.libs.entitlement.qa.entitlement-test-plugin.test.iml', 'JDK_21_PREVIEW')
}
}


@ -145,7 +145,14 @@ public class BwcSetupExtension {
loggedExec.args("-DisCI");
}
-loggedExec.args("-Dbuild.snapshot=true", "-Dscan.tag.NESTED");
+loggedExec.args("-Dscan.tag.NESTED");
if (System.getProperty("tests.bwc.snapshot", "true").equals("false")) {
loggedExec.args("-Dbuild.snapshot=false", "-Dlicense.key=x-pack/plugin/core/snapshot.key");
} else {
loggedExec.args("-Dbuild.snapshot=true");
}
final LogLevel logLevel = project.getGradle().getStartParameter().getLogLevel();
List<LogLevel> nonDefaultLogLevels = Arrays.asList(LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG);
if (nonDefaultLogLevels.contains(logLevel)) {


@ -355,8 +355,9 @@ public class InternalDistributionBwcSetupPlugin implements Plugin<Project> {
String bwcTaskName = buildBwcTaskName(projectName);
bwcSetupExtension.bwcTask(bwcTaskName, c -> {
boolean useNativeExpanded = projectArtifact.expandedDistDir != null;
boolean isReleaseBuild = System.getProperty("tests.bwc.snapshot", "true").equals("false");
File expectedOutputFile = useNativeExpanded
-? new File(projectArtifact.expandedDistDir, "elasticsearch-" + bwcVersion.get() + "-SNAPSHOT")
+? new File(projectArtifact.expandedDistDir, "elasticsearch-" + bwcVersion.get() + (isReleaseBuild ? "" : "-SNAPSHOT"))
: projectArtifact.distFile;
c.getInputs().file(new File(project.getBuildDir(), "refspec")).withPathSensitivity(PathSensitivity.RELATIVE);
if (useNativeExpanded) {


@ -14,7 +14,7 @@ log4j = 2.19.0
slf4j = 2.0.6
ecsLogging = 1.2.0
jna = 5.12.1
-netty = 4.1.115.Final
+netty = 4.1.118.Final
commons_lang3 = 3.9
google_oauth_client = 1.34.1
awsv1sdk = 1.12.746


@ -15,6 +15,7 @@ import com.fasterxml.jackson.databind.ObjectMapper
import org.elasticsearch.gradle.DistributionDownloadPlugin
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.internal.BaseInternalPluginBuildPlugin
import org.elasticsearch.gradle.internal.ResolveAllDependencies
import org.elasticsearch.gradle.util.GradleUtils
@ -120,10 +121,10 @@ tasks.register("updateCIBwcVersions") {
outputFile.text = "# This file is auto-generated. See ${pipelineTemplatePath}\n" + pipeline
}
-// Writes a Buildkite pipelime from a template, and replaces $BWC_LIST with an array of versions
+// Writes a Buildkite pipelime from a template, and replaces a variable with an array of versions
// Useful for writing a list of versions in a matrix configuration
-def expandBwcList = { String outputFilePath, String pipelineTemplatePath, List<Version> versions ->
-writeBuildkitePipeline(outputFilePath, pipelineTemplatePath, [new ListExpansion(versions: versions, variable: "BWC_LIST")])
+def expandList = { String outputFilePath, String pipelineTemplatePath, String variable, List<Version> versions ->
+writeBuildkitePipeline(outputFilePath, pipelineTemplatePath, [new ListExpansion(versions: versions, variable: variable)])
}
// Writes a Buildkite pipeline from a template, and replaces $BWC_STEPS with a list of steps, one for each version
@ -140,11 +141,18 @@ tasks.register("updateCIBwcVersions") {
doLast {
writeVersions(file(".ci/bwcVersions"), filterIntermediatePatches(buildParams.bwcVersions.indexCompatible))
writeVersions(file(".ci/snapshotBwcVersions"), filterIntermediatePatches(buildParams.bwcVersions.unreleasedIndexCompatible))
-expandBwcList(
+expandList(
".buildkite/pipelines/intake.yml",
".buildkite/pipelines/intake.template.yml",
"BWC_LIST",
filterIntermediatePatches(buildParams.bwcVersions.unreleasedIndexCompatible)
)
expandList(
".buildkite/pipelines/periodic-fwc.yml",
".buildkite/pipelines/periodic-fwc.template.yml",
"FWC_LIST",
buildParams.bwcVersions.released.findAll { it.major == VersionProperties.elasticsearchVersion.major && it.minor == VersionProperties.elasticsearchVersion.minor }
)
writeBuildkitePipeline(
".buildkite/pipelines/periodic.yml",
".buildkite/pipelines/periodic.template.yml",


@ -0,0 +1,5 @@
pr: 123427
summary: Reduce iteration complexity for plan traversal
area: ES|QL
type: bug
issues: []


@ -118,9 +118,10 @@ in the case of each type of failure. The plan is to have a test case that valida
[discrete]
=== Run Jepsen (STATUS: ONGOING)
-We have ported the known scenarios in the Jepsen blogs that check loss of acknowledged writes to our testing infrastructure.
-The new tests are run continuously in our testing farm and are passing. We are also working on running Jepsen independently to verify
-that no failures are found.
+We have ported the known scenarios in the Jepsen blogs that check loss of
+acknowledged writes to our testing infrastructure. The new tests are run
+continuously in our testing farm and are passing. We will also monitor for new
+failure scenarios and adapt our test suite as needed.
== Completed


@ -1404,114 +1404,74 @@
<sha256 value="a3ebec96768ee4a2d3db44597e84cea2d0bdd68ca04822397980ea9f67075a86" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-buffer" version="4.1.115.Final">
-<artifact name="netty-buffer-4.1.115.Final.jar">
-<sha256 value="4a7b331d3770c566ab70eb02a0d1feed63b95cf6e4d68c8fe778c4c9de2d116d" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-buffer" version="4.1.118.Final">
+<artifact name="netty-buffer-4.1.118.Final.jar">
+<sha256 value="0eea4e8666a9636a28722661d8ba5fa8564477e75fec6dd2ff3e324e361f8b3c" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-buffer" version="4.1.42.Final">
-<artifact name="netty-buffer-4.1.42.Final.jar">
-<sha256 value="7b0171a4e8bcd573e08d9f2bba053c67b557ab5012106a5982ccbae5743814c0" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-codec" version="4.1.118.Final">
+<artifact name="netty-codec-4.1.118.Final.jar">
+<sha256 value="4abd215fd1ed7ce86509d169cc9cbede5042176c265a79b3b70602b017226c3f" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-codec" version="4.1.115.Final">
-<artifact name="netty-codec-4.1.115.Final.jar">
-<sha256 value="cd189afb70ec6eacfcdfdd3a5f472b4e705a5c91d5bd3ef0386421f2ae15ec77" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-codec-dns" version="4.1.118.Final">
+<artifact name="netty-codec-dns-4.1.118.Final.jar">
+<sha256 value="e115e42ca1e3cc8d85e3a632d8faa102d18c0ebc1fa4511af30bec79f8c147d4" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-codec" version="4.1.42.Final">
-<artifact name="netty-codec-4.1.42.Final.jar">
-<sha256 value="e96ced697fb7df589da7c20c995e01f75a9cb246be242bbc4cd3b4af424ff189" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-codec-http" version="4.1.118.Final">
+<artifact name="netty-codec-http-4.1.118.Final.jar">
+<sha256 value="09822d785e9a794838031ddd5346cf419b30c036a981c2e277a062bea884174b" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-codec-dns" version="4.1.115.Final">
-<artifact name="netty-codec-dns-4.1.115.Final.jar">
-<sha256 value="23dd6806bcc326855f13e69838c6411d0490e6b1aeb12e217a19a3dd6ad3f10d" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-codec-http2" version="4.1.118.Final">
+<artifact name="netty-codec-http2-4.1.118.Final.jar">
+<sha256 value="68da0b1a34dceb00a6f9f6f788fb2f6b7b9e4adba8c70658ac2bd7eb898b97ae" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-codec-http" version="4.1.115.Final">
-<artifact name="netty-codec-http-4.1.115.Final.jar">
-<sha256 value="e6dbe971c59373bbae9802021c63b9bc1d8800fead382863d67e79e79b023166" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-codec-socks" version="4.1.118.Final">
+<artifact name="netty-codec-socks-4.1.118.Final.jar">
+<sha256 value="094465e3cfb3aef0fca38ed82b801f53a6c8be7ae1f83ab0c1b2e8ece2586840" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-codec-http2" version="4.1.115.Final">
-<artifact name="netty-codec-http2-4.1.115.Final.jar">
-<sha256 value="cbed9829a5d582e91e314e209edce9a0c2eb369f23bb4fb74a5bc8b7990222c2" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-common" version="4.1.118.Final">
+<artifact name="netty-common-4.1.118.Final.jar">
+<sha256 value="65cce901ecf0f9d6591cc7750772614ab401a84415dc9aec9da4d046f0f9a77c" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-codec-socks" version="4.1.115.Final">
-<artifact name="netty-codec-socks-4.1.115.Final.jar">
-<sha256 value="e9b1cc744dc6195894450b1fd4d271a821ab167fe21ae3c459b27cdadc70e81f" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-handler" version="4.1.118.Final">
+<artifact name="netty-handler-4.1.118.Final.jar">
+<sha256 value="26e3f8a5e859fd62cf3c13dc6d75e4e18879f000a5d0ad7f58f8679675d23dae" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-common" version="4.1.115.Final">
-<artifact name="netty-common-4.1.115.Final.jar">
-<sha256 value="39f1b5a2aaa4eab5d036dfd0486e35a4276df412e092d36b2d88b494705a134d" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-handler-proxy" version="4.1.118.Final">
+<artifact name="netty-handler-proxy-4.1.118.Final.jar">
+<sha256 value="fef926126f44c668968dd3e2389c2552981d452e6dfc23b1f9bd03db92c21f96" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-common" version="4.1.42.Final">
-<artifact name="netty-common-4.1.42.Final.jar">
-<sha256 value="3d0a918d78292eeca02a7bb2188daa4e5053b6e29b71e6308309033e121242b5" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-resolver" version="4.1.118.Final">
+<artifact name="netty-resolver-4.1.118.Final.jar">
+<sha256 value="3170c225972c18b6850d28add60db15bb28d83c4e3d5b686ca220e0bd7273c8a" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-handler" version="4.1.115.Final">
-<artifact name="netty-handler-4.1.115.Final.jar">
-<sha256 value="5972028cc863b74927ce0d11fb8d58f65da2560bef5602fe8ce8903bd306ca07" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-resolver-dns" version="4.1.118.Final">
+<artifact name="netty-resolver-dns-4.1.118.Final.jar">
+<sha256 value="c0e0fdaffaba849e3145b2b96288fc8fc6f3b2a623cf72aaba708288348e4938" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-handler" version="4.1.42.Final">
-<artifact name="netty-handler-4.1.42.Final.jar">
-<sha256 value="11eda86500c33b9d386719b5419f513fd9c097d13894f25dd0c75b610d636e03" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-transport" version="4.1.118.Final">
+<artifact name="netty-transport-4.1.118.Final.jar">
+<sha256 value="ab3751e717daef9c8d91e4d74728a48730bd8530b72e2466b222b2ea3fb07db9" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-handler-proxy" version="4.1.115.Final">
-<artifact name="netty-handler-proxy-4.1.115.Final.jar">
-<sha256 value="807e67cfb17136927d11db42df62031169d1fa0883e13f254906994c84ffbe87" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-transport-classes-epoll" version="4.1.118.Final">
+<artifact name="netty-transport-classes-epoll-4.1.118.Final.jar">
+<sha256 value="bd86e6d41e1f6053f9577931655236259778ab045646e1e6ab04150f070864f3" origin="Generated by Gradle"/>
</artifact>
</component>
-<component group="io.netty" name="netty-resolver" version="4.1.115.Final">
-<artifact name="netty-resolver-4.1.115.Final.jar">
-<sha256 value="7b3455d14f59828765a00573bc3967dc59379e874bd62a67eb1926d6512109d1" origin="Generated by Gradle"/>
+<component group="io.netty" name="netty-transport-native-unix-common" version="4.1.118.Final">
+<artifact name="netty-transport-native-unix-common-4.1.118.Final.jar">
+<sha256 value="69b16793d7b41ea76a762bd2bd144fc4f7c39c156a7a59ebf69baeb560fb10b7" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-resolver" version="4.1.42.Final">
<artifact name="netty-resolver-4.1.42.Final.jar">
<sha256 value="89768242b6b7cce9bd9f5945ad21d1b4bae515c6b1bf03a8af5d1899779cebc9" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-resolver-dns" version="4.1.115.Final">
<artifact name="netty-resolver-dns-4.1.115.Final.jar">
<sha256 value="4aca31593e5896c64ab7e041bbc6c0d851bd9634ec3a4354208141a35576619f" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-transport" version="4.1.115.Final">
<artifact name="netty-transport-4.1.115.Final.jar">
<sha256 value="c3d71faaa736ffd2c9260ab0b498024b814c39c7d764bea8113fa98de6e2bdd2" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-transport" version="4.1.42.Final">
<artifact name="netty-transport-4.1.42.Final.jar">
<sha256 value="dfa817a156ea263aa9ad8364a2e226527665c9722aca40a7945f228c2c14f1da" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-transport-classes-epoll" version="4.1.115.Final">
<artifact name="netty-transport-classes-epoll-4.1.115.Final.jar">
<sha256 value="40aa67b4463cca0ab346e393c87f6c37e8954d18ec8b78567d95b55aa1f2b3aa" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-transport-native-epoll" version="4.1.42.Final">
<artifact name="netty-transport-native-epoll-4.1.42.Final.jar">
<sha256 value="3c7d659b3bd773e0ea9b7517d2d6baffa275a3d2ae8eb4c10cb8f0a7724b11d5" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-transport-native-unix-common" version="4.1.115.Final">
<artifact name="netty-transport-native-unix-common-4.1.115.Final.jar">
<sha256 value="4b03e716272657c296b0204b57c140b2b2ca96b1a746c92da41f595892ec6d88" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.netty" name="netty-transport-native-unix-common" version="4.1.42.Final">
<artifact name="netty-transport-native-unix-common-4.1.42.Final.jar">
<sha256 value="508fba9128da78bd775ba854d71917ceb2b00b95a7600254f54a277a06761a86" origin="Generated by Gradle"/>
</artifact>
</component>
<component group="io.opencensus" name="opencensus-api" version="0.30.0">


@ -9,6 +9,7 @@
package org.elasticsearch.entitlement.runtime.policy.entitlements;
import org.elasticsearch.core.Booleans;
import org.elasticsearch.entitlement.runtime.policy.ExternalEntitlement;
import org.elasticsearch.entitlement.runtime.policy.PathLookup;
import org.elasticsearch.entitlement.runtime.policy.PolicyValidationException;
@ -17,6 +18,7 @@ import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Stream;
@ -85,12 +87,12 @@ public record FilesEntitlement(List<FileData> filesData) implements Entitlement
return new RelativePathFileData(relativePath, baseDir, mode, null);
}
-static FileData ofPathSetting(String setting, Mode mode) {
-return new PathSettingFileData(setting, mode, null);
+static FileData ofPathSetting(String setting, Mode mode, boolean ignoreUrl) {
+return new PathSettingFileData(setting, mode, ignoreUrl, null);
}
-static FileData ofRelativePathSetting(String setting, BaseDir baseDir, Mode mode) {
-return new RelativePathSettingFileData(setting, baseDir, mode, null);
+static FileData ofRelativePathSetting(String setting, BaseDir baseDir, Mode mode, boolean ignoreUrl) {
+return new RelativePathSettingFileData(setting, baseDir, mode, ignoreUrl, null);
}
/**
@ -207,10 +209,10 @@ public record FilesEntitlement(List<FileData> filesData) implements Entitlement
}
}
-private record PathSettingFileData(String setting, Mode mode, Platform platform) implements FileData {
+private record PathSettingFileData(String setting, Mode mode, boolean ignoreUrl, Platform platform) implements FileData {
@Override
public Stream<Path> resolvePaths(PathLookup pathLookup) {
-return resolvePathSettings(pathLookup, setting);
+return resolvePathSettings(pathLookup, setting, ignoreUrl);
}
@Override
@ -218,17 +220,17 @@ public record FilesEntitlement(List<FileData> filesData) implements Entitlement
if (platform == platform()) {
return this;
}
-return new PathSettingFileData(setting, mode, platform);
+return new PathSettingFileData(setting, mode, ignoreUrl, platform);
}
}
-private record RelativePathSettingFileData(String setting, BaseDir baseDir, Mode mode, Platform platform)
+private record RelativePathSettingFileData(String setting, BaseDir baseDir, Mode mode, boolean ignoreUrl, Platform platform)
implements
FileData,
RelativeFileData {
@Override
public Stream<Path> resolveRelativePaths(PathLookup pathLookup) {
-return resolvePathSettings(pathLookup, setting);
+return resolvePathSettings(pathLookup, setting, ignoreUrl);
}
@Override
@ -236,16 +238,22 @@ public record FilesEntitlement(List<FileData> filesData) implements Entitlement
if (platform == platform()) {
return this;
}
-return new RelativePathSettingFileData(setting, baseDir, mode, platform);
+return new RelativePathSettingFileData(setting, baseDir, mode, ignoreUrl, platform);
}
}
-private static Stream<Path> resolvePathSettings(PathLookup pathLookup, String setting) {
+private static Stream<Path> resolvePathSettings(PathLookup pathLookup, String setting, boolean ignoreUrl) {
Stream<String> result;
if (setting.contains("*")) {
-return pathLookup.settingGlobResolver().apply(setting).map(Path::of);
+result = pathLookup.settingGlobResolver().apply(setting);
} else {
String path = pathLookup.settingResolver().apply(setting);
result = path == null ? Stream.of() : Stream.of(path);
}
-String path = pathLookup.settingResolver().apply(setting);
-return path == null ? Stream.of() : Stream.of(Path.of(path));
+if (ignoreUrl) {
+result = result.filter(s -> s.toLowerCase(Locale.ROOT).startsWith("https://") == false);
+}
+return result.map(Path::of);
}
private static Mode parseMode(String mode) {
@ -298,6 +306,7 @@ public record FilesEntitlement(List<FileData> filesData) implements Entitlement
String relativePathSetting = file.remove("relative_path_setting");
String modeAsString = file.remove("mode");
String platformAsString = file.remove("platform");
String ignoreUrlAsString = file.remove("ignore_url");
if (file.isEmpty() == false) {
throw new PolicyValidationException("unknown key(s) [" + file + "] in a listed file for files entitlement");
@ -324,6 +333,14 @@ public record FilesEntitlement(List<FileData> filesData) implements Entitlement
baseDir = parseBaseDir(relativeTo);
}
boolean ignoreUrl = false;
if (ignoreUrlAsString != null) {
if (relativePathAsString != null || pathAsString != null) {
throw new PolicyValidationException("'ignore_url' may only be used with `path_setting` or `relative_path_setting`");
}
ignoreUrl = Booleans.parseBoolean(ignoreUrlAsString);
}
final FileData fileData;
if (relativePathAsString != null) {
if (baseDir == null) {
@ -342,12 +359,12 @@ public record FilesEntitlement(List<FileData> filesData) implements Entitlement
}
fileData = FileData.ofPath(path, mode);
} else if (pathSetting != null) {
-fileData = FileData.ofPathSetting(pathSetting, mode);
+fileData = FileData.ofPathSetting(pathSetting, mode, ignoreUrl);
} else if (relativePathSetting != null) {
if (baseDir == null) {
throw new PolicyValidationException("files entitlement with a 'relative_path_setting' must specify 'relative_to'");
}
-fileData = FileData.ofRelativePathSetting(relativePathSetting, baseDir, mode);
+fileData = FileData.ofRelativePathSetting(relativePathSetting, baseDir, mode, ignoreUrl);
} else {
throw new AssertionError("File entry validation error");
}


@ -20,6 +20,7 @@ import java.nio.file.Path;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.entitlement.runtime.policy.entitlements.FilesEntitlement.BaseDir.CONFIG;
import static org.elasticsearch.entitlement.runtime.policy.entitlements.FilesEntitlement.Mode.READ;
import static org.elasticsearch.entitlement.runtime.policy.entitlements.FilesEntitlement.Mode.READ_WRITE;
import static org.hamcrest.Matchers.contains;
@ -94,22 +95,50 @@ public class FilesEntitlementTests extends ESTestCase {
public void testPathSettingResolve() {
var entitlement = FilesEntitlement.build(List.of(Map.of("path_setting", "foo.bar", "mode", "read")));
var filesData = entitlement.filesData();
-assertThat(filesData, contains(FileData.ofPathSetting("foo.bar", READ)));
-var fileData = FileData.ofPathSetting("foo.bar", READ);
+assertThat(filesData, contains(FileData.ofPathSetting("foo.bar", READ, false)));
+var fileData = FileData.ofPathSetting("foo.bar", READ, false);
// empty settings
assertThat(fileData.resolvePaths(TEST_PATH_LOOKUP).toList(), empty());
-fileData = FileData.ofPathSetting("foo.bar", READ);
+fileData = FileData.ofPathSetting("foo.bar", READ, false);
settings = Settings.builder().put("foo.bar", "/setting/path").build();
assertThat(fileData.resolvePaths(TEST_PATH_LOOKUP).toList(), contains(Path.of("/setting/path")));
-fileData = FileData.ofPathSetting("foo.*.bar", READ);
+fileData = FileData.ofPathSetting("foo.*.bar", READ, false);
settings = Settings.builder().put("foo.baz.bar", "/setting/path").build();
assertThat(fileData.resolvePaths(TEST_PATH_LOOKUP).toList(), contains(Path.of("/setting/path")));
-fileData = FileData.ofPathSetting("foo.*.bar", READ);
+fileData = FileData.ofPathSetting("foo.*.bar", READ, false);
settings = Settings.builder().put("foo.baz.bar", "/setting/path").put("foo.baz2.bar", "/other/path").build();
assertThat(fileData.resolvePaths(TEST_PATH_LOOKUP).toList(), containsInAnyOrder(Path.of("/setting/path"), Path.of("/other/path")));
}
public void testPathSettingIgnoreUrl() {
var fileData = FileData.ofPathSetting("foo.*.bar", READ, true);
settings = Settings.builder().put("foo.nonurl.bar", "/setting/path").put("foo.url.bar", "https://mysite").build();
assertThat(fileData.resolvePaths(TEST_PATH_LOOKUP).toList(), contains(Path.of("/setting/path")));
}
public void testRelativePathSettingIgnoreUrl() {
var fileData = FileData.ofRelativePathSetting("foo.*.bar", CONFIG, READ, true);
settings = Settings.builder().put("foo.nonurl.bar", "path").put("foo.url.bar", "https://mysite").build();
assertThat(fileData.resolvePaths(TEST_PATH_LOOKUP).toList(), contains(Path.of("/config/path")));
}
public void testIgnoreUrlValidation() {
var e = expectThrows(
PolicyValidationException.class,
() -> FilesEntitlement.build(List.of(Map.of("path", "/foo", "mode", "read", "ignore_url", "true")))
);
assertThat(e.getMessage(), is("'ignore_url' may only be used with `path_setting` or `relative_path_setting`"));
e = expectThrows(
PolicyValidationException.class,
() -> FilesEntitlement.build(
List.of(Map.of("relative_path", "foo", "relative_to", "config", "mode", "read", "ignore_url", "true"))
)
);
assertThat(e.getMessage(), is("'ignore_url' may only be used with `path_setting` or `relative_path_setting`"));
}
}


@ -12,6 +12,8 @@ grant {
permission java.net.SocketPermission "*", "connect";
// io.netty.util.concurrent.GlobalEventExecutor.startThread
permission java.lang.RuntimePermission "setContextClassLoader";
// io.netty.util.concurrent.GlobalEventExecutor.startThread
permission java.lang.RuntimePermission "getClassLoader";
// Used by jackson bean deserialization
permission java.lang.RuntimePermission "accessDeclaredMembers";
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";


@ -14,7 +14,6 @@ import io.netty.channel.ChannelInboundHandlerAdapter;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.network.ThreadWatchdog;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.transport.InboundPipeline;
@ -51,8 +50,8 @@ public class Netty4MessageInboundHandler extends ChannelInboundHandlerAdapter {
final ByteBuf buffer = (ByteBuf) msg;
Netty4TcpChannel channel = ctx.channel().attr(Netty4Transport.CHANNEL_KEY).get();
activityTracker.startActivity();
-try (ReleasableBytesReference reference = Netty4Utils.toReleasableBytesReference(buffer)) {
-pipeline.handleBytes(channel, reference);
+try {
+pipeline.handleBytes(channel, Netty4Utils.toReleasableBytesReference(buffer));
} finally {
activityTracker.stopActivity();
}


@ -12,12 +12,10 @@ package org.elasticsearch.transport.netty4;
import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
-import io.netty.handler.codec.MessageToMessageDecoder;
+import io.netty.channel.ChannelInboundHandlerAdapter;
import java.util.List;
@ChannelHandler.Sharable
-public class NettyByteBufSizer extends MessageToMessageDecoder<ByteBuf> {
+public class NettyByteBufSizer extends ChannelInboundHandlerAdapter {
public static final NettyByteBufSizer INSTANCE = new NettyByteBufSizer();
@ -26,14 +24,12 @@ public class NettyByteBufSizer extends MessageToMessageDecoder<ByteBuf> {
}
@Override
-protected void decode(ChannelHandlerContext ctx, ByteBuf buf, List<Object> out) {
-int readableBytes = buf.readableBytes();
-if (buf.capacity() >= 1024) {
-ByteBuf resized = buf.discardReadBytes().capacity(readableBytes);
-assert resized.readableBytes() == readableBytes;
+public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
+if (msg instanceof ByteBuf buf && buf.capacity() >= 1024) {
+int readableBytes = buf.readableBytes();
+buf = buf.discardReadBytes().capacity(readableBytes);
+assert buf.readableBytes() == readableBytes;
out.add(resized.retain());
} else {
out.add(buf.retain());
} }
ctx.fireChannelRead(msg);
} }
}


@ -14,8 +14,9 @@ grant codeBase "${codebase.netty-common}" {
// netty makes and accepts socket connections
permission java.net.SocketPermission "*", "accept,connect";
-// Netty sets custom classloader for some of its internal threads
+// Netty gets and sets classloaders for some of its internal threads
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getClassLoader";
};
grant codeBase "${codebase.netty-transport}" {


@ -161,9 +161,6 @@ tests:
- class: org.elasticsearch.oldrepos.OldRepositoryAccessIT
method: testOldSourceOnlyRepoAccess
issue: https://github.com/elastic/elasticsearch/issues/120080
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
method: test {p0=snapshot/10_basic/Failed to snapshot indices with synthetic source}
issue: https://github.com/elastic/elasticsearch/issues/120332
- class: org.elasticsearch.xpack.ccr.FollowIndexSecurityIT
method: testCleanShardFollowTaskAfterDeleteFollower
issue: https://github.com/elastic/elasticsearch/issues/120339
@ -337,15 +334,6 @@ tests:
- class: org.elasticsearch.action.admin.indices.diskusage.IndexDiskUsageAnalyzerTests
method: testCompletionField
issue: https://github.com/elastic/elasticsearch/issues/123269
- class: org.elasticsearch.index.mapper.IPSyntheticSourceNativeArrayIntegrationTests
method: testSynthesizeArray
issue: https://github.com/elastic/elasticsearch/issues/123417
- class: org.elasticsearch.index.mapper.IPSyntheticSourceNativeArrayIntegrationTests
method: testSynthesizeArrayRandom
issue: https://github.com/elastic/elasticsearch/issues/123418
- class: org.elasticsearch.index.mapper.IPSyntheticSourceNativeArrayIntegrationTests
method: testSynthesizeArrayIgnoreMalformed
issue: https://github.com/elastic/elasticsearch/issues/123419
- class: org.elasticsearch.packaging.test.DockerTests
method: test151MachineDependentHeapWithSizeOverride
issue: https://github.com/elastic/elasticsearch/issues/123437


@ -12,6 +12,7 @@ import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask
apply plugin: 'elasticsearch.internal-java-rest-test'
apply plugin: 'elasticsearch.internal-test-artifact-base'
apply plugin: 'elasticsearch.bwc-test'
apply plugin: 'elasticsearch.fwc-test'
testArtifacts {
registerTestArtifactFromSourceSet(sourceSets.javaRestTest)


@ -15,6 +15,7 @@ import org.elasticsearch.action.admin.cluster.migration.TransportGetFeatureUpgra
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.test.XContentTestUtils;
import org.junit.BeforeClass;
import java.util.Collections;
import java.util.List;
@ -30,6 +31,11 @@ public class FeatureUpgradeIT extends AbstractRollingUpgradeTestCase {
super(upgradedNodes);
}
@BeforeClass
public static void ensureNotForwardCompatTest() {
assumeFalse("Only supported by bwc tests", Boolean.parseBoolean(System.getProperty("tests.fwc", "false")));
}
public void testGetFeatureUpgradeStatus() throws Exception {
final String systemIndexWarning = "this request accesses system indices: [.tasks], but in a future major version, direct "

View file

@ -103,8 +103,12 @@ public abstract class ParameterizedRollingUpgradeTestCase extends ESRestTestCase
for (int n = 0; n < requestedUpgradedNodes; n++) {
if (upgradedNodes.add(n)) {
try {
logger.info("Upgrading node {} to version {}", n, Version.CURRENT);
getUpgradeCluster().upgradeNodeToVersion(n, Version.CURRENT);
Version upgradeVersion = System.getProperty("tests.new_cluster_version") == null
? Version.CURRENT
: Version.fromString(System.getProperty("tests.new_cluster_version"));
logger.info("Upgrading node {} to version {}", n, upgradeVersion);
getUpgradeCluster().upgradeNodeToVersion(n, upgradeVersion);
} catch (Exception e) {
upgradeFailed = true;
throw e;

View file

@ -205,6 +205,7 @@ public class TransportVersions {
public static final TransportVersion VOYAGE_AI_INTEGRATION_ADDED = def(9_014_0_00);
public static final TransportVersion BYTE_SIZE_VALUE_ALWAYS_USES_BYTES = def(9_015_0_00);
public static final TransportVersion ESQL_SERIALIZE_SOURCE_FUNCTIONS_WARNINGS = def(9_016_0_00);
public static final TransportVersion ESQL_DRIVER_NODE_DESCRIPTION = def(9_017_0_00);

/*
* WARNING: DO NOT MERGE INTO MAIN!

View file

@ -18,6 +18,8 @@ import org.elasticsearch.threadpool.Scheduler;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicReference;
@ -83,6 +85,74 @@ public class AllocationBalancingRoundSummaryService {
});
}
/**
* Summarizes the work required to move from an old to new desired balance shard allocation.
*/
public static BalancingRoundSummary createBalancerRoundSummary(DesiredBalance oldDesiredBalance, DesiredBalance newDesiredBalance) {
return new BalancingRoundSummary(
createWeightsSummary(oldDesiredBalance, newDesiredBalance),
DesiredBalance.shardMovements(oldDesiredBalance, newDesiredBalance)
);
}
/**
* Creates a summary of the node weight changes from {@code oldDesiredBalance} to {@code newDesiredBalance}.
* See {@link BalancingRoundSummary.NodesWeightsChanges} for content details.
*/
private static Map<String, BalancingRoundSummary.NodesWeightsChanges> createWeightsSummary(
DesiredBalance oldDesiredBalance,
DesiredBalance newDesiredBalance
) {
var oldWeightsPerNode = oldDesiredBalance.weightsPerNode();
var newWeightsPerNode = newDesiredBalance.weightsPerNode();
Map<String, BalancingRoundSummary.NodesWeightsChanges> nodeNameToWeightInfo = new HashMap<>(oldWeightsPerNode.size());
for (var nodeAndWeights : oldWeightsPerNode.entrySet()) {
var discoveryNode = nodeAndWeights.getKey();
var oldNodeWeightStats = nodeAndWeights.getValue();
// The node may no longer exist in the new DesiredBalance. If so, the new weights for that node are effectively zero. New
// weights of zero will result in correctly negative weight diffs for the removed node.
var newNodeWeightStats = newWeightsPerNode.getOrDefault(discoveryNode, DesiredBalanceMetrics.NodeWeightStats.ZERO);
nodeNameToWeightInfo.put(
discoveryNode.getName(),
new BalancingRoundSummary.NodesWeightsChanges(
oldNodeWeightStats,
BalancingRoundSummary.NodeWeightsDiff.create(oldNodeWeightStats, newNodeWeightStats)
)
);
}
// There may be a new node in the new DesiredBalance that was not in the old DesiredBalance. So we'll need to iterate the nodes in
// the new DesiredBalance to check.
for (var nodeAndWeights : newWeightsPerNode.entrySet()) {
var discoveryNode = nodeAndWeights.getKey();
if (nodeNameToWeightInfo.containsKey(discoveryNode.getName()) == false) {
// This node is new in the new DesiredBalance, there was no entry added during iteration of the nodes in the old
// DesiredBalance. So we'll make a new entry with a base of zero value weights and a weights diff of the new node's weights.
nodeNameToWeightInfo.put(
discoveryNode.getName(),
new BalancingRoundSummary.NodesWeightsChanges(
DesiredBalanceMetrics.NodeWeightStats.ZERO,
BalancingRoundSummary.NodeWeightsDiff.create(DesiredBalanceMetrics.NodeWeightStats.ZERO, nodeAndWeights.getValue())
)
);
}
}
return nodeNameToWeightInfo;
}
/**
* Creates and saves a balancer round summary for the work to move from {@code oldDesiredBalance} to {@code newDesiredBalance}. If
* balancer round summaries are not enabled in the cluster (see {@link #ENABLE_BALANCER_ROUND_SUMMARIES_SETTING}), then the summary is
* immediately discarded.
*/
public void addBalancerRoundSummary(DesiredBalance oldDesiredBalance, DesiredBalance newDesiredBalance) {
addBalancerRoundSummary(createBalancerRoundSummary(oldDesiredBalance, newDesiredBalance));
}
/**
* Adds the summary of a balancing round. If summaries are enabled, this will eventually be reported (logging, etc.). If balancer round
* summaries are not enabled in the cluster, then the summary is immediately discarded (so as not to fill up a data structure that will
@ -110,7 +180,7 @@ public class AllocationBalancingRoundSummaryService {
*/
private void drainAndReportSummaries() {
var combinedSummaries = drainSummaries();
if (combinedSummaries == CombinedBalancingRoundSummary.EMPTY_RESULTS) {
if (combinedSummaries == BalancingRoundSummary.CombinedBalancingRoundSummary.EMPTY_RESULTS) {
return;
}
@ -120,14 +190,15 @@ public class AllocationBalancingRoundSummaryService {
/**
* Returns a combined summary of all unreported allocation round summaries: may summarize a single balancer round, multiple, or none.
*
* @return {@link CombinedBalancingRoundSummary#EMPTY_RESULTS} if there are no balancing round summaries waiting to be reported.
* @return {@link BalancingRoundSummary.CombinedBalancingRoundSummary#EMPTY_RESULTS} if there are no balancing round summaries waiting
* to be reported.
*/
private CombinedBalancingRoundSummary drainSummaries() {
private BalancingRoundSummary.CombinedBalancingRoundSummary drainSummaries() {
ArrayList<BalancingRoundSummary> batchOfSummaries = new ArrayList<>();
while (summaries.isEmpty() == false) {
batchOfSummaries.add(summaries.poll());
}
return CombinedBalancingRoundSummary.combine(batchOfSummaries);
return BalancingRoundSummary.CombinedBalancingRoundSummary.combine(batchOfSummaries);
}

/**
@ -186,7 +257,9 @@ public class AllocationBalancingRoundSummaryService {
}
}

// @VisibleForTesting
/**
* Checks that the number of entries in {@link #summaries} matches the given {@code numberOfSummaries}.
*/
protected void verifyNumberOfSummaries(int numberOfSummaries) {
assert numberOfSummaries == summaries.size();
}
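For readers tracing the weight bookkeeping above: when a node that was present in the old DesiredBalance is absent from the new one, its new weights are treated as NodeWeightStats.ZERO, so the recorded diff is just the negation of its old weights. A minimal illustrative sketch follows (not part of the change; it assumes placement in the same org.elasticsearch.cluster.routing.allocation.allocator package because the nested records are package-private, and the numbers are invented):

class RemovedNodeDiffSketch {
    static BalancingRoundSummary.NodeWeightsDiff removedNodeDiff() {
        // Weights the node carried in the old DesiredBalance: 5 shards, 15 bytes of disk, 25 write load, 35 total weight.
        var oldStats = new DesiredBalanceMetrics.NodeWeightStats(5, 15, 25, 35);
        // The node is missing from the new DesiredBalance, so its new weights are effectively ZERO,
        // making every diff component the negation of the old value: -5, -15.0, -25.0, -35.0.
        return BalancingRoundSummary.NodeWeightsDiff.create(oldStats, DesiredBalanceMetrics.NodeWeightStats.ZERO);
    }
}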

View file

@ -169,6 +169,7 @@ public class BalancedShardsAllocator implements ShardsAllocator {
balancer.moveShards();
balancer.balance();

// Node weights are calculated after each internal balancing round and saved to the RoutingNodes copy.
collectAndRecordNodeWeightStats(balancer, weightFunction, allocation);
}

View file

@ -9,16 +9,149 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Summarizes the impact to the cluster as a result of a rebalancing round.
*
* @param nodeNameToWeightChanges The shard balance weight changes for each node (by name), comparing a previous DesiredBalance shard
* allocation to a new DesiredBalance allocation.
* @param numberOfShardsToMove The number of shard moves required to move from the previous desired balance to the new one. Does not include
* new (index creation) or removed (index deletion) shard assignments.
*/
public record BalancingRoundSummary(long numberOfShardsToMove) {
public record BalancingRoundSummary(Map<String, NodesWeightsChanges> nodeNameToWeightChanges, long numberOfShardsToMove) {
/**
* Represents the change in weights for a node going from an old DesiredBalance to a new DesiredBalance
* Saves the node weights of an old DesiredBalance, along with a diff against a newer DesiredBalance.
*
* @param baseWeights The starting {@link DesiredBalanceMetrics.NodeWeightStats} of a previous DesiredBalance.
* @param weightsDiff The difference between the {@code baseWeights} and a new DesiredBalance.
*/
record NodesWeightsChanges(DesiredBalanceMetrics.NodeWeightStats baseWeights, NodeWeightsDiff weightsDiff) {}
/**
* Represents the change of shard balance weights for a node, comparing an older DesiredBalance with the latest DesiredBalance.
*
* @param shardCountDiff How many more, or less, shards are assigned to the node in the latest DesiredBalance.
* @param diskUsageInBytesDiff How much more, or less, disk is used by shards assigned to the node in the latest DesiredBalance.
* @param writeLoadDiff How much more, or less, write load is estimated for shards assigned to the node in the latest DesiredBalance.
* @param totalWeightDiff How much more, or less, the total weight is of shards assigned to the node in the latest DesiredBalance.
*/
record NodeWeightsDiff(long shardCountDiff, double diskUsageInBytesDiff, double writeLoadDiff, double totalWeightDiff) {
/**
* Creates a diff where the {@code base} weights will be subtracted from the {@code next} weights, to show the changes made to reach
* the {@code next} weights.
*
* @param base has the original weights
* @param next has the new weights
* @return The diff of ({@code next} - {@code base})
*/
public static NodeWeightsDiff create(DesiredBalanceMetrics.NodeWeightStats base, DesiredBalanceMetrics.NodeWeightStats next) {
return new NodeWeightsDiff(
next.shardCount() - base.shardCount(),
next.diskUsageInBytes() - base.diskUsageInBytes(),
next.writeLoad() - base.writeLoad(),
next.nodeWeight() - base.nodeWeight()
);
}
/**
* Creates a new {@link NodeWeightsDiff} summing this instance's values with {@code otherDiff}'s values.
*/
public NodeWeightsDiff combine(NodeWeightsDiff otherDiff) {
return new NodeWeightsDiff(
this.shardCountDiff + otherDiff.shardCountDiff,
this.diskUsageInBytesDiff + otherDiff.diskUsageInBytesDiff,
this.writeLoadDiff + otherDiff.writeLoadDiff,
this.totalWeightDiff + otherDiff.totalWeightDiff
);
}
}
@Override
public String toString() {
return "BalancingRoundSummary{" + "numberOfShardsToMove=" + numberOfShardsToMove + '}';
return "BalancingRoundSummary{"
+ "nodeNameToWeightChanges"
+ nodeNameToWeightChanges
+ ", numberOfShardsToMove="
+ numberOfShardsToMove
+ '}';
}
/**
* Holds combined {@link BalancingRoundSummary} results. Essentially holds a list of the balancing events and the summed up changes
* across all those events: what allocation work was done across some period of time.
* TODO: WIP ES-10341
*
* Note that each balancing round summary is the difference between, at the time, latest desired balance and the previous desired
* balance. Each summary represents a step towards the next desired balance, which is based on presuming the previous desired balance is
* reached. So combining them is roughly the difference between the first summary's previous desired balance and the last summary's
* latest desired balance.
*
* @param numberOfBalancingRounds How many balancing round summaries are combined in this report.
* @param nodeNameToWeightChanges
* @param numberOfShardMoves The sum of shard moves for each balancing round being combined into a single summary.
*/
public record CombinedBalancingRoundSummary(
int numberOfBalancingRounds,
Map<String, NodesWeightsChanges> nodeNameToWeightChanges,
long numberOfShardMoves
) {
public static final CombinedBalancingRoundSummary EMPTY_RESULTS = new CombinedBalancingRoundSummary(0, new HashMap<>(), 0);
/**
* Merges multiple {@link BalancingRoundSummary} summaries into a single {@link CombinedBalancingRoundSummary}.
*/
public static CombinedBalancingRoundSummary combine(List<BalancingRoundSummary> summaries) {
if (summaries.isEmpty()) {
return EMPTY_RESULTS;
}
// We will loop through the summaries and sum the weight diffs for each node entry.
Map<String, NodesWeightsChanges> combinedNodeNameToWeightChanges = new HashMap<>();
// Number of shards moves are simply summed across summaries. Each new balancing round is built upon the last one, so it is
// possible that a shard is reassigned back to a node before it even moves away, and that will still be counted as 2 moves here.
long numberOfShardMoves = 0;
// Total number of summaries that are being combined.
int numSummaries = 0;
var iterator = summaries.iterator();
while (iterator.hasNext()) {
var summary = iterator.next();
// We'll build the weight changes by keeping the node weight base from the first summary in which a node appears and then
// summing the weight diffs in each summary to get total weight diffs across summaries.
for (var nodeNameAndWeights : summary.nodeNameToWeightChanges.entrySet()) {
var combined = combinedNodeNameToWeightChanges.get(nodeNameAndWeights.getKey());
if (combined == null) {
// Either this is the first summary, and combinedNodeNameToWeightChanges hasn't been initialized yet for this node;
// or a later balancing round had a new node. Either way, initialize the node entry with the weight changes from the
// first summary in which it appears.
combinedNodeNameToWeightChanges.put(nodeNameAndWeights.getKey(), nodeNameAndWeights.getValue());
} else {
// We have at least two summaries containing this node, so let's combine them.
var newCombinedChanges = new NodesWeightsChanges(
combined.baseWeights,
combined.weightsDiff.combine(nodeNameAndWeights.getValue().weightsDiff())
);
combinedNodeNameToWeightChanges.put(nodeNameAndWeights.getKey(), newCombinedChanges);
}
}
++numSummaries;
numberOfShardMoves += summary.numberOfShardsToMove();
}
return new CombinedBalancingRoundSummary(numSummaries, combinedNodeNameToWeightChanges, numberOfShardMoves);
}
}
}
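To make the combining rule above concrete, here is a small sketch of BalancingRoundSummary.CombinedBalancingRoundSummary.combine (again assuming the same package; the values are invented): the base weights stay at whatever the first round reported for a node, while the per-round diffs and shard-move counts are summed.

class CombineSketch {
    static BalancingRoundSummary.CombinedBalancingRoundSummary twoRounds() {
        var base = new DesiredBalanceMetrics.NodeWeightStats(10, 20, 30, 40);
        var diff = new BalancingRoundSummary.NodeWeightsDiff(1, 2, 3, 4);
        var round = new BalancingRoundSummary(
            java.util.Map.of("node1", new BalancingRoundSummary.NodesWeightsChanges(base, diff)),
            5 // shard moves in this round
        );
        // Combining two such rounds keeps the base at (10, 20, 30, 40),
        // sums the diffs to (2, 4, 6, 8) and the shard moves to 10, and reports numberOfBalancingRounds == 2.
        return BalancingRoundSummary.CombinedBalancingRoundSummary.combine(java.util.List.of(round, round));
    }
}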

View file

@ -1,45 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.cluster.routing.allocation.allocator;
import java.util.List;
/**
* Holds combined {@link BalancingRoundSummary} results. Essentially holds a list of the balancing events and the summed up changes
* across all those events: what allocation work was done across some period of time.
* TODO: WIP ES-10341
*
* Note that each balancing round summary is the difference between, at the time, latest desired balance and the previous desired balance.
* Each summary represents a step towards the next desired balance, which is based on presuming the previous desired balance is reached. So
* combining them is roughly the difference between the first summary's previous desired balance and the last summary's latest desired
* balance.
*
* @param numberOfBalancingRounds How many balancing round summaries are combined in this report.
* @param numberOfShardMoves The sum of shard moves for each balancing round being combined into a single summary.
*/
public record CombinedBalancingRoundSummary(int numberOfBalancingRounds, long numberOfShardMoves) {
public static final CombinedBalancingRoundSummary EMPTY_RESULTS = new CombinedBalancingRoundSummary(0, 0);
public static CombinedBalancingRoundSummary combine(List<BalancingRoundSummary> summaries) {
if (summaries.isEmpty()) {
return EMPTY_RESULTS;
}
int numSummaries = 0;
long numberOfShardMoves = 0;
for (BalancingRoundSummary summary : summaries) {
++numSummaries;
numberOfShardMoves += summary.numberOfShardsToMove();
}
return new CombinedBalancingRoundSummary(numSummaries, numberOfShardMoves);
}
}

View file

@ -38,7 +38,9 @@ public class DesiredBalanceMetrics {
*/
public record AllocationStats(long unassignedShards, long totalAllocations, long undesiredAllocationsExcludingShuttingDownNodes) {}

public record NodeWeightStats(long shardCount, double diskUsageInBytes, double writeLoad, double nodeWeight) {}
public record NodeWeightStats(long shardCount, double diskUsageInBytes, double writeLoad, double nodeWeight) {
public static final NodeWeightStats ZERO = new NodeWeightStats(0, 0, 0, 0);
}

// Reconciliation metrics.

/** See {@link #unassignedShards} */

View file

@ -324,7 +324,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
}
if (currentDesiredBalanceRef.compareAndSet(oldDesiredBalance, newDesiredBalance)) {
balancerRoundSummaryService.addBalancerRoundSummary(calculateBalancingRoundSummary(oldDesiredBalance, newDesiredBalance));
balancerRoundSummaryService.addBalancerRoundSummary(oldDesiredBalance, newDesiredBalance);
if (logger.isTraceEnabled()) {
var diff = DesiredBalance.hasChanges(oldDesiredBalance, newDesiredBalance)
? "Diff: " + DesiredBalance.humanReadableDiff(oldDesiredBalance, newDesiredBalance)
@ -339,13 +339,6 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
}
}
/**
* Summarizes the work required to move from an old to new desired balance shard allocation.
*/
private BalancingRoundSummary calculateBalancingRoundSummary(DesiredBalance oldDesiredBalance, DesiredBalance newDesiredBalance) {
return new BalancingRoundSummary(DesiredBalance.shardMovements(oldDesiredBalance, newDesiredBalance));
}
/**
* Submits the desired balance to be reconciled (applies the desired changes to the routing table) and creates and publishes a new
* cluster state. The data nodes will receive and apply the new cluster state to start/move/remove shards.

View file

@ -18,12 +18,12 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;

import java.io.IOException;
import java.io.StreamCorruptedException;
import java.util.function.Consumer;

public class InboundDecoder implements Releasable {
@ -53,7 +53,7 @@ public class InboundDecoder implements Releasable {
this.channelType = channelType;
}

public int decode(ReleasableBytesReference reference, Consumer<Object> fragmentConsumer) throws IOException {
public int decode(ReleasableBytesReference reference, CheckedConsumer<Object, IOException> fragmentConsumer) throws IOException {
ensureOpen();
try {
return internalDecode(reference, fragmentConsumer);
@ -63,7 +63,8 @@ public class InboundDecoder implements Releasable {
}
}

public int internalDecode(ReleasableBytesReference reference, Consumer<Object> fragmentConsumer) throws IOException {
public int internalDecode(ReleasableBytesReference reference, CheckedConsumer<Object, IOException> fragmentConsumer)
throws IOException {
if (isOnHeader()) {
int messageLength = TcpTransport.readMessageLength(reference);
if (messageLength == -1) {
@ -104,25 +105,28 @@ public class InboundDecoder implements Releasable {
}
int remainingToConsume = totalNetworkSize - bytesConsumed;
int maxBytesToConsume = Math.min(reference.length(), remainingToConsume);
ReleasableBytesReference retainedContent;
if (maxBytesToConsume == remainingToConsume) {
retainedContent = reference.retainedSlice(0, maxBytesToConsume);
} else {
retainedContent = reference.retain();
}
int bytesConsumedThisDecode = 0;
if (decompressor != null) {
bytesConsumedThisDecode += decompress(retainedContent);
bytesConsumedThisDecode += decompressor.decompress(
maxBytesToConsume == remainingToConsume ? reference.slice(0, maxBytesToConsume) : reference
);
bytesConsumed += bytesConsumedThisDecode;
ReleasableBytesReference decompressed;
while ((decompressed = decompressor.pollDecompressedPage(isDone())) != null) {
fragmentConsumer.accept(decompressed); try (var buf = decompressed) {
fragmentConsumer.accept(buf);
}
} }
} else { } else {
bytesConsumedThisDecode += maxBytesToConsume; bytesConsumedThisDecode += maxBytesToConsume;
bytesConsumed += maxBytesToConsume; bytesConsumed += maxBytesToConsume;
fragmentConsumer.accept(retainedContent); if (maxBytesToConsume == remainingToConsume) {
try (ReleasableBytesReference retained = reference.retainedSlice(0, maxBytesToConsume)) {
fragmentConsumer.accept(retained);
}
} else {
fragmentConsumer.accept(reference);
}
} }
if (isDone()) { if (isDone()) {
finishMessage(fragmentConsumer); finishMessage(fragmentConsumer);
@ -138,7 +142,7 @@ public class InboundDecoder implements Releasable {
cleanDecodeState(); cleanDecodeState();
} }
private void finishMessage(Consumer<Object> fragmentConsumer) {
private void finishMessage(CheckedConsumer<Object, IOException> fragmentConsumer) throws IOException {
cleanDecodeState(); cleanDecodeState();
fragmentConsumer.accept(END_CONTENT); fragmentConsumer.accept(END_CONTENT);
} }
@ -154,12 +158,6 @@ public class InboundDecoder implements Releasable {
} }
} }
private int decompress(ReleasableBytesReference content) throws IOException {
try (content) {
return decompressor.decompress(content);
}
}
private boolean isDone() {
return bytesConsumed == totalNetworkSize;
}
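A brief aside on the Consumer to CheckedConsumer switch above: forwarding a decoded fragment can now throw IOException (for example from finishMessage), which java.util.function.Consumer cannot declare. The standalone sketch below mirrors the shape of org.elasticsearch.core.CheckedConsumer to show the difference; it is illustrative only and not Elasticsearch code.

import java.io.IOException;

// A checked-consumer shape mirroring org.elasticsearch.core.CheckedConsumer.
interface SketchCheckedConsumer<T, E extends Exception> {
    void accept(T t) throws E;
}

class CheckedConsumerSketch {
    static void handle(Object fragment) throws IOException {
        if (fragment == null) throw new IOException("unexpected end of stream");
    }

    public static void main(String[] args) throws IOException {
        // Would not compile: java.util.function.Consumer#accept cannot throw the checked IOException of handle().
        // java.util.function.Consumer<Object> plain = CheckedConsumerSketch::handle;

        // Compiles: the checked exception is part of the functional interface's contract,
        // which is why the decoder's fragment consumer now uses a checked variant.
        SketchCheckedConsumer<Object, IOException> checked = CheckedConsumerSketch::handle;
        checked.accept("fragment");
    }
}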

View file

@ -11,18 +11,17 @@ package org.elasticsearch.transport;
import org.elasticsearch.common.bytes.CompositeBytesReference;
import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;

import java.io.IOException;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.function.BiConsumer;
import java.util.function.LongSupplier;

public class InboundPipeline implements Releasable {

private static final ThreadLocal<ArrayList<Object>> fragmentList = ThreadLocal.withInitial(ArrayList::new);
private static final InboundMessage PING_MESSAGE = new InboundMessage(null, true);
private final LongSupplier relativeTimeInMillis;
@ -56,81 +55,74 @@ public class InboundPipeline implements Releasable {
public void handleBytes(TcpChannel channel, ReleasableBytesReference reference) throws IOException {
if (uncaughtException != null) {
reference.close();
throw new IllegalStateException("Pipeline state corrupted by uncaught exception", uncaughtException);
}
try {
doHandleBytes(channel, reference);
channel.getChannelStats().markAccessed(relativeTimeInMillis.getAsLong());
statsTracker.markBytesRead(reference.length());
if (isClosed) {
reference.close();
return;
}
pending.add(reference);
doHandleBytes(channel);
} catch (Exception e) {
uncaughtException = e;
throw e;
}
}

public void doHandleBytes(TcpChannel channel, ReleasableBytesReference reference) throws IOException {
private void doHandleBytes(TcpChannel channel) throws IOException {
channel.getChannelStats().markAccessed(relativeTimeInMillis.getAsLong()); do {
statsTracker.markBytesRead(reference.length()); CheckedConsumer<Object, IOException> decodeConsumer = f -> forwardFragment(channel, f);
pending.add(reference.retain()); int bytesDecoded = decoder.decode(pending.peekFirst(), decodeConsumer);
if (bytesDecoded == 0 && pending.size() > 1) {
final ArrayList<Object> fragments = fragmentList.get(); final ReleasableBytesReference[] bytesReferences = new ReleasableBytesReference[pending.size()];
boolean continueHandling = true; int index = 0;
for (ReleasableBytesReference pendingReference : pending) {
while (continueHandling && isClosed == false) { bytesReferences[index] = pendingReference.retain();
boolean continueDecoding = true; ++index;
while (continueDecoding && pending.isEmpty() == false) { }
try (ReleasableBytesReference toDecode = getPendingBytes()) { try (
final int bytesDecoded = decoder.decode(toDecode, fragments::add); ReleasableBytesReference toDecode = new ReleasableBytesReference(
if (bytesDecoded != 0) { CompositeBytesReference.of(bytesReferences),
releasePendingBytes(bytesDecoded); () -> Releasables.closeExpectNoException(bytesReferences)
if (fragments.isEmpty() == false && endOfMessage(fragments.get(fragments.size() - 1))) { )
continueDecoding = false; ) {
} bytesDecoded = decoder.decode(toDecode, decodeConsumer);
} else {
continueDecoding = false;
}
} }
} }
if (bytesDecoded != 0) {
if (fragments.isEmpty()) { releasePendingBytes(bytesDecoded);
continueHandling = false;
} else { } else {
try { break;
forwardFragments(channel, fragments);
} finally {
for (Object fragment : fragments) {
if (fragment instanceof ReleasableBytesReference) {
((ReleasableBytesReference) fragment).close();
}
}
fragments.clear();
}
} }
} } while (pending.isEmpty() == false);
} }
private void forwardFragments(TcpChannel channel, ArrayList<Object> fragments) throws IOException { private void forwardFragment(TcpChannel channel, Object fragment) throws IOException {
for (Object fragment : fragments) { if (fragment instanceof Header) {
if (fragment instanceof Header) { headerReceived((Header) fragment);
headerReceived((Header) fragment); } else if (fragment instanceof Compression.Scheme) {
} else if (fragment instanceof Compression.Scheme) { assert aggregator.isAggregating();
assert aggregator.isAggregating(); aggregator.updateCompressionScheme((Compression.Scheme) fragment);
aggregator.updateCompressionScheme((Compression.Scheme) fragment); } else if (fragment == InboundDecoder.PING) {
} else if (fragment == InboundDecoder.PING) { assert aggregator.isAggregating() == false;
assert aggregator.isAggregating() == false; messageHandler.accept(channel, PING_MESSAGE);
messageHandler.accept(channel, PING_MESSAGE); } else if (fragment == InboundDecoder.END_CONTENT) {
} else if (fragment == InboundDecoder.END_CONTENT) { assert aggregator.isAggregating();
assert aggregator.isAggregating(); InboundMessage aggregated = aggregator.finishAggregation();
InboundMessage aggregated = aggregator.finishAggregation(); try {
try { statsTracker.markMessageReceived();
statsTracker.markMessageReceived(); messageHandler.accept(channel, aggregated);
messageHandler.accept(channel, aggregated); } finally {
} finally { aggregated.decRef();
aggregated.decRef();
}
} else {
assert aggregator.isAggregating();
assert fragment instanceof ReleasableBytesReference;
aggregator.aggregate((ReleasableBytesReference) fragment);
} }
} else {
assert aggregator.isAggregating();
assert fragment instanceof ReleasableBytesReference;
aggregator.aggregate((ReleasableBytesReference) fragment);
} }
} }
@ -139,25 +131,6 @@ public class InboundPipeline implements Releasable {
aggregator.headerReceived(header);
} }
private static boolean endOfMessage(Object fragment) {
return fragment == InboundDecoder.PING || fragment == InboundDecoder.END_CONTENT || fragment instanceof Exception;
}
private ReleasableBytesReference getPendingBytes() {
if (pending.size() == 1) {
return pending.peekFirst().retain();
} else {
final ReleasableBytesReference[] bytesReferences = new ReleasableBytesReference[pending.size()];
int index = 0;
for (ReleasableBytesReference pendingReference : pending) {
bytesReferences[index] = pendingReference.retain();
++index;
}
final Releasable releasable = () -> Releasables.closeExpectNoException(bytesReferences);
return new ReleasableBytesReference(CompositeBytesReference.of(bytesReferences), releasable);
}
}
private void releasePendingBytes(int bytesConsumed) {
int bytesToRelease = bytesConsumed;
while (bytesToRelease != 0) {

View file

@ -120,8 +120,9 @@ grant codeBase "${codebase.httpasyncclient}" {
grant codeBase "${codebase.netty-common}" {
// for reading the system-wide configuration for the backlog of established sockets
permission java.io.FilePermission "/proc/sys/net/core/somaxconn", "read";
// Netty sets custom classloader for some of its internal threads
// Netty gets and sets classloaders for some of its internal threads
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getClassLoader";
permission java.net.SocketPermission "*", "accept,connect";
};

View file

@ -12,38 +12,63 @@ package org.elasticsearch.cluster.routing.allocation.allocator;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.MockLog;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.Before;

import java.util.Map;
import java.util.Set;

public class AllocationBalancingRoundSummaryServiceTests extends ESTestCase {
private static final Logger logger = LogManager.getLogger(AllocationBalancingRoundSummaryServiceTests.class);
private static final String BALANCING_SUMMARY_MSG_PREFIX = "Balancing round summaries:*";
private static final Map<String, BalancingRoundSummary.NodesWeightsChanges> NODE_NAME_TO_WEIGHT_CHANGES = Map.of(
"node1",
new BalancingRoundSummary.NodesWeightsChanges(
new DesiredBalanceMetrics.NodeWeightStats(1L, 2, 3, 4),
new BalancingRoundSummary.NodeWeightsDiff(1, 2, 3, 4)
),
"node2",
new BalancingRoundSummary.NodesWeightsChanges(
new DesiredBalanceMetrics.NodeWeightStats(1L, 2, 3, 4),
new BalancingRoundSummary.NodeWeightsDiff(1, 2, 3, 4)
)
);
final DiscoveryNode DUMMY_NODE = new DiscoveryNode("node1Name", "node1Id", "eph-node1", "abc", "abc", null, Map.of(), Set.of(), null);
final DiscoveryNode SECOND_DUMMY_NODE = new DiscoveryNode(
"node2Name",
"node2Id",
"eph-node2",
"def",
"def",
null,
Map.of(),
Set.of(),
null
);
final String INDEX_NAME = "index";
final String INDEX_UUID = "_indexUUID_";
final Settings enabledSummariesSettings = Settings.builder()
.put(AllocationBalancingRoundSummaryService.ENABLE_BALANCER_ROUND_SUMMARIES_SETTING.getKey(), true)
.build();
final Settings disabledDefaultEmptySettings = Settings.builder().build();
final Settings enabledButNegativeIntervalSettings = Settings.builder()
.put(AllocationBalancingRoundSummaryService.ENABLE_BALANCER_ROUND_SUMMARIES_SETTING.getKey(), true)
.put(AllocationBalancingRoundSummaryService.BALANCER_ROUND_SUMMARIES_LOG_INTERVAL_SETTING.getKey(), TimeValue.MINUS_ONE)
.build();

ClusterSettings enabledClusterSettings = new ClusterSettings(enabledSummariesSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
ClusterSettings disabledDefaultEmptyClusterSettings = new ClusterSettings(
disabledDefaultEmptySettings,
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS
);
ClusterSettings enabledButNegativeIntervalClusterSettings = new ClusterSettings(
enabledButNegativeIntervalSettings,
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS
);

// Construction parameters for the service.
@ -68,7 +93,7 @@ public class AllocationBalancingRoundSummaryServiceTests extends ESTestCase {
* Add a summary and check it is not logged.
*/
service.addBalancerRoundSummary(new BalancingRoundSummary(50));
service.addBalancerRoundSummary(new BalancingRoundSummary(NODE_NAME_TO_WEIGHT_CHANGES, 50));
service.verifyNumberOfSummaries(0); // when summaries are disabled, summaries are not retained when added.
mockLog.addExpectation(
new MockLog.UnseenEventExpectation(
@ -96,7 +121,7 @@ public class AllocationBalancingRoundSummaryServiceTests extends ESTestCase {
* Add a summary and check the service logs a report on it.
*/
service.addBalancerRoundSummary(new BalancingRoundSummary(50));
service.addBalancerRoundSummary(new BalancingRoundSummary(NODE_NAME_TO_WEIGHT_CHANGES, 50));
service.verifyNumberOfSummaries(1);
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
@ -116,7 +141,7 @@ public class AllocationBalancingRoundSummaryServiceTests extends ESTestCase {
* Add a second summary, check for more logging.
*/
service.addBalancerRoundSummary(new BalancingRoundSummary(200));
service.addBalancerRoundSummary(new BalancingRoundSummary(NODE_NAME_TO_WEIGHT_CHANGES, 200));
service.verifyNumberOfSummaries(1);
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
@ -141,8 +166,8 @@ public class AllocationBalancingRoundSummaryServiceTests extends ESTestCase {
var service = new AllocationBalancingRoundSummaryService(testThreadPool, enabledClusterSettings);
try (var mockLog = MockLog.capture(AllocationBalancingRoundSummaryService.class)) {
service.addBalancerRoundSummary(new BalancingRoundSummary(50));
service.addBalancerRoundSummary(new BalancingRoundSummary(NODE_NAME_TO_WEIGHT_CHANGES, 50));
service.addBalancerRoundSummary(new BalancingRoundSummary(100));
service.addBalancerRoundSummary(new BalancingRoundSummary(NODE_NAME_TO_WEIGHT_CHANGES, 100));
service.verifyNumberOfSummaries(2);
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
@ -171,7 +196,7 @@ public class AllocationBalancingRoundSummaryServiceTests extends ESTestCase {
* First add some summaries to report, ensuring that the logging is active.
*/
service.addBalancerRoundSummary(new BalancingRoundSummary(50));
service.addBalancerRoundSummary(new BalancingRoundSummary(NODE_NAME_TO_WEIGHT_CHANGES, 50));
service.verifyNumberOfSummaries(1);
mockLog.addExpectation(
new MockLog.SeenEventExpectation(
@ -224,7 +249,7 @@ public class AllocationBalancingRoundSummaryServiceTests extends ESTestCase {
* summaries waiting to be reported.
*/
service.addBalancerRoundSummary(new BalancingRoundSummary(50));
service.addBalancerRoundSummary(new BalancingRoundSummary(NODE_NAME_TO_WEIGHT_CHANGES, 50));
service.verifyNumberOfSummaries(1);
clusterSettings.applySettings(disabledSettingsUpdate);
@ -234,7 +259,7 @@ public class AllocationBalancingRoundSummaryServiceTests extends ESTestCase {
* Verify that any additional summaries are not retained, since the service is disabled.
*/
service.addBalancerRoundSummary(new BalancingRoundSummary(50));
service.addBalancerRoundSummary(new BalancingRoundSummary(NODE_NAME_TO_WEIGHT_CHANGES, 50));
service.verifyNumberOfSummaries(0);
// Check that the service never logged anything.
@ -253,4 +278,173 @@ public class AllocationBalancingRoundSummaryServiceTests extends ESTestCase {
}
}
/**
* Tests the {@link AllocationBalancingRoundSummaryService#createBalancerRoundSummary(DesiredBalance, DesiredBalance)} logic.
*/
public void testCreateBalancerRoundSummary() {
// Initial desired balance allocations and node weights.
DesiredBalance firstDesiredBalance = new DesiredBalance(
1,
// The shard assignments and node weights don't make sense together, but for summary purposes the first determines the summary's
// number of shards moved, and the second the weight changes: the summary service doesn't need them to make sense together
// because it looks at them separately. They do have to make sense individually across balancing rounds.
Map.of(new ShardId(INDEX_NAME, INDEX_UUID, 0), new ShardAssignment(Set.of("a", "b"), 2, 0, 0)),
Map.of(DUMMY_NODE, new DesiredBalanceMetrics.NodeWeightStats(10, 20, 30, 40)),
DesiredBalance.ComputationFinishReason.CONVERGED
);
// Move two shards and change the node weights.
DesiredBalance secondDesiredBalance = new DesiredBalance(
1,
Map.of(new ShardId(INDEX_NAME, INDEX_UUID, 0), new ShardAssignment(Set.of("c", "d"), 2, 0, 0)),
Map.of(DUMMY_NODE, new DesiredBalanceMetrics.NodeWeightStats(20, 40, 60, 80)),
DesiredBalance.ComputationFinishReason.CONVERGED
);
// Move one shard and change the node weights.
DesiredBalance thirdDesiredBalance = new DesiredBalance(
1,
Map.of(new ShardId(INDEX_NAME, INDEX_UUID, 0), new ShardAssignment(Set.of("a", "d"), 2, 0, 0)),
Map.of(DUMMY_NODE, new DesiredBalanceMetrics.NodeWeightStats(30, 60, 90, 120)),
DesiredBalance.ComputationFinishReason.CONVERGED
);
var firstSummary = AllocationBalancingRoundSummaryService.createBalancerRoundSummary(firstDesiredBalance, secondDesiredBalance);
var secondSummary = AllocationBalancingRoundSummaryService.createBalancerRoundSummary(secondDesiredBalance, thirdDesiredBalance);
assertEquals(2, firstSummary.numberOfShardsToMove());
assertEquals(1, firstSummary.nodeNameToWeightChanges().size());
var firstSummaryWeights = firstSummary.nodeNameToWeightChanges().get(DUMMY_NODE.getName());
assertEquals(10, firstSummaryWeights.baseWeights().shardCount());
assertDoublesEqual(20, firstSummaryWeights.baseWeights().diskUsageInBytes());
assertDoublesEqual(30, firstSummaryWeights.baseWeights().writeLoad());
assertDoublesEqual(40, firstSummaryWeights.baseWeights().nodeWeight());
assertEquals(10, firstSummaryWeights.weightsDiff().shardCountDiff());
assertDoublesEqual(20, firstSummaryWeights.weightsDiff().diskUsageInBytesDiff());
assertDoublesEqual(30, firstSummaryWeights.weightsDiff().writeLoadDiff());
assertDoublesEqual(40, firstSummaryWeights.weightsDiff().totalWeightDiff());
assertEquals(1, secondSummary.numberOfShardsToMove());
assertEquals(1, secondSummary.nodeNameToWeightChanges().size());
var secondSummaryWeights = secondSummary.nodeNameToWeightChanges().get(DUMMY_NODE.getName());
assertEquals(20, secondSummaryWeights.baseWeights().shardCount());
assertDoublesEqual(40, secondSummaryWeights.baseWeights().diskUsageInBytes());
assertDoublesEqual(60, secondSummaryWeights.baseWeights().writeLoad());
assertDoublesEqual(80, secondSummaryWeights.baseWeights().nodeWeight());
assertEquals(10, secondSummaryWeights.weightsDiff().shardCountDiff());
assertDoublesEqual(20, secondSummaryWeights.weightsDiff().diskUsageInBytesDiff());
assertDoublesEqual(30, secondSummaryWeights.weightsDiff().writeLoadDiff());
assertDoublesEqual(40, secondSummaryWeights.weightsDiff().totalWeightDiff());
}
/**
* Tests that removing a node from old to new DesiredBalance will result in a weights diff of negative values bringing the weights down
* to zero.
*/
public void testCreateBalancerRoundSummaryWithRemovedNode() {
DesiredBalance firstDesiredBalance = new DesiredBalance(
1,
// The shard assignments and node weights don't make sense together, but for summary purposes the first determines the summary's
// number of shards moved, and the second the weight changes: the summary service doesn't need them to make sense together
// because it looks at them separately. They do have to make sense individually across balancing rounds.
Map.of(new ShardId(INDEX_NAME, INDEX_UUID, 0), new ShardAssignment(Set.of(DUMMY_NODE.getId()), 1, 0, 0)),
Map.of(
DUMMY_NODE,
new DesiredBalanceMetrics.NodeWeightStats(10, 20, 30, 40),
SECOND_DUMMY_NODE,
new DesiredBalanceMetrics.NodeWeightStats(5, 15, 25, 35)
),
DesiredBalance.ComputationFinishReason.CONVERGED
);
// Remove a node and don't move any shards.
DesiredBalance secondDesiredBalance = new DesiredBalance(
1,
Map.of(new ShardId(INDEX_NAME, INDEX_UUID, 0), new ShardAssignment(Set.of(DUMMY_NODE.getId()), 1, 0, 0)),
Map.of(DUMMY_NODE, new DesiredBalanceMetrics.NodeWeightStats(20, 40, 60, 80)),
DesiredBalance.ComputationFinishReason.CONVERGED
);
var summary = AllocationBalancingRoundSummaryService.createBalancerRoundSummary(firstDesiredBalance, secondDesiredBalance);
assertEquals(0, summary.numberOfShardsToMove());
assertEquals(2, summary.nodeNameToWeightChanges().size());
var summaryDummyNodeWeights = summary.nodeNameToWeightChanges().get(DUMMY_NODE.getName());
assertEquals(10, summaryDummyNodeWeights.baseWeights().shardCount());
assertDoublesEqual(20, summaryDummyNodeWeights.baseWeights().diskUsageInBytes());
assertDoublesEqual(30, summaryDummyNodeWeights.baseWeights().writeLoad());
assertDoublesEqual(40, summaryDummyNodeWeights.baseWeights().nodeWeight());
assertEquals(10, summaryDummyNodeWeights.weightsDiff().shardCountDiff());
assertDoublesEqual(20, summaryDummyNodeWeights.weightsDiff().diskUsageInBytesDiff());
assertDoublesEqual(30, summaryDummyNodeWeights.weightsDiff().writeLoadDiff());
assertDoublesEqual(40, summaryDummyNodeWeights.weightsDiff().totalWeightDiff());
var summarySecondDummyNodeWeights = summary.nodeNameToWeightChanges().get(SECOND_DUMMY_NODE.getName());
assertEquals(5, summarySecondDummyNodeWeights.baseWeights().shardCount());
assertDoublesEqual(15, summarySecondDummyNodeWeights.baseWeights().diskUsageInBytes());
assertDoublesEqual(25, summarySecondDummyNodeWeights.baseWeights().writeLoad());
assertDoublesEqual(35, summarySecondDummyNodeWeights.baseWeights().nodeWeight());
assertEquals(-5, summarySecondDummyNodeWeights.weightsDiff().shardCountDiff());
assertDoublesEqual(-15, summarySecondDummyNodeWeights.weightsDiff().diskUsageInBytesDiff());
assertDoublesEqual(-25, summarySecondDummyNodeWeights.weightsDiff().writeLoadDiff());
assertDoublesEqual(-35, summarySecondDummyNodeWeights.weightsDiff().totalWeightDiff());
}
/**
* Tests that adding a node from old to new DesiredBalance will result in an entry in the summary for the new node with zero weights and
* a weights diff showing the new allocation weight changes.
*/
public void testCreateBalancerRoundSummaryWithAddedNode() {
DesiredBalance firstDesiredBalance = new DesiredBalance(
1,
// The shard assignments and node weights don't make sense together, but for summary purposes the first determines the summary's
// number of shards moved, and the second the weight changes: the summary service doesn't need them to make sense together
// because it looks at them separately. They do have to make sense individually across balancing rounds.
Map.of(new ShardId(INDEX_NAME, INDEX_UUID, 0), new ShardAssignment(Set.of(DUMMY_NODE.getId()), 1, 0, 0)),
Map.of(DUMMY_NODE, new DesiredBalanceMetrics.NodeWeightStats(10, 20, 30, 40)),
DesiredBalance.ComputationFinishReason.CONVERGED
);
// Add a new node and move one shard.
DesiredBalance secondDesiredBalance = new DesiredBalance(
1,
Map.of(new ShardId(INDEX_NAME, INDEX_UUID, 0), new ShardAssignment(Set.of(SECOND_DUMMY_NODE.getId()), 1, 0, 0)),
Map.of(
DUMMY_NODE,
new DesiredBalanceMetrics.NodeWeightStats(20, 40, 60, 80),
SECOND_DUMMY_NODE,
new DesiredBalanceMetrics.NodeWeightStats(5, 15, 25, 35)
),
DesiredBalance.ComputationFinishReason.CONVERGED
);
var summary = AllocationBalancingRoundSummaryService.createBalancerRoundSummary(firstDesiredBalance, secondDesiredBalance);
assertEquals(1, summary.numberOfShardsToMove());
assertEquals(2, summary.nodeNameToWeightChanges().size());
var summaryDummyNodeWeights = summary.nodeNameToWeightChanges().get(DUMMY_NODE.getName());
assertEquals(10, summaryDummyNodeWeights.baseWeights().shardCount());
assertDoublesEqual(20, summaryDummyNodeWeights.baseWeights().diskUsageInBytes());
assertDoublesEqual(30, summaryDummyNodeWeights.baseWeights().writeLoad());
assertDoublesEqual(40, summaryDummyNodeWeights.baseWeights().nodeWeight());
assertEquals(10, summaryDummyNodeWeights.weightsDiff().shardCountDiff());
assertDoublesEqual(20, summaryDummyNodeWeights.weightsDiff().diskUsageInBytesDiff());
assertDoublesEqual(30, summaryDummyNodeWeights.weightsDiff().writeLoadDiff());
assertDoublesEqual(40, summaryDummyNodeWeights.weightsDiff().totalWeightDiff());
var summarySecondDummyNodeWeights = summary.nodeNameToWeightChanges().get(SECOND_DUMMY_NODE.getName());
assertEquals(0, summarySecondDummyNodeWeights.baseWeights().shardCount());
assertDoublesEqual(0, summarySecondDummyNodeWeights.baseWeights().diskUsageInBytes());
assertDoublesEqual(0, summarySecondDummyNodeWeights.baseWeights().writeLoad());
assertDoublesEqual(0, summarySecondDummyNodeWeights.baseWeights().nodeWeight());
assertEquals(5, summarySecondDummyNodeWeights.weightsDiff().shardCountDiff());
assertDoublesEqual(15, summarySecondDummyNodeWeights.weightsDiff().diskUsageInBytesDiff());
assertDoublesEqual(25, summarySecondDummyNodeWeights.weightsDiff().writeLoadDiff());
assertDoublesEqual(35, summarySecondDummyNodeWeights.weightsDiff().totalWeightDiff());
}
/**
* Helper for double type inputs. assertEquals on double type inputs require a delta.
*/
private void assertDoublesEqual(double expected, double actual) {
assertEquals(expected, actual, 0.00001);
}
} }

View file

@ -0,0 +1,115 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancingRoundSummary.CombinedBalancingRoundSummary;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
public class BalancingRoundSummaryTests extends ESTestCase {
/**
* Tests the {@link BalancingRoundSummary.CombinedBalancingRoundSummary#combine(List)} method.
*/
public void testCombine() {
final String NODE_1 = "node1";
final String NODE_2 = "node2";
final var node1BaseWeights = new DesiredBalanceMetrics.NodeWeightStats(10, 20, 30, 40);
final var node2BaseWeights = new DesiredBalanceMetrics.NodeWeightStats(100, 200, 300, 400);
final var commonDiff = new BalancingRoundSummary.NodeWeightsDiff(1, 2, 3, 4);
final long shardMovesSummary1 = 50;
final long shardMovesSummary2 = 150;
// Set up a summaries list with two summary entries for a two node cluster
List<BalancingRoundSummary> summaries = new ArrayList<>();
summaries.add(
new BalancingRoundSummary(
Map.of(
NODE_1,
new BalancingRoundSummary.NodesWeightsChanges(node1BaseWeights, commonDiff),
NODE_2,
new BalancingRoundSummary.NodesWeightsChanges(node2BaseWeights, commonDiff)
),
shardMovesSummary1
)
);
summaries.add(
new BalancingRoundSummary(
Map.of(
NODE_1,
new BalancingRoundSummary.NodesWeightsChanges(
// The base weights for the next BalancingRoundSummary will be the previous BalancingRoundSummary's base + diff
// weights.
new DesiredBalanceMetrics.NodeWeightStats(
node1BaseWeights.shardCount() + commonDiff.shardCountDiff(),
node1BaseWeights.diskUsageInBytes() + commonDiff.diskUsageInBytesDiff(),
node1BaseWeights.writeLoad() + commonDiff.writeLoadDiff(),
node1BaseWeights.nodeWeight() + commonDiff.totalWeightDiff()
),
commonDiff
),
NODE_2,
new BalancingRoundSummary.NodesWeightsChanges(
new DesiredBalanceMetrics.NodeWeightStats(
node2BaseWeights.shardCount() + commonDiff.shardCountDiff(),
node2BaseWeights.diskUsageInBytes() + commonDiff.diskUsageInBytesDiff(),
node2BaseWeights.writeLoad() + commonDiff.writeLoadDiff(),
node2BaseWeights.nodeWeight() + commonDiff.totalWeightDiff()
),
commonDiff
)
),
shardMovesSummary2
)
);
// Combine the summaries.
CombinedBalancingRoundSummary combined = BalancingRoundSummary.CombinedBalancingRoundSummary.combine(summaries);
assertEquals(2, combined.numberOfBalancingRounds());
assertEquals(shardMovesSummary1 + shardMovesSummary2, combined.numberOfShardMoves());
assertEquals(2, combined.nodeNameToWeightChanges().size());
var combinedNode1WeightsChanges = combined.nodeNameToWeightChanges().get(NODE_1);
var combinedNode2WeightsChanges = combined.nodeNameToWeightChanges().get(NODE_2);
// The base weights for each node should match the first BalancingRoundSummary's base weight values. The diff weights will be summed
// across all BalancingRoundSummary entries (in this case, there are two BalancingRoundSummary entries).
assertEquals(node1BaseWeights.shardCount(), combinedNode1WeightsChanges.baseWeights().shardCount());
assertDoublesEqual(node1BaseWeights.diskUsageInBytes(), combinedNode1WeightsChanges.baseWeights().diskUsageInBytes());
assertDoublesEqual(node1BaseWeights.writeLoad(), combinedNode1WeightsChanges.baseWeights().writeLoad());
assertDoublesEqual(node1BaseWeights.nodeWeight(), combinedNode1WeightsChanges.baseWeights().nodeWeight());
assertEquals(2 * commonDiff.shardCountDiff(), combinedNode1WeightsChanges.weightsDiff().shardCountDiff());
assertDoublesEqual(2 * commonDiff.diskUsageInBytesDiff(), combinedNode1WeightsChanges.weightsDiff().diskUsageInBytesDiff());
assertDoublesEqual(2 * commonDiff.writeLoadDiff(), combinedNode1WeightsChanges.weightsDiff().writeLoadDiff());
assertDoublesEqual(2 * commonDiff.totalWeightDiff(), combinedNode1WeightsChanges.weightsDiff().totalWeightDiff());
assertEquals(node2BaseWeights.shardCount(), combinedNode2WeightsChanges.baseWeights().shardCount());
assertDoublesEqual(node2BaseWeights.diskUsageInBytes(), combinedNode2WeightsChanges.baseWeights().diskUsageInBytes());
assertDoublesEqual(node2BaseWeights.writeLoad(), combinedNode2WeightsChanges.baseWeights().writeLoad());
assertDoublesEqual(node2BaseWeights.nodeWeight(), combinedNode2WeightsChanges.baseWeights().nodeWeight());
assertEquals(2 * commonDiff.shardCountDiff(), combinedNode2WeightsChanges.weightsDiff().shardCountDiff());
assertDoublesEqual(2 * commonDiff.diskUsageInBytesDiff(), combinedNode2WeightsChanges.weightsDiff().diskUsageInBytesDiff());
assertDoublesEqual(2 * commonDiff.writeLoadDiff(), combinedNode2WeightsChanges.weightsDiff().writeLoadDiff());
assertDoublesEqual(2 * commonDiff.totalWeightDiff(), combinedNode2WeightsChanges.weightsDiff().totalWeightDiff());
}
/**
* Helper for double type inputs. assertEquals on double type inputs require a delta.
*/
private void assertDoublesEqual(double expected, double actual) {
assertEquals(expected, actual, 0.00001);
}
}

View file

@ -141,7 +141,7 @@ public abstract class NativeArrayIntegrationTestCase extends ESSingleNodeTestCas
} else {
var copyExpectedStoredFields = new String[expectedStoredFields.length + 1];
System.arraycopy(expectedStoredFields, 0, copyExpectedStoredFields, 0, expectedStoredFields.length);
copyExpectedStoredFields[copyExpectedStoredFields.length - 1] = "_ignored_source";
copyExpectedStoredFields[copyExpectedStoredFields.length - 1] = "_recovery_source";
assertThat(storedFieldNames, containsInAnyOrder(copyExpectedStoredFields));
}
}

View file

@@ -117,8 +117,6 @@ public class InboundDecoderTests extends ESTestCase {
         assertEquals(messageBytes, content);
         // Ref count is incremented since the bytes are forwarded as a fragment
         assertTrue(releasable2.hasReferences());
-        releasable2.decRef();
-        assertTrue(releasable2.hasReferences());
         assertTrue(releasable2.decRef());
         assertEquals(InboundDecoder.END_CONTENT, endMarker);
     }
@@ -335,7 +333,12 @@ public class InboundDecoderTests extends ESTestCase {
         final BytesReference bytes2 = totalBytes.slice(bytesConsumed, totalBytes.length() - bytesConsumed);
         final ReleasableBytesReference releasable2 = wrapAsReleasable(bytes2);
-        int bytesConsumed2 = decoder.decode(releasable2, fragments::add);
+        int bytesConsumed2 = decoder.decode(releasable2, e -> {
+            fragments.add(e);
+            if (e instanceof ReleasableBytesReference reference) {
+                reference.retain();
+            }
+        });
         assertEquals(totalBytes.length() - totalHeaderSize, bytesConsumed2);
         final Object compressionScheme = fragments.get(0);

View file

@@ -159,12 +159,11 @@ public class InboundPipelineTests extends ESTestCase {
                 final int remainingBytes = networkBytes.length() - currentOffset;
                 final int bytesToRead = Math.min(randomIntBetween(1, 32 * 1024), remainingBytes);
                 final BytesReference slice = networkBytes.slice(currentOffset, bytesToRead);
-                try (ReleasableBytesReference reference = new ReleasableBytesReference(slice, () -> {})) {
-                    toRelease.add(reference);
-                    bytesReceived += reference.length();
-                    pipeline.handleBytes(channel, reference);
-                    currentOffset += bytesToRead;
-                }
+                ReleasableBytesReference reference = new ReleasableBytesReference(slice, () -> {});
+                toRelease.add(reference);
+                bytesReceived += reference.length();
+                pipeline.handleBytes(channel, reference);
+                currentOffset += bytesToRead;
             }

             final int messages = expected.size();
@@ -288,13 +287,12 @@ public class InboundPipelineTests extends ESTestCase {
             final Releasable releasable = () -> bodyReleased.set(true);
             final int from = totalHeaderSize - 1;
             final BytesReference partHeaderPartBody = reference.slice(from, reference.length() - from - 1);
-            try (ReleasableBytesReference slice = new ReleasableBytesReference(partHeaderPartBody, releasable)) {
-                pipeline.handleBytes(new FakeTcpChannel(), slice);
-            }
+            pipeline.handleBytes(new FakeTcpChannel(), new ReleasableBytesReference(partHeaderPartBody, releasable));
             assertFalse(bodyReleased.get());
-            try (ReleasableBytesReference slice = new ReleasableBytesReference(reference.slice(reference.length() - 1, 1), releasable)) {
-                pipeline.handleBytes(new FakeTcpChannel(), slice);
-            }
+            pipeline.handleBytes(
+                new FakeTcpChannel(),
+                new ReleasableBytesReference(reference.slice(reference.length() - 1, 1), releasable)
+            );
             assertTrue(bodyReleased.get());
         }
     }

View file

@@ -11,13 +11,21 @@ package org.elasticsearch.test.cluster.local;

 import org.elasticsearch.test.cluster.SystemPropertyProvider;

+import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;

-import static java.util.Map.entry;
-
 public class DefaultSystemPropertyProvider implements SystemPropertyProvider {
     @Override
     public Map<String, String> get(LocalClusterSpec.LocalNodeSpec nodeSpec) {
-        return Map.ofEntries(entry("ingest.geoip.downloader.enabled.default", "false"), entry("tests.testfeatures.enabled", "true"));
+        Map<String, String> properties = new HashMap<>();
+        properties.put("ingest.geoip.downloader.enabled.default", "false");
+        // enable test features unless we are running forwards compatibility tests
+        if (Boolean.parseBoolean(System.getProperty("tests.fwc", "false")) == false) {
+            properties.put("tests.testfeatures.enabled", "true");
+        }
+        return Collections.unmodifiableMap(properties);
     }
 }
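
For orientation, a minimal standalone sketch (not part of the change) of the two behaviours this hunk introduces; the demo class name and main() harness are assumptions used purely for illustration:

import java.util.Map;

public class FwcPropertyDemo {
    public static void main(String[] args) {
        // Regular test run: tests.fwc is unset, so the provider injects both defaults:
        //   ingest.geoip.downloader.enabled.default=false, tests.testfeatures.enabled=true
        // Forwards-compatibility run (-Dtests.fwc=true): test features stay disabled:
        //   ingest.geoip.downloader.enabled.default=false
        boolean fwc = Boolean.parseBoolean(System.getProperty("tests.fwc", "false"));
        Map<String, String> expected = fwc
            ? Map.of("ingest.geoip.downloader.enabled.default", "false")
            : Map.of("ingest.geoip.downloader.enabled.default", "false", "tests.testfeatures.enabled", "true");
        System.out.println(expected);
    }
}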

View file

@@ -39,7 +39,8 @@ public class SnapshotDistributionResolver implements DistributionResolver {
             // Snapshot distributions are never release builds and always use the default distribution
             Version realVersion = Version.fromString(System.getProperty("tests.bwc.main.version", version.toString()));
-            return new DefaultDistributionDescriptor(realVersion, true, distributionDir, DistributionType.DEFAULT);
+            boolean isSnapshot = System.getProperty("tests.bwc.snapshot", "true").equals("false") == false;
+            return new DefaultDistributionDescriptor(realVersion, isSnapshot, distributionDir, DistributionType.DEFAULT);
         }

         return delegate.resolve(version, type);
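
A quick illustration of the isSnapshot parsing above (a sketch, not project code; the helper name is made up). Any value other than the literal string "false" keeps the snapshot flag on, and the comparison is case-sensitive:

final class SnapshotFlagDemo {
    // Mirrors: System.getProperty("tests.bwc.snapshot", "true").equals("false") == false
    static boolean isSnapshot(String propertyValue) {
        return (propertyValue == null ? "true" : propertyValue).equals("false") == false;
    }

    public static void main(String[] args) {
        System.out.println(isSnapshot(null));    // true  (property unset, default "true")
        System.out.println(isSnapshot("true"));  // true
        System.out.println(isSnapshot("false")); // false
        System.out.println(isSnapshot("FALSE")); // true  (comparison is case-sensitive)
    }
}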

View file

@@ -102,6 +102,9 @@
         "unprivileged": {
           "type": "boolean"
         },
+        "fips": {
+          "type": "boolean"
+        },
         "version": {
           "type": "text",
           "fields": {

View file

@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentBuilder;
@ -31,6 +32,7 @@ import static org.elasticsearch.xpack.application.analytics.AnalyticsConstants.E
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsCollection implements Writeable, ToXContentObject { public class AnalyticsCollection implements Writeable, ToXContentObject {
private static final ObjectParser<AnalyticsCollection, String> PARSER = ObjectParser.fromBuilder( private static final ObjectParser<AnalyticsCollection, String> PARSER = ObjectParser.fromBuilder(

View file

@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
import java.util.ArrayList; import java.util.ArrayList;
@ -31,6 +32,7 @@ import static org.elasticsearch.xpack.application.analytics.AnalyticsConstants.E
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsCollectionResolver { public class AnalyticsCollectionResolver {
private final IndexNameExpressionResolver indexNameExpressionResolver; private final IndexNameExpressionResolver indexNameExpressionResolver;

View file

@ -19,6 +19,7 @@ import org.elasticsearch.client.internal.Client;
import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.client.internal.OriginSettingClient;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger; import org.elasticsearch.logging.Logger;
@ -36,6 +37,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.ENT_SEARCH_ORIGIN;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsCollectionService { public class AnalyticsCollectionService {
private static final Logger logger = LogManager.getLogger(AnalyticsCollectionService.class); private static final Logger logger = LogManager.getLogger(AnalyticsCollectionService.class);

View file

@ -7,10 +7,13 @@
package org.elasticsearch.xpack.application.analytics; package org.elasticsearch.xpack.application.analytics;
import org.elasticsearch.core.UpdateForV10;
/** /**
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsConstants { public class AnalyticsConstants {
private AnalyticsConstants() {} private AnalyticsConstants() {}

View file

@ -9,6 +9,7 @@ package org.elasticsearch.xpack.application.analytics;
import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.xpack.application.analytics.action.PostAnalyticsEventAction; import org.elasticsearch.xpack.application.analytics.action.PostAnalyticsEventAction;
import org.elasticsearch.xpack.application.analytics.ingest.AnalyticsEventEmitter; import org.elasticsearch.xpack.application.analytics.ingest.AnalyticsEventEmitter;
@ -20,6 +21,7 @@ import java.util.Objects;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsEventIngestService { public class AnalyticsEventIngestService {
private final AnalyticsCollectionResolver collectionResolver; private final AnalyticsCollectionResolver collectionResolver;

View file

@ -11,6 +11,7 @@ import org.elasticsearch.cluster.metadata.ComponentTemplate;
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.NamedXContentRegistry;
import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentParserConfiguration;
@ -35,6 +36,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.ENT_SEARCH_ORIGIN;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsTemplateRegistry extends IndexTemplateRegistry { public class AnalyticsTemplateRegistry extends IndexTemplateRegistry {
// This number must be incremented when we make changes to built-in templates. // This number must be incremented when we make changes to built-in templates.

View file

@ -15,6 +15,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentBuilder;
@ -28,6 +29,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class DeleteAnalyticsCollectionAction { public class DeleteAnalyticsCollectionAction {
public static final String NAME = "cluster:admin/xpack/application/analytics/delete"; public static final String NAME = "cluster:admin/xpack/application/analytics/delete";

View file

@ -14,6 +14,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.ToXContentObject;
@ -29,6 +30,7 @@ import java.util.Objects;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class GetAnalyticsCollectionAction { public class GetAnalyticsCollectionAction {
public static final String NAME = "cluster:admin/xpack/application/analytics/get"; public static final String NAME = "cluster:admin/xpack/application/analytics/get";

View file

@ -18,6 +18,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.ToXContentObject;
@ -40,6 +41,7 @@ import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class PostAnalyticsEventAction { public class PostAnalyticsEventAction {
public static final String NAME = "cluster:admin/xpack/application/analytics/post_event"; public static final String NAME = "cluster:admin/xpack/application/analytics/post_event";

View file

@ -14,6 +14,7 @@ import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentBuilder;
@ -27,6 +28,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class PutAnalyticsCollectionAction { public class PutAnalyticsCollectionAction {
public static final String NAME = "cluster:admin/xpack/application/analytics/put"; public static final String NAME = "cluster:admin/xpack/application/analytics/put";

View file

@ -8,6 +8,7 @@
package org.elasticsearch.xpack.application.analytics.action; package org.elasticsearch.xpack.application.analytics.action;
import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.RestUtils;
@ -27,6 +28,7 @@ import static org.elasticsearch.rest.RestRequest.Method.DELETE;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
@ServerlessScope(Scope.PUBLIC) @ServerlessScope(Scope.PUBLIC)
public class RestDeleteAnalyticsCollectionAction extends EnterpriseSearchBaseRestHandler { public class RestDeleteAnalyticsCollectionAction extends EnterpriseSearchBaseRestHandler {
public RestDeleteAnalyticsCollectionAction(XPackLicenseState licenseState) { public RestDeleteAnalyticsCollectionAction(XPackLicenseState licenseState) {

View file

@ -9,6 +9,7 @@ package org.elasticsearch.xpack.application.analytics.action;
import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestUtils; import org.elasticsearch.rest.RestUtils;
@ -27,6 +28,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
@ServerlessScope(Scope.PUBLIC) @ServerlessScope(Scope.PUBLIC)
public class RestGetAnalyticsCollectionAction extends EnterpriseSearchBaseRestHandler { public class RestGetAnalyticsCollectionAction extends EnterpriseSearchBaseRestHandler {
public RestGetAnalyticsCollectionAction(XPackLicenseState licenseState) { public RestGetAnalyticsCollectionAction(XPackLicenseState licenseState) {

View file

@ -12,6 +12,7 @@ import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.core.Tuple; import org.elasticsearch.core.Tuple;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
@ -33,6 +34,7 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
@ServerlessScope(Scope.PUBLIC) @ServerlessScope(Scope.PUBLIC)
public class RestPostAnalyticsEventAction extends EnterpriseSearchBaseRestHandler { public class RestPostAnalyticsEventAction extends EnterpriseSearchBaseRestHandler {
public RestPostAnalyticsEventAction(XPackLicenseState licenseState) { public RestPostAnalyticsEventAction(XPackLicenseState licenseState) {

View file

@ -8,6 +8,7 @@
package org.elasticsearch.xpack.application.analytics.action; package org.elasticsearch.xpack.application.analytics.action;
import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.client.internal.node.NodeClient;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.rest.RestHandler; import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestRequest;
@ -28,6 +29,7 @@ import static org.elasticsearch.rest.RestRequest.Method.PUT;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
@ServerlessScope(Scope.PUBLIC) @ServerlessScope(Scope.PUBLIC)
public class RestPutAnalyticsCollectionAction extends EnterpriseSearchBaseRestHandler { public class RestPutAnalyticsCollectionAction extends EnterpriseSearchBaseRestHandler {
public RestPutAnalyticsCollectionAction(XPackLicenseState licenseState) { public RestPutAnalyticsCollectionAction(XPackLicenseState licenseState) {

View file

@ -18,6 +18,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationCategory;
import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -31,6 +32,7 @@ import static org.elasticsearch.xpack.application.EnterpriseSearch.BEHAVIORAL_AN
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class TransportDeleteAnalyticsCollectionAction extends AcknowledgedTransportMasterNodeAction< public class TransportDeleteAnalyticsCollectionAction extends AcknowledgedTransportMasterNodeAction<
DeleteAnalyticsCollectionAction.Request> { DeleteAnalyticsCollectionAction.Request> {

View file

@ -16,6 +16,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationCategory;
import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -29,6 +30,7 @@ import static org.elasticsearch.xpack.application.EnterpriseSearch.BEHAVIORAL_AN
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class TransportGetAnalyticsCollectionAction extends TransportMasterNodeReadAction< public class TransportGetAnalyticsCollectionAction extends TransportMasterNodeReadAction<
GetAnalyticsCollectionAction.Request, GetAnalyticsCollectionAction.Request,
GetAnalyticsCollectionAction.Response> { GetAnalyticsCollectionAction.Response> {

View file

@ -14,6 +14,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationCategory;
import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
@ -29,6 +30,7 @@ import static org.elasticsearch.xpack.application.EnterpriseSearch.BEHAVIORAL_AN
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class TransportPostAnalyticsEventAction extends HandledTransportAction< public class TransportPostAnalyticsEventAction extends HandledTransportAction<
PostAnalyticsEventAction.Request, PostAnalyticsEventAction.Request,
PostAnalyticsEventAction.Response> { PostAnalyticsEventAction.Response> {

View file

@ -17,6 +17,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationCategory;
import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.features.FeatureService; import org.elasticsearch.features.FeatureService;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.Task;
@ -31,6 +32,7 @@ import static org.elasticsearch.xpack.application.EnterpriseSearch.BEHAVIORAL_AN
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class TransportPutAnalyticsCollectionAction extends TransportMasterNodeAction< public class TransportPutAnalyticsCollectionAction extends TransportMasterNodeAction<
PutAnalyticsCollectionAction.Request, PutAnalyticsCollectionAction.Request,
PutAnalyticsCollectionAction.Response> { PutAnalyticsCollectionAction.Response> {

View file

@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentBuilder;
@ -34,6 +35,7 @@ import static org.elasticsearch.xpack.application.analytics.AnalyticsConstants.E
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsEvent implements Writeable, ToXContentObject { public class AnalyticsEvent implements Writeable, ToXContentObject {
public static final ParseField TIMESTAMP_FIELD = new ParseField("@timestamp"); public static final ParseField TIMESTAMP_FIELD = new ParseField("@timestamp");

View file

@ -10,6 +10,7 @@ package org.elasticsearch.xpack.application.analytics.event;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ContextParser; import org.elasticsearch.xcontent.ContextParser;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentParserConfiguration;
@ -32,6 +33,7 @@ import static org.elasticsearch.xpack.application.analytics.event.AnalyticsEvent
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsEventFactory { public class AnalyticsEventFactory {
public static final AnalyticsEventFactory INSTANCE = new AnalyticsEventFactory(); public static final AnalyticsEventFactory INSTANCE = new AnalyticsEventFactory();

View file

@ -7,6 +7,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.event; package org.elasticsearch.xpack.application.analytics.event.parser.event;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xpack.application.analytics.event.AnalyticsEvent; import org.elasticsearch.xpack.application.analytics.event.AnalyticsEvent;
@ -26,6 +27,7 @@ import static org.elasticsearch.xpack.application.analytics.event.parser.field.U
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class PageViewAnalyticsEvent { public class PageViewAnalyticsEvent {
private static final ObjectParser<AnalyticsEvent.Builder, AnalyticsEvent.Context> PARSER = ObjectParser.fromBuilder( private static final ObjectParser<AnalyticsEvent.Builder, AnalyticsEvent.Context> PARSER = ObjectParser.fromBuilder(
"page_view_event", "page_view_event",

View file

@ -7,6 +7,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.event; package org.elasticsearch.xpack.application.analytics.event.parser.event;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xpack.application.analytics.event.AnalyticsEvent; import org.elasticsearch.xpack.application.analytics.event.AnalyticsEvent;
@ -24,6 +25,7 @@ import static org.elasticsearch.xpack.application.analytics.event.parser.field.U
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class SearchAnalyticsEvent { public class SearchAnalyticsEvent {
private static final ObjectParser<AnalyticsEvent.Builder, AnalyticsEvent.Context> PARSER = ObjectParser.fromBuilder( private static final ObjectParser<AnalyticsEvent.Builder, AnalyticsEvent.Context> PARSER = ObjectParser.fromBuilder(
"search_event", "search_event",

View file

@ -7,6 +7,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.event; package org.elasticsearch.xpack.application.analytics.event.parser.event;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xpack.application.analytics.event.AnalyticsEvent; import org.elasticsearch.xpack.application.analytics.event.AnalyticsEvent;
@ -28,6 +29,7 @@ import static org.elasticsearch.xpack.application.analytics.event.parser.field.U
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class SearchClickAnalyticsEvent { public class SearchClickAnalyticsEvent {
private static final ObjectParser<AnalyticsEvent.Builder, AnalyticsEvent.Context> PARSER = ObjectParser.fromBuilder( private static final ObjectParser<AnalyticsEvent.Builder, AnalyticsEvent.Context> PARSER = ObjectParser.fromBuilder(

View file

@ -7,6 +7,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.field; package org.elasticsearch.xpack.application.analytics.event.parser.field;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
@ -22,6 +23,7 @@ import static org.elasticsearch.common.Strings.requireNonBlank;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class DocumentAnalyticsEventField { public class DocumentAnalyticsEventField {
public static final ParseField DOCUMENT_FIELD = new ParseField("document"); public static final ParseField DOCUMENT_FIELD = new ParseField("document");

View file

@ -7,6 +7,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.field; package org.elasticsearch.xpack.application.analytics.event.parser.field;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
@ -20,6 +21,7 @@ import java.util.Map;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class PageAnalyticsEventField { public class PageAnalyticsEventField {
public static final ParseField PAGE_FIELD = new ParseField("page"); public static final ParseField PAGE_FIELD = new ParseField("page");

View file

@ -8,6 +8,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.field; package org.elasticsearch.xpack.application.analytics.event.parser.field;
import org.elasticsearch.core.Strings; import org.elasticsearch.core.Strings;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
@ -21,6 +22,7 @@ import java.util.Map;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class PaginationAnalyticsEventField { public class PaginationAnalyticsEventField {
public static final ParseField PAGINATION_FIELD = new ParseField("page"); public static final ParseField PAGINATION_FIELD = new ParseField("page");

View file

@ -7,6 +7,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.field; package org.elasticsearch.xpack.application.analytics.event.parser.field;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
@ -24,6 +25,7 @@ import static org.elasticsearch.xpack.application.analytics.event.parser.field.S
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class SearchAnalyticsEventField { public class SearchAnalyticsEventField {
public static final ParseField SEARCH_FIELD = new ParseField("search"); public static final ParseField SEARCH_FIELD = new ParseField("search");

View file

@ -8,6 +8,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.field; package org.elasticsearch.xpack.application.analytics.event.parser.field;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
@ -22,6 +23,7 @@ import java.util.Map;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class SearchFiltersAnalyticsEventField { public class SearchFiltersAnalyticsEventField {
public static final ParseField SEARCH_FILTERS_FIELD = new ParseField("filters"); public static final ParseField SEARCH_FILTERS_FIELD = new ParseField("filters");

View file

@ -7,6 +7,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.field; package org.elasticsearch.xpack.application.analytics.event.parser.field;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
@ -23,6 +24,7 @@ import static org.elasticsearch.xpack.application.analytics.event.parser.field.P
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class SearchResultAnalyticsEventField { public class SearchResultAnalyticsEventField {
public static final ParseField SEARCH_RESULTS_TOTAL_FIELD = new ParseField("total_results"); public static final ParseField SEARCH_RESULTS_TOTAL_FIELD = new ParseField("total_results");

View file

@ -9,6 +9,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.field; package org.elasticsearch.xpack.application.analytics.event.parser.field;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
@ -24,6 +25,7 @@ import static org.elasticsearch.common.Strings.requireNonBlank;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class SessionAnalyticsEventField { public class SessionAnalyticsEventField {
public static final ParseField SESSION_FIELD = new ParseField("session"); public static final ParseField SESSION_FIELD = new ParseField("session");

View file

@ -7,6 +7,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.field; package org.elasticsearch.xpack.application.analytics.event.parser.field;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
@ -22,6 +23,7 @@ import static org.elasticsearch.common.Strings.requireNonBlank;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class SortOrderAnalyticsEventField { public class SortOrderAnalyticsEventField {
public static final ParseField SORT_FIELD = new ParseField("sort"); public static final ParseField SORT_FIELD = new ParseField("sort");

View file

@ -8,6 +8,7 @@
package org.elasticsearch.xpack.application.analytics.event.parser.field; package org.elasticsearch.xpack.application.analytics.event.parser.field;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParser;
@ -23,6 +24,7 @@ import static org.elasticsearch.common.Strings.requireNonBlank;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class UserAnalyticsEventField { public class UserAnalyticsEventField {
public static final ParseField USER_FIELD = new ParseField("user"); public static final ParseField USER_FIELD = new ParseField("user");

View file

@ -16,6 +16,7 @@ import org.elasticsearch.client.internal.Client;
import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.client.internal.OriginSettingClient;
import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger; import org.elasticsearch.logging.Logger;
@ -38,6 +39,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.ENT_SEARCH_ORIGIN;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsEventEmitter extends AbstractLifecycleComponent { public class AnalyticsEventEmitter extends AbstractLifecycleComponent {
private static final Logger logger = LogManager.getLogger(AnalyticsEventEmitter.class); private static final Logger logger = LogManager.getLogger(AnalyticsEventEmitter.class);

View file

@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.core.Strings; import org.elasticsearch.core.Strings;
import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.TimeValue;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
/** /**
@ -23,6 +24,7 @@ import org.elasticsearch.injection.guice.Inject;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class AnalyticsEventIngestConfig { public class AnalyticsEventIngestConfig {
private static final String SETTING_ROOT_PATH = "xpack.applications.behavioral_analytics.ingest"; private static final String SETTING_ROOT_PATH = "xpack.applications.behavioral_analytics.ingest";

View file

@ -13,6 +13,7 @@ import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.Client;
import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.client.internal.OriginSettingClient;
import org.elasticsearch.core.UpdateForV10;
import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.injection.guice.Inject;
import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.LogManager;
import org.elasticsearch.logging.Logger; import org.elasticsearch.logging.Logger;
@ -29,6 +30,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.ENT_SEARCH_ORIGIN;
* @deprecated in 9.0 * @deprecated in 9.0
*/ */
@Deprecated @Deprecated
@UpdateForV10(owner = UpdateForV10.Owner.ENTERPRISE_SEARCH)
public class BulkProcessorFactory { public class BulkProcessorFactory {
private static final Logger logger = LogManager.getLogger(AnalyticsEventEmitter.class); private static final Logger logger = LogManager.getLogger(AnalyticsEventEmitter.class);

View file

@ -34,6 +34,7 @@ import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException; import java.io.IOException;
import java.io.UncheckedIOException; import java.io.UncheckedIOException;
import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.List; import java.util.List;
@@ -231,12 +232,20 @@ public abstract class LuceneOperator extends SourceOperator {
     public String toString() {
         StringBuilder sb = new StringBuilder();
         sb.append(this.getClass().getSimpleName()).append("[");
-        sb.append("maxPageSize = ").append(maxPageSize);
+        sb.append("shards = ").append(sortedUnion(processedShards, sliceQueue.remainingShardsIdentifiers()));
+        sb.append(", maxPageSize = ").append(maxPageSize);
         describe(sb);
         sb.append("]");
         return sb.toString();
     }

+    private static Set<String> sortedUnion(Collection<String> a, Collection<String> b) {
+        var result = new TreeSet<String>();
+        result.addAll(a);
+        result.addAll(b);
+        return result;
+    }
+
     protected abstract void describe(StringBuilder sb);

     @Override
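
As an aside, a self-contained sketch of what the sortedUnion helper added above produces; the shard identifier strings below are invented for the example:

import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

public class SortedUnionDemo {
    // Same shape as the helper in the hunk above: merge two collections of shard
    // identifiers into one sorted, de-duplicated set so toString() output is stable.
    static Set<String> sortedUnion(Collection<String> a, Collection<String> b) {
        var result = new TreeSet<String>();
        result.addAll(a);
        result.addAll(b);
        return result;
    }

    public static void main(String[] args) {
        System.out.println(sortedUnion(List.of("index-a[1]", "index-a[0]"), List.of("index-b[0]", "index-a[0]")));
        // prints: [index-a[0], index-a[1], index-b[0]]
    }
}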
@@ -254,7 +263,7 @@ public abstract class LuceneOperator extends SourceOperator {
         private final int processedSlices;
         private final Set<String> processedQueries;
         private final Set<String> processedShards;
-        private final long processingNanos;
+        private final long processNanos;
         private final int totalSlices;
         private final int pagesEmitted;
         private final int sliceIndex;
@@ -266,7 +275,7 @@ public abstract class LuceneOperator extends SourceOperator {
         private Status(LuceneOperator operator) {
             processedSlices = operator.processedSlices;
             processedQueries = operator.processedQueries.stream().map(Query::toString).collect(Collectors.toCollection(TreeSet::new));
-            processingNanos = operator.processingNanos;
+            processNanos = operator.processingNanos;
             processedShards = new TreeSet<>(operator.processedShards);
             sliceIndex = operator.sliceIndex;
             totalSlices = operator.sliceQueue.totalSlices();
@@ -293,7 +302,7 @@ public abstract class LuceneOperator extends SourceOperator {
             int processedSlices,
             Set<String> processedQueries,
             Set<String> processedShards,
-            long processingNanos,
+            long processNanos,
             int sliceIndex,
             int totalSlices,
             int pagesEmitted,
@@ -305,7 +314,7 @@ public abstract class LuceneOperator extends SourceOperator {
            this.processedSlices = processedSlices;
            this.processedQueries = processedQueries;
            this.processedShards = processedShards;
-           this.processingNanos = processingNanos;
+           this.processNanos = processNanos;
            this.sliceIndex = sliceIndex;
            this.totalSlices = totalSlices;
            this.pagesEmitted = pagesEmitted;
@@ -324,7 +333,7 @@ public abstract class LuceneOperator extends SourceOperator {
                processedQueries = Collections.emptySet();
                processedShards = Collections.emptySet();
            }
-           processingNanos = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0;
+           processNanos = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0;
            sliceIndex = in.readVInt();
            totalSlices = in.readVInt();
            pagesEmitted = in.readVInt();
@@ -346,7 +355,7 @@ public abstract class LuceneOperator extends SourceOperator {
                out.writeCollection(processedShards, StreamOutput::writeString);
            }
            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) {
-               out.writeVLong(processingNanos);
+               out.writeVLong(processNanos);
            }
            out.writeVInt(sliceIndex);
            out.writeVInt(totalSlices);
@@ -377,7 +386,7 @@ public abstract class LuceneOperator extends SourceOperator {
        }

        public long processNanos() {
-           return processingNanos;
+           return processNanos;
        }

        public int sliceIndex() {
@@ -414,9 +423,9 @@ public abstract class LuceneOperator extends SourceOperator {
            builder.field("processed_slices", processedSlices);
            builder.field("processed_queries", processedQueries);
            builder.field("processed_shards", processedShards);
-           builder.field("processing_nanos", processingNanos);
+           builder.field("process_nanos", processNanos);
            if (builder.humanReadable()) {
-               builder.field("processing_time", TimeValue.timeValueNanos(processingNanos));
+               builder.field("process_time", TimeValue.timeValueNanos(processNanos));
            }
            builder.field("slice_index", sliceIndex);
            builder.field("total_slices", totalSlices);
@@ -436,7 +445,7 @@ public abstract class LuceneOperator extends SourceOperator {
            return processedSlices == status.processedSlices
                && processedQueries.equals(status.processedQueries)
                && processedShards.equals(status.processedShards)
-               && processingNanos == status.processingNanos
+               && processNanos == status.processNanos
                && sliceIndex == status.sliceIndex
                && totalSlices == status.totalSlices
                && pagesEmitted == status.pagesEmitted

View file

@@ -15,6 +15,7 @@ import org.elasticsearch.core.Nullable;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Queue;
@@ -45,8 +46,8 @@ public final class LuceneSliceQueue {
         return totalSlices;
     }

-    public Iterable<LuceneSlice> getSlices() {
-        return slices;
+    public Collection<String> remainingShardsIdentifiers() {
+        return slices.stream().map(slice -> slice.shardContext().shardIdentifier()).toList();
     }
public static LuceneSliceQueue create( public static LuceneSliceQueue create(

View file

@ -50,6 +50,7 @@ import static org.apache.lucene.search.ScoreMode.TOP_DOCS;
* Source operator that builds Pages out of the output of a TopFieldCollector (aka TopN) * Source operator that builds Pages out of the output of a TopFieldCollector (aka TopN)
*/ */
public final class LuceneTopNSourceOperator extends LuceneOperator { public final class LuceneTopNSourceOperator extends LuceneOperator {
public static class Factory extends LuceneOperator.Factory { public static class Factory extends LuceneOperator.Factory {
private final int maxPageSize; private final int maxPageSize;
private final List<SortBuilder<?>> sorts; private final List<SortBuilder<?>> sorts;

View file

@@ -77,7 +77,7 @@ public class Driver implements Releasable, Describable {
     private final DriverContext driverContext;
     private final Supplier<String> description;
    private final List<Operator> activeOperators;
-   private final List<DriverStatus.OperatorStatus> statusOfCompletedOperators = new ArrayList<>();
+   private final List<OperatorStatus> statusOfCompletedOperators = new ArrayList<>();
    private final Releasable releasable;
    private final long statusNanos;
@ -117,6 +117,8 @@ public class Driver implements Releasable, Describable {
public Driver( public Driver(
String sessionId, String sessionId,
String taskDescription, String taskDescription,
String clusterName,
String nodeName,
long startTime, long startTime,
long startNanos, long startNanos,
DriverContext driverContext, DriverContext driverContext,
@ -143,6 +145,8 @@ public class Driver implements Releasable, Describable {
new DriverStatus( new DriverStatus(
sessionId, sessionId,
taskDescription, taskDescription,
clusterName,
nodeName,
startTime, startTime,
System.currentTimeMillis(), System.currentTimeMillis(),
0, 0,
@ -155,37 +159,6 @@ public class Driver implements Releasable, Describable {
); );
} }
/**
* Creates a new driver with a chain of operators.
* @param driverContext the driver context
* @param source source operator
* @param intermediateOperators the chain of operators to execute
* @param sink sink operator
* @param releasable a {@link Releasable} to invoked once the chain of operators has run to completion
*/
public Driver(
String taskDescription,
DriverContext driverContext,
SourceOperator source,
List<Operator> intermediateOperators,
SinkOperator sink,
Releasable releasable
) {
this(
"unset",
taskDescription,
System.currentTimeMillis(),
System.nanoTime(),
driverContext,
() -> null,
source,
intermediateOperators,
sink,
DEFAULT_STATUS_INTERVAL,
releasable
);
}
public DriverContext driverContext() { public DriverContext driverContext() {
return driverContext; return driverContext;
} }
@@ -329,7 +302,7 @@ public class Driver implements Releasable, Describable {
         Iterator<Operator> itr = finishedOperators.iterator();
         while (itr.hasNext()) {
             Operator op = itr.next();
-            statusOfCompletedOperators.add(new DriverStatus.OperatorStatus(op.toString(), op.status()));
+            statusOfCompletedOperators.add(new OperatorStatus(op.toString(), op.status()));
             op.close();
             itr.remove();
         }
@ -502,6 +475,8 @@ public class Driver implements Releasable, Describable {
} }
return new DriverProfile( return new DriverProfile(
status.taskDescription(), status.taskDescription(),
status.clusterName(),
status.nodeName(),
status.started(), status.started(),
status.lastUpdated(), status.lastUpdated(),
finishNanos - startNanos, finishNanos - startNanos,
@ -549,13 +524,15 @@ public class Driver implements Releasable, Describable {
return new DriverStatus( return new DriverStatus(
sessionId, sessionId,
taskDescription, taskDescription,
prev.clusterName(),
prev.nodeName(),
startTime, startTime,
now, now,
prev.cpuNanos() + extraCpuNanos, prev.cpuNanos() + extraCpuNanos,
prev.iterations() + extraIterations, prev.iterations() + extraIterations,
status, status,
statusOfCompletedOperators, statusOfCompletedOperators,
activeOperators.stream().map(op -> new DriverStatus.OperatorStatus(op.toString(), op.status())).toList(), activeOperators.stream().map(op -> new OperatorStatus(op.toString(), op.status())).toList(),
sleeps sleeps
); );
}); });
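The convenience constructor removed above was only used by tests; those call sites now go through a TestDriverFactory helper, as the test diffs further down show. The helper itself is not part of this excerpt, so the following is only a minimal sketch, assuming it fills in placeholder identifiers and a no-op releasable before delegating to the full constructor. Only the create(driverContext, source, operators, sink) call shape is visible in this diff; the class body, default values, and the status-interval stand-in are assumptions.

// Hypothetical sketch only: the real helper lives in org.elasticsearch.compute.test and may differ.
import org.elasticsearch.compute.operator.Driver;
import org.elasticsearch.compute.operator.DriverContext;
import org.elasticsearch.compute.operator.Operator;
import org.elasticsearch.compute.operator.SinkOperator;
import org.elasticsearch.compute.operator.SourceOperator;
import org.elasticsearch.core.TimeValue;

import java.util.List;

public final class TestDriverFactory {
    private TestDriverFactory() {}

    public static Driver create(DriverContext ctx, SourceOperator source, List<Operator> operators, SinkOperator sink) {
        return new Driver(
            "unset",                     // sessionId placeholder, mirroring the removed constructor
            "test",                      // taskDescription used by the old test call sites
            "",                          // clusterName: assumed test default
            "",                          // nodeName: assumed test default
            System.currentTimeMillis(),  // startTime
            System.nanoTime(),           // startNanos
            ctx,
            () -> null,                  // description supplier
            source,
            operators,
            sink,
            TimeValue.timeValueDays(1),  // stand-in for Driver.DEFAULT_STATUS_INTERVAL
            () -> {}                     // no-op releasable
        );
    }
}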


@@ -21,98 +21,48 @@ import org.elasticsearch.xcontent.ToXContent;
import java.io.IOException;
import java.util.Iterator;
import java.util.List;
-import java.util.Objects;

/**
 * Profile results from a single {@link Driver}.
+ *
+ * @param taskDescription Description of the task this driver is running. This description should be short and meaningful
+ *                        as a grouping identifier. We use the phase of the query right now: "data", "node_reduce", "final".
+ * @param clusterName The name of the cluster this driver is running on.
+ * @param nodeName The name of the node this driver is running on.
+ * @param startMillis Millis since epoch when the driver started.
+ * @param stopMillis Millis since epoch when the driver stopped.
+ * @param tookNanos Nanos between creation and completion of the {@link Driver}.
+ * @param cpuNanos Nanos this {@link Driver} has been running on the cpu. Does not include async or waiting time.
+ * @param iterations The number of times the driver has moved a single page up the chain of operators as far as it'll go.
+ * @param operators Status of each {@link Operator} in the driver when it finished.
 */
-public class DriverProfile implements Writeable, ChunkedToXContentObject {
-   /**
-    * Description of the task this driver is running. This description should be
-    * short and meaningful as a grouping identifier. We use the phase of the
-    * query right now: "data", "node_reduce", "final".
-    */
-   private final String taskDescription;
-   /**
-    * Millis since epoch when the driver started.
-    */
-   private final long startMillis;
-   /**
-    * Millis since epoch when the driver stopped.
-    */
-   private final long stopMillis;
-   /**
-    * Nanos between creation and completion of the {@link Driver}.
-    */
-   private final long tookNanos;
-   /**
-    * Nanos this {@link Driver} has been running on the cpu. Does not
-    * include async or waiting time.
-    */
-   private final long cpuNanos;
-   /**
-    * The number of times the driver has moved a single page up the
-    * chain of operators as far as it'll go.
-    */
-   private final long iterations;
-   /**
-    * Status of each {@link Operator} in the driver when it finished.
-    */
-   private final List<DriverStatus.OperatorStatus> operators;
-   private final DriverSleeps sleeps;
-
-   public DriverProfile(
-       String taskDescription,
-       long startMillis,
-       long stopMillis,
-       long tookNanos,
-       long cpuNanos,
-       long iterations,
-       List<DriverStatus.OperatorStatus> operators,
-       DriverSleeps sleeps
-   ) {
-       this.taskDescription = taskDescription;
-       this.startMillis = startMillis;
-       this.stopMillis = stopMillis;
-       this.tookNanos = tookNanos;
-       this.cpuNanos = cpuNanos;
-       this.iterations = iterations;
-       this.operators = operators;
-       this.sleeps = sleeps;
-   }
-
-   public DriverProfile(StreamInput in) throws IOException {
-       if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION)
-           || in.getTransportVersion().isPatchFrom(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION_90)) {
-           this.taskDescription = in.readString();
-       } else {
-           this.taskDescription = "";
-       }
-       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
-           this.startMillis = in.readVLong();
-           this.stopMillis = in.readVLong();
-       } else {
-           this.startMillis = 0;
-           this.stopMillis = 0;
-       }
-       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) {
-           this.tookNanos = in.readVLong();
-           this.cpuNanos = in.readVLong();
-           this.iterations = in.readVLong();
-       } else {
-           this.tookNanos = 0;
-           this.cpuNanos = 0;
-           this.iterations = 0;
-       }
-       this.operators = in.readCollectionAsImmutableList(DriverStatus.OperatorStatus::new);
-       this.sleeps = DriverSleeps.read(in);
+public record DriverProfile(
+   String taskDescription,
+   String clusterName,
+   String nodeName,
+   long startMillis,
+   long stopMillis,
+   long tookNanos,
+   long cpuNanos,
+   long iterations,
+   List<OperatorStatus> operators,
+   DriverSleeps sleeps
+) implements Writeable, ChunkedToXContentObject {
+
+   public static DriverProfile readFrom(StreamInput in) throws IOException {
+       return new DriverProfile(
+           in.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION)
+               || in.getTransportVersion().isPatchFrom(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION_90) ? in.readString() : "",
+           in.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_NODE_DESCRIPTION) ? in.readString() : "",
+           in.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_NODE_DESCRIPTION) ? in.readString() : "",
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readVLong() : 0,
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0) ? in.readVLong() : 0,
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0,
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0,
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0,
+           in.readCollectionAsImmutableList(OperatorStatus::readFrom),
+           DriverSleeps.read(in)
+       );
    }

    @Override
@@ -121,6 +71,10 @@ public class DriverProfile implements Writeable, ChunkedToXContentObject {
            || out.getTransportVersion().isPatchFrom(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION_90)) {
            out.writeString(taskDescription);
        }
+       if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_NODE_DESCRIPTION)) {
+           out.writeString(clusterName);
+           out.writeString(nodeName);
+       }
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_16_0)) {
            out.writeVLong(startMillis);
            out.writeVLong(stopMillis);
@@ -134,68 +88,12 @@ public class DriverProfile implements Writeable, ChunkedToXContentObject {
        sleeps.writeTo(out);
    }

-   /**
-    * Description of the task this driver is running.
-    */
-   public String taskDescription() {
-       return taskDescription;
-   }
-   /**
-    * Millis since epoch when the driver started.
-    */
-   public long startMillis() {
-       return startMillis;
-   }
-   /**
-    * Millis since epoch when the driver stopped.
-    */
-   public long stopMillis() {
-       return stopMillis;
-   }
-   /**
-    * Nanos between creation and completion of the {@link Driver}.
-    */
-   public long tookNanos() {
-       return tookNanos;
-   }
-   /**
-    * Nanos this {@link Driver} has been running on the cpu. Does not
-    * include async or waiting time.
-    */
-   public long cpuNanos() {
-       return cpuNanos;
-   }
-   /**
-    * The number of times the driver has moved a single page up the
-    * chain of operators as far as it'll go.
-    */
-   public long iterations() {
-       return iterations;
-   }
-   /**
-    * Status of each {@link Operator} in the driver when it finished.
-    */
-   public List<DriverStatus.OperatorStatus> operators() {
-       return operators;
-   }
-   /**
-    * Records of the times the driver has slept.
-    */
-   public DriverSleeps sleeps() {
-       return sleeps;
-   }

    @Override
    public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params) {
        return Iterators.concat(ChunkedToXContentHelper.startObject(), Iterators.single((b, p) -> {
            b.field("task_description", taskDescription);
+           b.field("cluster_name", clusterName);
+           b.field("node_name", nodeName);
            b.timestampFieldsFromUnixEpochMillis("start_millis", "start", startMillis);
            b.timestampFieldsFromUnixEpochMillis("stop_millis", "stop", stopMillis);
            b.field("took_nanos", tookNanos);
@@ -215,30 +113,6 @@ public class DriverProfile implements Writeable, ChunkedToXContentObject {
        );
    }

-   @Override
-   public boolean equals(Object o) {
-       if (this == o) {
-           return true;
-       }
-       if (o == null || getClass() != o.getClass()) {
-           return false;
-       }
-       DriverProfile that = (DriverProfile) o;
-       return taskDescription.equals(that.taskDescription)
-           && startMillis == that.startMillis
-           && stopMillis == that.stopMillis
-           && tookNanos == that.tookNanos
-           && cpuNanos == that.cpuNanos
-           && iterations == that.iterations
-           && Objects.equals(operators, that.operators)
-           && sleeps.equals(that.sleeps);
-   }
-
-   @Override
-   public int hashCode() {
-       return Objects.hash(taskDescription, startMillis, stopMillis, tookNanos, cpuNanos, iterations, operators, sleeps);
-   }

    @Override
    public String toString() {
        return Strings.toString(this);
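Converting the class to a record is what lets the hand-written constructor, accessors, equals and hashCode above be deleted: the compiler derives them from the record header. A tiny plain-Java illustration of that equivalence (not Elasticsearch code, names invented for the example):

// A record header generates the accessors, equals, hashCode and toString that would
// otherwise be written by hand, which is why DriverProfile can drop them wholesale.
record Point(int x, int y) {}

class RecordDemo {
    public static void main(String[] args) {
        Point a = new Point(1, 2);
        Point b = new Point(1, 2);
        System.out.println(a.x());        // 1, generated accessor
        System.out.println(a.equals(b));  // true, generated structural equality
    }
}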


@@ -12,124 +12,71 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.VersionedNamedWriteable;
import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.xcontent.ToXContentFragment;
-import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.List;
import java.util.Locale;
-import java.util.Objects;

/**
 * {@link Task.Status} reported from a {@link Driver} to be reported by the tasks api.
+ *
+ * @param sessionId The session for this driver.
+ * @param taskDescription Description of the task this driver is running.
+ * @param clusterName The name of the cluster this driver is running on.
+ * @param nodeName The name of the node this driver is running on.
+ * @param started When this {@link Driver} was started.
+ * @param lastUpdated When this status was generated.
+ * @param cpuNanos Nanos this {@link Driver} has been running on the cpu. Does not include async or waiting time.
+ * @param iterations The number of times the driver has moved a single page up the chain of operators as far as it'll go.
+ * @param status The state of the overall driver - queue, starting, running, finished.
+ * @param completedOperators Status of each completed {@link Operator} in the driver.
+ * @param activeOperators Status of each active {@link Operator} in the driver.
 */
-public class DriverStatus implements Task.Status {
+public record DriverStatus(
+   String sessionId,
+   String taskDescription,
+   String clusterName,
+   String nodeName,
+   long started,
+   long lastUpdated,
+   long cpuNanos,
+   long iterations,
+   Status status,
+   List<OperatorStatus> completedOperators,
+   List<OperatorStatus> activeOperators,
+   DriverSleeps sleeps
+) implements Task.Status {

    public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
        Task.Status.class,
        "driver",
-       DriverStatus::new
+       DriverStatus::readFrom
    );

-   /**
-    * The session for this driver.
-    */
-   private final String sessionId;
-   /**
-    * Description of the task this driver is running.
-    */
-   private final String taskDescription;
-   /**
-    * Milliseconds since epoch when this driver started.
-    */
-   private final long started;
-   /**
-    * When this status was generated.
-    */
-   private final long lastUpdated;
-   /**
-    * Nanos this {@link Driver} has been running on the cpu. Does not
-    * include async or waiting time.
-    */
-   private final long cpuNanos;
-   /**
-    * The number of times the driver has moved a single page up the
-    * chain of operators as far as it'll go.
-    */
-   private final long iterations;
-   /**
-    * The state of the overall driver - queue, starting, running, finished.
-    */
-   private final Status status;
-   /**
-    * Status of each completed {@link Operator} in the driver.
-    */
-   private final List<OperatorStatus> completedOperators;
-   /**
-    * Status of each active {@link Operator} in the driver.
-    */
-   private final List<OperatorStatus> activeOperators;
-   private final DriverSleeps sleeps;
-
-   DriverStatus(
-       String sessionId,
-       String taskDescription,
-       long started,
-       long lastUpdated,
-       long cpuTime,
-       long iterations,
-       Status status,
-       List<OperatorStatus> completedOperators,
-       List<OperatorStatus> activeOperators,
-       DriverSleeps sleeps
-   ) {
-       this.sessionId = sessionId;
-       this.taskDescription = taskDescription;
-       this.started = started;
-       this.lastUpdated = lastUpdated;
-       this.cpuNanos = cpuTime;
-       this.iterations = iterations;
-       this.status = status;
-       this.completedOperators = completedOperators;
-       this.activeOperators = activeOperators;
-       this.sleeps = sleeps;
-   }
-
-   public DriverStatus(StreamInput in) throws IOException {
-       this.sessionId = in.readString();
-       if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION)
-           || in.getTransportVersion().isPatchFrom(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION_90)) {
-           this.taskDescription = in.readString();
-       } else {
-           this.taskDescription = "";
-       }
-       this.started = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readLong() : 0;
-       this.lastUpdated = in.readLong();
-       this.cpuNanos = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0;
-       this.iterations = in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0;
-       this.status = Status.read(in);
-       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)) {
-           this.completedOperators = in.readCollectionAsImmutableList(OperatorStatus::new);
-       } else {
-           this.completedOperators = List.of();
-       }
-       this.activeOperators = in.readCollectionAsImmutableList(OperatorStatus::new);
-       this.sleeps = DriverSleeps.read(in);
+   public static DriverStatus readFrom(StreamInput in) throws IOException {
+       return new DriverStatus(
+           in.readString(),
+           in.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION)
+               || in.getTransportVersion().isPatchFrom(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION_90) ? in.readString() : "",
+           in.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_NODE_DESCRIPTION) ? in.readString() : "",
+           in.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_NODE_DESCRIPTION) ? in.readString() : "",
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readLong() : 0,
+           in.readLong(),
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0,
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0) ? in.readVLong() : 0,
+           Status.read(in),
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_12_0)
+               ? in.readCollectionAsImmutableList(OperatorStatus::readFrom)
+               : List.of(),
+           in.readCollectionAsImmutableList(OperatorStatus::readFrom),
+           DriverSleeps.read(in)
+       );
    }

    @Override
@@ -139,6 +86,10 @@ public class DriverStatus implements Task.Status {
            || out.getTransportVersion().isPatchFrom(TransportVersions.ESQL_DRIVER_TASK_DESCRIPTION_90)) {
            out.writeString(taskDescription);
        }
+       if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_NODE_DESCRIPTION)) {
+           out.writeString(clusterName);
+           out.writeString(nodeName);
+       }
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_14_0)) {
            out.writeLong(started);
        }
@@ -160,85 +111,13 @@ public class DriverStatus implements Task.Status {
        return ENTRY.name;
    }

-   /**
-    * The session for this driver.
-    */
-   public String sessionId() {
-       return sessionId;
-   }
-   /**
-    * Description of the task this driver is running. This description should be
-    * short and meaningful as a grouping identifier. We use the phase of the
-    * query right now: "data", "node_reduce", "final".
-    */
-   public String taskDescription() {
-       return taskDescription;
-   }
-   /**
-    * When this {@link Driver} was started.
-    */
-   public long started() {
-       return started;
-   }
-   /**
-    * When this status was generated.
-    */
-   public long lastUpdated() {
-       return lastUpdated;
-   }
-   /**
-    * Nanos this {@link Driver} has been running on the cpu. Does not
-    * include async or waiting time.
-    */
-   public long cpuNanos() {
-       return cpuNanos;
-   }
-   /**
-    * The number of times the driver has moved a single page up the
-    * chain of operators as far as it'll go.
-    */
-   public long iterations() {
-       return iterations;
-   }
-   /**
-    * The state of the overall driver - queue, starting, running, finished.
-    */
-   public Status status() {
-       return status;
-   }
-   /**
-    * Status of each completed {@link Operator} in the driver.
-    */
-   public List<OperatorStatus> completedOperators() {
-       return completedOperators;
-   }
-   /**
-    * Records of the times the driver has slept.
-    */
-   public DriverSleeps sleeps() {
-       return sleeps;
-   }
-   /**
-    * Status of each active {@link Operator} in the driver.
-    */
-   public List<OperatorStatus> activeOperators() {
-       return activeOperators;
-   }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field("session_id", sessionId);
        builder.field("task_description", taskDescription);
+       builder.field("cluster_name", clusterName);
+       builder.field("node_name", nodeName);
        builder.field("started", DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(started));
        builder.field("last_updated", DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis(lastUpdated));
        builder.field("cpu_nanos", cpuNanos);
@@ -261,112 +140,11 @@ public class DriverStatus implements Task.Status {
        return builder.endObject();
    }

-   @Override
-   public boolean equals(Object o) {
-       if (this == o) return true;
-       if (o == null || getClass() != o.getClass()) return false;
-       DriverStatus that = (DriverStatus) o;
-       return sessionId.equals(that.sessionId)
-           && taskDescription.equals(that.taskDescription)
-           && started == that.started
-           && lastUpdated == that.lastUpdated
-           && cpuNanos == that.cpuNanos
-           && iterations == that.iterations
-           && status == that.status
-           && completedOperators.equals(that.completedOperators)
-           && activeOperators.equals(that.activeOperators)
-           && sleeps.equals(that.sleeps);
-   }
-
-   @Override
-   public int hashCode() {
-       return Objects.hash(
-           sessionId,
-           taskDescription,
-           started,
-           lastUpdated,
-           cpuNanos,
-           iterations,
-           status,
-           completedOperators,
-           activeOperators,
-           sleeps
-       );
-   }

    @Override
    public String toString() {
        return Strings.toString(this);
    }

-   /**
-    * Status of an {@link Operator}.
-    */
-   public static class OperatorStatus implements Writeable, ToXContentObject {
-       /**
-        * String representation of the {@link Operator}. Literally just the
-        * {@link Object#toString()} of it.
-        */
-       private final String operator;
-       /**
-        * Status as reported by the {@link Operator}.
-        */
-       @Nullable
-       private final Operator.Status status;
-
-       public OperatorStatus(String operator, Operator.Status status) {
-           this.operator = operator;
-           this.status = status;
-       }
-
-       OperatorStatus(StreamInput in) throws IOException {
-           operator = in.readString();
-           status = in.readOptionalNamedWriteable(Operator.Status.class);
-       }
-
-       @Override
-       public void writeTo(StreamOutput out) throws IOException {
-           out.writeString(operator);
-           out.writeOptionalNamedWriteable(status != null && VersionedNamedWriteable.shouldSerialize(out, status) ? status : null);
-       }
-
-       public String operator() {
-           return operator;
-       }
-
-       public Operator.Status status() {
-           return status;
-       }
-
-       @Override
-       public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-           builder.startObject();
-           builder.field("operator", operator);
-           if (status != null) {
-               builder.field("status", status);
-           }
-           return builder.endObject();
-       }
-
-       @Override
-       public boolean equals(Object o) {
-           if (this == o) return true;
-           if (o == null || getClass() != o.getClass()) return false;
-           OperatorStatus that = (OperatorStatus) o;
-           return operator.equals(that.operator) && Objects.equals(status, that.status);
-       }
-
-       @Override
-       public int hashCode() {
-           return Objects.hash(operator, status);
-       }
-
-       @Override
-       public String toString() {
-           return Strings.toString(this);
-       }
-   }

    public enum Status implements Writeable, ToXContentFragment {
        QUEUED,
        STARTING,
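The readFrom/writeTo pair above has to stay symmetric around the same transport-version checks so that mixed-version clusters keep working: the new clusterName and nodeName are only written to peers that understand them, and default to "" when reading from an older peer. A minimal sketch of that pattern in isolation, using the same version constant as the diff (the ExampleStatus record and its fields are invented for illustration):

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Sketch only: write the newer field when the peer is new enough, default it when it is not.
record ExampleStatus(String sessionId, String clusterName) implements Writeable {

    static ExampleStatus readFrom(StreamInput in) throws IOException {
        return new ExampleStatus(
            in.readString(),
            in.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_NODE_DESCRIPTION) ? in.readString() : ""
        );
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(sessionId);
        if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_DRIVER_NODE_DESCRIPTION)) {
            out.writeString(clusterName);
        }
    }
}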


@ -0,0 +1,53 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.compute.operator;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.VersionedNamedWriteable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.xcontent.ToXContentObject;
import org.elasticsearch.xcontent.XContentBuilder;
import java.io.IOException;
/**
* Status of an {@link Operator}.
*
* @param operator String representation of the {@link Operator}.
* @param status Status as reported by the {@link Operator}.
*/
public record OperatorStatus(String operator, @Nullable Operator.Status status) implements Writeable, ToXContentObject {
public static OperatorStatus readFrom(StreamInput in) throws IOException {
return new OperatorStatus(in.readString(), in.readOptionalNamedWriteable(Operator.Status.class));
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(operator);
out.writeOptionalNamedWriteable(status != null && VersionedNamedWriteable.shouldSerialize(out, status) ? status : null);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("operator", operator);
if (status != null) {
builder.field("status", status);
}
return builder.endObject();
}
@Override
public String toString() {
return Strings.toString(this);
}
}
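For reference, the extracted record renders as a small object with the status omitted when it is null. A throwaway usage sketch (the operator string and demo class are invented; only the OperatorStatus API comes from the file above):

import org.elasticsearch.common.Strings;
import org.elasticsearch.compute.operator.OperatorStatus;

class OperatorStatusDemo {
    public static void main(String[] args) {
        // A status-less operator: the "status" field is simply left out of the XContent output.
        OperatorStatus status = new OperatorStatus("ExampleOperator[pagesProcessed=3]", null);
        System.out.println(Strings.toString(status)); // {"operator":"ExampleOperator[pagesProcessed=3]"}
    }
}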


@@ -63,6 +63,7 @@ import org.elasticsearch.compute.operator.ShuffleDocsOperator;
import org.elasticsearch.compute.test.BlockTestUtils;
import org.elasticsearch.compute.test.OperatorTestCase;
import org.elasticsearch.compute.test.SequenceLongBlockSourceOperator;
+import org.elasticsearch.compute.test.TestDriverFactory;
import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
@@ -123,7 +124,7 @@ public class OperatorTests extends MapperServiceTestCase {
            }
        });
        DriverContext driverContext = driverContext();
-       drivers.add(new Driver("test", driverContext, factory.get(driverContext), List.of(), docCollector, () -> {}));
+       drivers.add(TestDriverFactory.create(driverContext, factory.get(driverContext), List.of(), docCollector));
    }
    OperatorTestCase.runDriver(drivers);
    Set<Integer> expectedDocIds = searchForDocIds(reader, query);
@@ -214,8 +215,7 @@ public class OperatorTests extends MapperServiceTestCase {
                driverContext
            )
        );
-       Driver driver = new Driver(
-           "test",
+       Driver driver = TestDriverFactory.create(
            driverContext,
            luceneOperatorFactory(reader, new MatchAllDocsQuery(), LuceneOperator.NO_LIMIT).get(driverContext),
            operators,
@@ -228,8 +228,7 @@ public class OperatorTests extends MapperServiceTestCase {
                actualCounts.put(BytesRef.deepCopyOf(spare), counts.getLong(i));
            }
            page.releaseBlocks();
-       }),
-       () -> {}
+       })
        );
        OperatorTestCase.runDriver(driver);
        assertThat(actualCounts, equalTo(expectedCounts));
@@ -248,8 +247,7 @@ public class OperatorTests extends MapperServiceTestCase {
        var results = new ArrayList<Long>();
        DriverContext driverContext = driverContext();
        try (
-           var driver = new Driver(
-               "test",
+           var driver = TestDriverFactory.create(
                driverContext,
                new SequenceLongBlockSourceOperator(driverContext.blockFactory(), values, 100),
                List.of((new LimitOperator.Factory(limit)).get(driverContext)),
@@ -258,8 +256,7 @@ public class OperatorTests extends MapperServiceTestCase {
                for (int i = 0; i < page.getPositionCount(); i++) {
                    results.add(block.getLong(i));
                }
-           }),
-           () -> {}
+           })
            )
        ) {
            OperatorTestCase.runDriver(driver);
@@ -336,8 +333,7 @@ public class OperatorTests extends MapperServiceTestCase {
        var actualValues = new ArrayList<>();
        var actualPrimeOrds = new ArrayList<>();
        try (
-           var driver = new Driver(
-               "test",
+           var driver = TestDriverFactory.create(
                driverContext,
                new SequenceLongBlockSourceOperator(driverContext.blockFactory(), values, 100),
                List.of(
@@ -354,8 +350,7 @@ public class OperatorTests extends MapperServiceTestCase {
            } finally {
                page.releaseBlocks();
            }
-           }),
-           () -> {}
+           })
            )
        ) {
            OperatorTestCase.runDriver(driver);


@@ -31,6 +31,7 @@ import org.elasticsearch.compute.operator.PositionMergingSourceOperator;
import org.elasticsearch.compute.test.BlockTestUtils;
import org.elasticsearch.compute.test.CannedSourceOperator;
import org.elasticsearch.compute.test.TestBlockFactory;
+import org.elasticsearch.compute.test.TestDriverFactory;
import org.elasticsearch.compute.test.TestResultPageSinkOperator;
import org.hamcrest.Matcher;
@@ -110,13 +111,11 @@ public abstract class AggregatorFunctionTestCase extends ForkingOperatorTestCase
        List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance());
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new NullInsertingSourceOperator(new CannedSourceOperator(input.iterator()), blockFactory),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);


@@ -18,6 +18,7 @@ import org.elasticsearch.compute.operator.PageConsumerOperator;
import org.elasticsearch.compute.operator.SequenceIntBlockSourceOperator;
import org.elasticsearch.compute.operator.SourceOperator;
import org.elasticsearch.compute.test.CannedSourceOperator;
+import org.elasticsearch.compute.test.TestDriverFactory;
import java.util.List;
import java.util.stream.LongStream;
@@ -64,13 +65,11 @@ public class CountDistinctIntAggregatorFunctionTests extends AggregatorFunctionT
        DriverContext driverContext = driverContext();
        BlockFactory blockFactory = driverContext.blockFactory();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new CannedSourceOperator(Iterators.single(new Page(blockFactory.newDoubleArrayVector(new double[] { 1.0 }, 1).asBlock()))),
                List.of(simple().get(driverContext)),
-               new PageConsumerOperator(page -> fail("shouldn't have made it this far")),
-               () -> {}
+               new PageConsumerOperator(page -> fail("shouldn't have made it this far"))
            )
        ) {
            expectThrows(Exception.class, () -> runDriver(d)); // ### find a more specific exception type


@@ -18,6 +18,7 @@ import org.elasticsearch.compute.operator.PageConsumerOperator;
import org.elasticsearch.compute.operator.SourceOperator;
import org.elasticsearch.compute.test.CannedSourceOperator;
import org.elasticsearch.compute.test.SequenceLongBlockSourceOperator;
+import org.elasticsearch.compute.test.TestDriverFactory;
import java.util.List;
import java.util.stream.LongStream;
@@ -65,13 +66,11 @@ public class CountDistinctLongAggregatorFunctionTests extends AggregatorFunction
        DriverContext driverContext = driverContext();
        BlockFactory blockFactory = driverContext.blockFactory();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new CannedSourceOperator(Iterators.single(new Page(blockFactory.newDoubleArrayVector(new double[] { 1.0 }, 1).asBlock()))),
                List.of(simple().get(driverContext)),
-               new PageConsumerOperator(page -> fail("shouldn't have made it this far")),
-               () -> {}
+               new PageConsumerOperator(page -> fail("shouldn't have made it this far"))
            )
        ) {
            expectThrows(Exception.class, () -> runDriver(d)); // ### find a more specific exception type


@@ -15,6 +15,7 @@ import org.elasticsearch.compute.operator.Driver;
import org.elasticsearch.compute.operator.DriverContext;
import org.elasticsearch.compute.operator.SequenceDoubleBlockSourceOperator;
import org.elasticsearch.compute.operator.SourceOperator;
+import org.elasticsearch.compute.test.TestDriverFactory;
import org.elasticsearch.compute.test.TestResultPageSinkOperator;
import org.elasticsearch.test.ESTestCase;
@@ -52,13 +53,11 @@ public class SumDoubleAggregatorFunctionTests extends AggregatorFunctionTestCase
        DriverContext driverContext = driverContext();
        List<Page> results = new ArrayList<>();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(Double.MAX_VALUE - 1, 2)),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);
@@ -71,16 +70,14 @@ public class SumDoubleAggregatorFunctionTests extends AggregatorFunctionTestCase
        DriverContext driverContext = driverContext();
        List<Page> results = new ArrayList<>();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceDoubleBlockSourceOperator(
                    driverContext.blockFactory(),
                    DoubleStream.of(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7)
                ),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);
@@ -101,13 +98,11 @@ public class SumDoubleAggregatorFunctionTests extends AggregatorFunctionTestCase
        }
        driverContext = driverContext();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(values)),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);
@@ -124,13 +119,11 @@ public class SumDoubleAggregatorFunctionTests extends AggregatorFunctionTestCase
        }
        driverContext = driverContext();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(largeValues)),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);
@@ -144,13 +137,11 @@ public class SumDoubleAggregatorFunctionTests extends AggregatorFunctionTestCase
        }
        driverContext = driverContext();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceDoubleBlockSourceOperator(driverContext.blockFactory(), DoubleStream.of(largeValues)),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);


@@ -15,6 +15,7 @@ import org.elasticsearch.compute.operator.Driver;
import org.elasticsearch.compute.operator.DriverContext;
import org.elasticsearch.compute.operator.SequenceFloatBlockSourceOperator;
import org.elasticsearch.compute.operator.SourceOperator;
+import org.elasticsearch.compute.test.TestDriverFactory;
import org.elasticsearch.compute.test.TestResultPageSinkOperator;
import org.elasticsearch.test.ESTestCase;
@@ -52,13 +53,11 @@ public class SumFloatAggregatorFunctionTests extends AggregatorFunctionTestCase
        DriverContext driverContext = driverContext();
        List<Page> results = new ArrayList<>();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceFloatBlockSourceOperator(driverContext.blockFactory(), Stream.of(Float.MAX_VALUE - 1, 2f)),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);
@@ -71,16 +70,14 @@ public class SumFloatAggregatorFunctionTests extends AggregatorFunctionTestCase
        DriverContext driverContext = driverContext();
        List<Page> results = new ArrayList<>();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceFloatBlockSourceOperator(
                    driverContext.blockFactory(),
                    Stream.of(0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f)
                ),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);
@@ -101,13 +98,11 @@ public class SumFloatAggregatorFunctionTests extends AggregatorFunctionTestCase
        }
        driverContext = driverContext();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceFloatBlockSourceOperator(driverContext.blockFactory(), Stream.of(values)),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);
@@ -124,13 +119,11 @@ public class SumFloatAggregatorFunctionTests extends AggregatorFunctionTestCase
        }
        driverContext = driverContext();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceFloatBlockSourceOperator(driverContext.blockFactory(), Stream.of(largeValues)),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);
@@ -144,13 +137,11 @@ public class SumFloatAggregatorFunctionTests extends AggregatorFunctionTestCase
        }
        driverContext = driverContext();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceFloatBlockSourceOperator(driverContext.blockFactory(), Stream.of(largeValues)),
                List.of(simple().get(driverContext)),
-               new TestResultPageSinkOperator(results::add),
-               () -> {}
+               new TestResultPageSinkOperator(results::add)
            )
        ) {
            runDriver(d);


@@ -18,6 +18,7 @@ import org.elasticsearch.compute.operator.PageConsumerOperator;
import org.elasticsearch.compute.operator.SequenceIntBlockSourceOperator;
import org.elasticsearch.compute.operator.SourceOperator;
import org.elasticsearch.compute.test.CannedSourceOperator;
+import org.elasticsearch.compute.test.TestDriverFactory;
import java.util.List;
import java.util.stream.LongStream;
@@ -51,13 +52,11 @@ public class SumIntAggregatorFunctionTests extends AggregatorFunctionTestCase {
        DriverContext driverContext = driverContext();
        BlockFactory blockFactory = driverContext.blockFactory();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new CannedSourceOperator(Iterators.single(new Page(blockFactory.newDoubleArrayVector(new double[] { 1.0 }, 1).asBlock()))),
                List.of(simple().get(driverContext)),
-               new PageConsumerOperator(page -> fail("shouldn't have made it this far")),
-               () -> {}
+               new PageConsumerOperator(page -> fail("shouldn't have made it this far"))
            )
        ) {
            expectThrows(Exception.class, () -> runDriver(d)); // ### find a more specific exception type


@@ -18,6 +18,7 @@ import org.elasticsearch.compute.operator.PageConsumerOperator;
import org.elasticsearch.compute.operator.SourceOperator;
import org.elasticsearch.compute.test.CannedSourceOperator;
import org.elasticsearch.compute.test.SequenceLongBlockSourceOperator;
+import org.elasticsearch.compute.test.TestDriverFactory;
import java.util.List;
import java.util.stream.LongStream;
@@ -50,13 +51,11 @@ public class SumLongAggregatorFunctionTests extends AggregatorFunctionTestCase {
    public void testOverflowFails() {
        DriverContext driverContext = driverContext();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new SequenceLongBlockSourceOperator(driverContext.blockFactory(), LongStream.of(Long.MAX_VALUE - 1, 2)),
                List.of(simple().get(driverContext)),
-               new PageConsumerOperator(page -> fail("shouldn't have made it this far")),
-               () -> {}
+               new PageConsumerOperator(page -> fail("shouldn't have made it this far"))
            )
        ) {
            Exception e = expectThrows(ArithmeticException.class, () -> runDriver(d));
@@ -68,13 +67,11 @@ public class SumLongAggregatorFunctionTests extends AggregatorFunctionTestCase {
        DriverContext driverContext = driverContext();
        BlockFactory blockFactory = driverContext.blockFactory();
        try (
-           Driver d = new Driver(
-               "test",
+           Driver d = TestDriverFactory.create(
                driverContext,
                new CannedSourceOperator(Iterators.single(new Page(blockFactory.newDoubleArrayVector(new double[] { 1.0 }, 1).asBlock()))),
                List.of(simple().get(driverContext)),
-               new PageConsumerOperator(page -> fail("shouldn't have made it this far")),
-               () -> {}
+               new PageConsumerOperator(page -> fail("shouldn't have made it this far"))
            )
        ) {
            expectThrows(Exception.class, () -> runDriver(d)); // ### find a more specific exception type


@@ -36,6 +36,7 @@ import org.elasticsearch.compute.operator.HashAggregationOperator;
import org.elasticsearch.compute.operator.LocalSourceOperator;
import org.elasticsearch.compute.operator.PageConsumerOperator;
import org.elasticsearch.compute.test.CannedSourceOperator;
+import org.elasticsearch.compute.test.TestDriverFactory;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
@@ -415,8 +416,7 @@ public class CategorizeBlockHashTests extends BlockHashTestCase {
        List<Page> intermediateOutput = new ArrayList<>();

-       Driver driver = new Driver(
-           "test",
+       Driver driver = TestDriverFactory.create(
            driverContext,
            new LocalSourceOperator(input1),
            List.of(
@@ -431,13 +431,11 @@ public class CategorizeBlockHashTests extends BlockHashTestCase {
                    analysisRegistry
                ).get(driverContext)
            ),
-           new PageConsumerOperator(intermediateOutput::add),
-           () -> {}
+           new PageConsumerOperator(intermediateOutput::add)
        );
        runDriver(driver);

-       driver = new Driver(
-           "test",
+       driver = TestDriverFactory.create(
            driverContext,
            new LocalSourceOperator(input2),
            List.of(
@@ -452,15 +450,13 @@ public class CategorizeBlockHashTests extends BlockHashTestCase {
                    analysisRegistry
                ).get(driverContext)
            ),
-           new PageConsumerOperator(intermediateOutput::add),
-           () -> {}
+           new PageConsumerOperator(intermediateOutput::add)
        );
        runDriver(driver);

        List<Page> finalOutput = new ArrayList<>();

-       driver = new Driver(
-           "test",
+       driver = TestDriverFactory.create(
            driverContext,
            new CannedSourceOperator(intermediateOutput.iterator()),
            List.of(
@@ -475,8 +471,7 @@ public class CategorizeBlockHashTests extends BlockHashTestCase {
                    analysisRegistry
                ).get(driverContext)
            ),
-           new PageConsumerOperator(finalOutput::add),
-           () -> {}
+           new PageConsumerOperator(finalOutput::add)
        );
        runDriver(driver);


@@ -31,6 +31,7 @@ import org.elasticsearch.compute.operator.HashAggregationOperator;
import org.elasticsearch.compute.operator.LocalSourceOperator;
import org.elasticsearch.compute.operator.PageConsumerOperator;
import org.elasticsearch.compute.test.CannedSourceOperator;
+import org.elasticsearch.compute.test.TestDriverFactory;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
@@ -136,8 +137,7 @@ public class CategorizePackedValuesBlockHashTests extends BlockHashTestCase {
        List<Page> intermediateOutput = new ArrayList<>();

-       Driver driver = new Driver(
-           "test",
+       Driver driver = TestDriverFactory.create(
            driverContext,
            new LocalSourceOperator(input1),
            List.of(
@@ -149,13 +149,11 @@ public class CategorizePackedValuesBlockHashTests extends BlockHashTestCase {
                    analysisRegistry
                ).get(driverContext)
            ),
-           new PageConsumerOperator(intermediateOutput::add),
-           () -> {}
+           new PageConsumerOperator(intermediateOutput::add)
        );
        runDriver(driver);

-       driver = new Driver(
-           "test",
+       driver = TestDriverFactory.create(
            driverContext,
            new LocalSourceOperator(input2),
            List.of(
@@ -167,15 +165,13 @@ public class CategorizePackedValuesBlockHashTests extends BlockHashTestCase {
                    analysisRegistry
                ).get(driverContext)
            ),
-           new PageConsumerOperator(intermediateOutput::add),
-           () -> {}
+           new PageConsumerOperator(intermediateOutput::add)
        );
        runDriver(driver);

        List<Page> finalOutput = new ArrayList<>();

-       driver = new Driver(
-           "test",
+       driver = TestDriverFactory.create(
            driverContext,
            new CannedSourceOperator(intermediateOutput.iterator()),
            List.of(
@@ -187,8 +183,7 @@ public class CategorizePackedValuesBlockHashTests extends BlockHashTestCase {
                    analysisRegistry
                ).get(driverContext)
            ),
-           new PageConsumerOperator(finalOutput::add),
-           () -> {}
+           new PageConsumerOperator(finalOutput::add)
        );
        runDriver(driver);


@@ -23,6 +23,7 @@ import org.elasticsearch.compute.operator.Driver;
import org.elasticsearch.compute.operator.DriverContext;
import org.elasticsearch.compute.test.AnyOperatorTestCase;
import org.elasticsearch.compute.test.OperatorTestCase;
+import org.elasticsearch.compute.test.TestDriverFactory;
import org.elasticsearch.compute.test.TestResultPageSinkOperator;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.indices.CrankyCircuitBreakerService;
@@ -94,7 +95,7 @@ public class LuceneCountOperatorTests extends AnyOperatorTestCase {
    @Override
    protected Matcher<String> expectedToStringOfSimple() {
-       return matchesRegex("LuceneCountOperator\\[maxPageSize = \\d+, remainingDocs=100]");
+       return matchesRegex("LuceneCountOperator\\[shards = \\[test], maxPageSize = \\d+, remainingDocs=100]");
    }

    @Override
@@ -151,7 +152,7 @@ public class LuceneCountOperatorTests extends AnyOperatorTestCase {
        int taskConcurrency = between(1, 8);
        for (int i = 0; i < taskConcurrency; i++) {
            DriverContext ctx = contexts.get();
-           drivers.add(new Driver("test", ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add), () -> {}));
+           drivers.add(TestDriverFactory.create(ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add)));
        }
        OperatorTestCase.runDriver(drivers);
        assertThat(results.size(), lessThanOrEqualTo(taskConcurrency));


@@ -24,6 +24,7 @@ import org.elasticsearch.compute.operator.Driver;
import org.elasticsearch.compute.operator.DriverContext;
import org.elasticsearch.compute.test.AnyOperatorTestCase;
import org.elasticsearch.compute.test.OperatorTestCase;
+import org.elasticsearch.compute.test.TestDriverFactory;
import org.elasticsearch.compute.test.TestResultPageSinkOperator;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.core.Releasables;
@@ -166,7 +167,7 @@ public abstract class LuceneMaxOperatorTestCase extends AnyOperatorTestCase {
        int taskConcurrency = between(1, 8);
        for (int i = 0; i < taskConcurrency; i++) {
            DriverContext ctx = contexts.get();
-           drivers.add(new Driver("test", ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add), () -> {}));
+           drivers.add(TestDriverFactory.create(ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add)));
        }
        OperatorTestCase.runDriver(drivers);
        assertThat(results.size(), lessThanOrEqualTo(taskConcurrency));
@@ -194,7 +195,7 @@ public abstract class LuceneMaxOperatorTestCase extends AnyOperatorTestCase {
    @Override
    protected final Matcher<String> expectedToStringOfSimple() {
-       return matchesRegex("LuceneMinMaxOperator\\[maxPageSize = \\d+, remainingDocs=100]");
+       return matchesRegex("LuceneMinMaxOperator\\[shards = \\[test\\], maxPageSize = \\d+, remainingDocs=100]");
    }

    @Override


@@ -24,6 +24,7 @@ import org.elasticsearch.compute.operator.Driver;
import org.elasticsearch.compute.operator.DriverContext;
import org.elasticsearch.compute.test.AnyOperatorTestCase;
import org.elasticsearch.compute.test.OperatorTestCase;
+import org.elasticsearch.compute.test.TestDriverFactory;
import org.elasticsearch.compute.test.TestResultPageSinkOperator;
import org.elasticsearch.core.IOUtils;
import org.elasticsearch.core.Releasables;
@@ -166,7 +167,7 @@ public abstract class LuceneMinOperatorTestCase extends AnyOperatorTestCase {
        int taskConcurrency = between(1, 8);
        for (int i = 0; i < taskConcurrency; i++) {
            DriverContext ctx = contexts.get();
-           drivers.add(new Driver("test", ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add), () -> {}));
+           drivers.add(TestDriverFactory.create(ctx, factory.get(ctx), List.of(), new TestResultPageSinkOperator(results::add)));
        }
        OperatorTestCase.runDriver(drivers);
        assertThat(results.size(), lessThanOrEqualTo(taskConcurrency));
@@ -194,7 +195,7 @@ public abstract class LuceneMinOperatorTestCase extends AnyOperatorTestCase {
    @Override
    protected final Matcher<String> expectedToStringOfSimple() {
-       return matchesRegex("LuceneMinMaxOperator\\[maxPageSize = \\d+, remainingDocs=100]");
+       return matchesRegex("LuceneMinMaxOperator\\[shards = \\[test], maxPageSize = \\d+, remainingDocs=100]");
    }

    @Override
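The updated expectations in these Lucene operator tests show that the operators' toString() now leads with the shard list. Judging only from the regexes above, an operator built over the single test shard would presumably render along these lines (the page-size value is illustrative):

// Expected shape of the new toString(), inferred from the test regex; the number is made up.
// LuceneMinMaxOperator[shards = [test], maxPageSize = 16384, remainingDocs=100]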

Some files were not shown because too many files have changed in this diff.