Merge main into multi-project

commit 1b342b26ef by Niels Bauman, 2025-02-03 20:50:15 +10:00
196 changed files with 4203 additions and 1535 deletions

View file

@@ -96,6 +96,7 @@ steps:
 # - trigger: elasticsearch-dra-workflow
 # label: Trigger DRA snapshot workflow
 # async: true
+# branches: "main 9.* 8.* 7.17"
 # build:
 # branch: "$BUILDKITE_BRANCH"
 # commit: "$BUILDKITE_COMMIT"

View file

@@ -97,7 +97,7 @@ steps:
 # - trigger: elasticsearch-dra-workflow
 # label: Trigger DRA snapshot workflow
 # async: true
-# branches: "main 8.* 7.17"
+# branches: "main 9.* 8.* 7.17"
 # build:
 # branch: "$BUILDKITE_BRANCH"
 # commit: "$BUILDKITE_COMMIT"

View file

@@ -0,0 +1,5 @@
pr: 117176
summary: Integrate IBM watsonx to Inference API for re-ranking task
area: Experiences
type: enhancement
issues: []

View file

@@ -0,0 +1,5 @@
pr: 120852
summary: Correct line and column numbers of missing named parameters
area: ES|QL
type: bug
issues: []

View file

@@ -0,0 +1,5 @@
pr: 121260
summary: Introduce a pre-mapping logical plan processing step
area: ES|QL
type: enhancement
issues: []

View file

@@ -0,0 +1,6 @@
pr: 121324
summary: Support duplicate suggestions in completion field
area: Suggesters
type: bug
issues:
- 82432

View file

@@ -0,0 +1,6 @@
pr: 121428
summary: Fix infer on and elasticsearch service endpoint created with a deployment id
area: Machine Learning
type: bug
issues: []

View file

@@ -286,7 +286,7 @@ public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 /**
 * Compares the results of running two analyzers against many random
-* strings. The goal is to figure out if two anlayzers are "the same" by
+* strings. The goal is to figure out if two analyzers are "the same" by
 * comparing their results. This is far from perfect but should be fairly
 * accurate, especially for gross things like missing {@code decimal_digit}
 * token filters, and should be fairly fast because it compares a fairly

View file

@@ -0,0 +1,18 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.core;
/**
* A {@link java.util.function.Supplier}-like interface which allows throwing checked exceptions.
*/
@FunctionalInterface
public interface CheckedSupplier<T, E extends Exception> {
T get() throws E;
}
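For context, a minimal sketch (not part of the commit) of the pattern this interface enables: unlike java.util.function.Supplier, the checked exception declared by the lambda propagates through get(). The demo class and file name below are invented for illustration; EntitlementBootstrap later in this diff relies on the same pattern when it passes EntitlementBootstrap::createTempFile into its self-test.

import org.elasticsearch.core.CheckedSupplier;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

class CheckedSupplierDemo {
    // The checked IOException declared by the lambda flows through get(),
    // so no RuntimeException wrapping is needed.
    static String load(CheckedSupplier<String, IOException> source) throws IOException {
        return source.get();
    }

    public static void main(String[] args) throws IOException {
        System.out.println(load(() -> Files.readString(Path.of("example.txt"))));
    }
}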

View file

@@ -52,10 +52,9 @@ import javax.net.ssl.SSLSocketFactory;
 * <p>
 * A bit like Mockito but way more painful.
 */
-class DummyImplementations {
+public class DummyImplementations {
-static class DummyLocaleServiceProvider extends LocaleServiceProvider {
+public static class DummyLocaleServiceProvider extends LocaleServiceProvider {
 @Override
 public Locale[] getAvailableLocales() {
 throw unexpected();

View file

@@ -0,0 +1,29 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.entitlement.qa.test;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
@Target(ElementType.METHOD)
@Retention(RetentionPolicy.RUNTIME)
public @interface EntitlementTest {
enum ExpectedAccess {
PLUGINS,
ES_MODULES_ONLY,
ALWAYS_DENIED
}
ExpectedAccess expectedAccess();
int fromJavaVersion() default -1;
}
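For illustration only (this class is not in the commit; FileCheckActions below is the real usage): methods carrying this annotation are discovered reflectively by getTestEntries(...) in RestEntitlementsCheckAction, which requires them to be static and parameterless and treats the default fromJavaVersion() of -1 as applying to all Java versions.

class ExampleCheckActions {
    // Registered under the key "readSystemProperty" by getTestEntries(ExampleCheckActions.class).
    @EntitlementTest(expectedAccess = EntitlementTest.ExpectedAccess.PLUGINS)
    static void readSystemProperty() {
        System.getProperty("user.home");
    }

    // Only registered when running on Java 22 or newer.
    @EntitlementTest(expectedAccess = EntitlementTest.ExpectedAccess.ALWAYS_DENIED, fromJavaVersion = 22)
    static void java22OnlyAction() {
        // would exercise a JDK 22+ API here
    }
}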

View file

@@ -22,6 +22,8 @@ import java.nio.file.Paths;
 import java.nio.file.attribute.UserPrincipal;
 import java.util.Scanner;
+import static org.elasticsearch.entitlement.qa.test.EntitlementTest.ExpectedAccess.PLUGINS;
 @SuppressForbidden(reason = "Explicitly checking APIs that are forbidden")
 class FileCheckActions {
@@ -43,38 +45,47 @@ class FileCheckActions {
 return testRootDir.resolve("read_write_file");
 }
+@EntitlementTest(expectedAccess = PLUGINS)
 static void createScannerFile() throws FileNotFoundException {
 new Scanner(readFile().toFile());
 }
+@EntitlementTest(expectedAccess = PLUGINS)
 static void createScannerFileWithCharset() throws IOException {
 new Scanner(readFile().toFile(), StandardCharsets.UTF_8);
 }
+@EntitlementTest(expectedAccess = PLUGINS)
 static void createScannerFileWithCharsetName() throws FileNotFoundException {
 new Scanner(readFile().toFile(), "UTF-8");
 }
+@EntitlementTest(expectedAccess = PLUGINS)
 static void createFileOutputStreamString() throws IOException {
 new FileOutputStream(readWriteFile().toString()).close();
 }
+@EntitlementTest(expectedAccess = PLUGINS)
 static void createFileOutputStreamStringWithAppend() throws IOException {
 new FileOutputStream(readWriteFile().toString(), false).close();
 }
+@EntitlementTest(expectedAccess = PLUGINS)
 static void createFileOutputStreamFile() throws IOException {
 new FileOutputStream(readWriteFile().toFile()).close();
 }
+@EntitlementTest(expectedAccess = PLUGINS)
 static void createFileOutputStreamFileWithAppend() throws IOException {
 new FileOutputStream(readWriteFile().toFile(), false).close();
 }
+@EntitlementTest(expectedAccess = PLUGINS)
 static void filesProbeContentType() throws IOException {
 Files.probeContentType(readFile());
 }
+@EntitlementTest(expectedAccess = PLUGINS)
 static void filesSetOwner() throws IOException {
 UserPrincipal owner = EntitledActions.getFileOwner(readWriteFile());
 Files.setOwner(readWriteFile(), owner); // set to existing owner, just trying to execute the method

View file

@@ -33,6 +33,9 @@ import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.RestStatus;
 import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
 import java.net.DatagramPacket;
 import java.net.DatagramSocket;
 import java.net.HttpURLConnection;
@@ -51,8 +54,10 @@ import java.net.URLConnection;
 import java.net.URLStreamHandler;
 import java.net.spi.URLStreamHandlerProvider;
 import java.security.NoSuchAlgorithmException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -61,6 +66,7 @@ import javax.net.ssl.HttpsURLConnection;
 import javax.net.ssl.SSLContext;
 import static java.util.Map.entry;
+import static org.elasticsearch.entitlement.qa.test.EntitlementTest.ExpectedAccess.PLUGINS;
 import static org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction.CheckAction.alwaysDenied;
 import static org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction.CheckAction.deniedToPlugins;
 import static org.elasticsearch.entitlement.qa.test.RestEntitlementsCheckAction.CheckAction.forPlugins;
@@ -88,7 +94,11 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 }
 }
-private static final Map<String, CheckAction> checkActions = Stream.<Map.Entry<String, CheckAction>>of(
+private static final Map<String, CheckAction> checkActions = Stream.concat(
+Stream.<Entry<String, CheckAction>>of(
+entry("static_reflection", deniedToPlugins(RestEntitlementsCheckAction::staticMethodNeverEntitledViaReflection)),
+entry("nonstatic_reflection", deniedToPlugins(RestEntitlementsCheckAction::nonstaticMethodNeverEntitledViaReflection)),
+entry("constructor_reflection", deniedToPlugins(RestEntitlementsCheckAction::constructorNeverEntitledViaReflection)),
 entry("runtime_exit", deniedToPlugins(RestEntitlementsCheckAction::runtimeExit)),
 entry("runtime_halt", deniedToPlugins(RestEntitlementsCheckAction::runtimeHalt)),
 entry("system_exit", deniedToPlugins(RestEntitlementsCheckAction::systemExit)),
@@ -141,7 +151,10 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 entry("socket_setSocketImplFactory", alwaysDenied(RestEntitlementsCheckAction::socket$$setSocketImplFactory)),
 entry("url_setURLStreamHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::url$$setURLStreamHandlerFactory)),
 entry("urlConnection_setFileNameMap", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setFileNameMap)),
-entry("urlConnection_setContentHandlerFactory", alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setContentHandlerFactory)),
+entry(
+"urlConnection_setContentHandlerFactory",
+alwaysDenied(RestEntitlementsCheckAction::urlConnection$$setContentHandlerFactory)
+),
 entry("proxySelector_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultProxySelector)),
 entry("responseCache_setDefault", alwaysDenied(RestEntitlementsCheckAction::setDefaultResponseCache)),
@@ -178,7 +191,10 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 "asynchronous_server_socket_channel_bind_backlog",
 forPlugins(NetworkAccessCheckActions::asynchronousServerSocketChannelBindWithBacklog)
 ),
-entry("asynchronous_server_socket_channel_accept", forPlugins(NetworkAccessCheckActions::asynchronousServerSocketChannelAccept)),
+entry(
+"asynchronous_server_socket_channel_accept",
+forPlugins(NetworkAccessCheckActions::asynchronousServerSocketChannelAccept)
+),
 entry(
 "asynchronous_server_socket_channel_accept_with_handler",
 forPlugins(NetworkAccessCheckActions::asynchronousServerSocketChannelAcceptWithHandler)
@@ -203,7 +219,10 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 entry("enable_native_access", new CheckAction(VersionSpecificNativeChecks::enableNativeAccess, false, 22)),
 entry("address_target_layout", new CheckAction(VersionSpecificNativeChecks::addressLayoutWithTargetLayout, false, 22)),
 entry("donwncall_handle", new CheckAction(VersionSpecificNativeChecks::linkerDowncallHandle, false, 22)),
-entry("donwncall_handle_with_address", new CheckAction(VersionSpecificNativeChecks::linkerDowncallHandleWithAddress, false, 22)),
+entry(
+"donwncall_handle_with_address",
+new CheckAction(VersionSpecificNativeChecks::linkerDowncallHandleWithAddress, false, 22)
+),
 entry("upcall_stub", new CheckAction(VersionSpecificNativeChecks::linkerUpcallStub, false, 22)),
 entry("reinterpret", new CheckAction(VersionSpecificNativeChecks::memorySegmentReinterpret, false, 22)),
 entry("reinterpret_cleanup", new CheckAction(VersionSpecificNativeChecks::memorySegmentReinterpretWithCleanup, false, 22)),
@@ -212,19 +231,51 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 new CheckAction(VersionSpecificNativeChecks::memorySegmentReinterpretWithSizeAndCleanup, false, 22)
 ),
 entry("symbol_lookup_name", new CheckAction(VersionSpecificNativeChecks::symbolLookupWithName, false, 22)),
-entry("symbol_lookup_path", new CheckAction(VersionSpecificNativeChecks::symbolLookupWithPath, false, 22)),
-entry("create_scanner", forPlugins(FileCheckActions::createScannerFile)),
-entry("create_scanner_with_charset", forPlugins(FileCheckActions::createScannerFileWithCharset)),
-entry("create_scanner_with_charset_name", forPlugins(FileCheckActions::createScannerFileWithCharsetName)),
-entry("create_file_output_stream_string", forPlugins(FileCheckActions::createFileOutputStreamString)),
-entry("create_file_output_stream_string_with_append", forPlugins(FileCheckActions::createFileOutputStreamStringWithAppend)),
-entry("create_file_output_stream_file", forPlugins(FileCheckActions::createFileOutputStreamFile)),
-entry("create_file_output_stream_file_with_append", forPlugins(FileCheckActions::createFileOutputStreamFileWithAppend)),
-entry("files_probe_content_type", forPlugins(FileCheckActions::filesProbeContentType)),
-entry("files_set_owner", forPlugins(FileCheckActions::filesSetOwner))
+entry("symbol_lookup_path", new CheckAction(VersionSpecificNativeChecks::symbolLookupWithPath, false, 22))
+),
+getTestEntries(FileCheckActions.class)
 )
 .filter(entry -> entry.getValue().fromJavaVersion() == null || Runtime.version().feature() >= entry.getValue().fromJavaVersion())
-.collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue));
+.collect(Collectors.toUnmodifiableMap(Entry::getKey, Entry::getValue));
+@SuppressForbidden(reason = "Need package private methods so we don't have to make them all public")
+private static Method[] getDeclaredMethods(Class<?> clazz) {
+return clazz.getDeclaredMethods();
+}
+private static Stream<Entry<String, CheckAction>> getTestEntries(Class<?> actionsClass) {
+List<Entry<String, CheckAction>> entries = new ArrayList<>();
+for (var method : getDeclaredMethods(actionsClass)) {
+var testAnnotation = method.getAnnotation(EntitlementTest.class);
+if (testAnnotation == null) {
+continue;
+}
+if (Modifier.isStatic(method.getModifiers()) == false) {
+throw new AssertionError("Entitlement test method [" + method + "] must be static");
+}
+if (method.getParameterTypes().length != 0) {
+throw new AssertionError("Entitlement test method [" + method + "] must not have parameters");
+}
+CheckedRunnable<Exception> runnable = () -> {
+try {
+method.invoke(null);
+} catch (IllegalAccessException e) {
+throw new AssertionError(e);
+} catch (InvocationTargetException e) {
+if (e.getCause() instanceof Exception exc) {
+throw exc;
+} else {
+throw new AssertionError(e);
+}
+}
+};
+boolean deniedToPlugins = testAnnotation.expectedAccess() == PLUGINS;
+Integer fromJavaVersion = testAnnotation.fromJavaVersion() == -1 ? null : testAnnotation.fromJavaVersion();
+entries.add(entry(method.getName(), new CheckAction(runnable, deniedToPlugins, fromJavaVersion)));
+}
+return entries.stream();
+}
 private static void createURLStreamHandlerProvider() {
 var x = new URLStreamHandlerProvider() {
@@ -290,6 +341,11 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 System.exit(123);
 }
+private static void staticMethodNeverEntitledViaReflection() throws Exception {
+Method systemExit = System.class.getMethod("exit", int.class);
+systemExit.invoke(null, 123);
+}
 private static void createClassLoader() throws IOException {
 try (var classLoader = new URLClassLoader("test", new URL[0], RestEntitlementsCheckAction.class.getClassLoader())) {
 logger.info("Created URLClassLoader [{}]", classLoader.getName());
@@ -300,6 +356,11 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 new ProcessBuilder("").start();
 }
+private static void nonstaticMethodNeverEntitledViaReflection() throws Exception {
+Method processBuilderStart = ProcessBuilder.class.getMethod("start");
+processBuilderStart.invoke(new ProcessBuilder(""));
+}
 private static void processBuilder_startPipeline() throws IOException {
 ProcessBuilder.startPipeline(List.of());
 }
@@ -338,6 +399,10 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 new DummyLocaleServiceProvider();
 }
+private static void constructorNeverEntitledViaReflection() throws Exception {
+DummyLocaleServiceProvider.class.getConstructor().newInstance();
+}
 private static void breakIteratorProvider$() {
 new DummyBreakIteratorProvider();
 }
@@ -470,7 +535,7 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
 return checkActions.entrySet()
 .stream()
 .filter(kv -> kv.getValue().isAlwaysDeniedToPlugins() == false)
-.map(Map.Entry::getKey)
+.map(Entry::getKey)
 .collect(Collectors.toSet());
 }

View file

@@ -14,6 +14,8 @@ import com.sun.tools.attach.AgentLoadException;
 import com.sun.tools.attach.AttachNotSupportedException;
 import com.sun.tools.attach.VirtualMachine;
+import org.elasticsearch.core.CheckedConsumer;
+import org.elasticsearch.core.CheckedSupplier;
 import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.entitlement.initialization.EntitlementInitialization;
 import org.elasticsearch.entitlement.runtime.api.NotEntitledException;
@@ -22,8 +24,10 @@ import org.elasticsearch.logging.LogManager;
 import org.elasticsearch.logging.Logger;
 import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.attribute.FileAttribute;
 import java.util.Map;
 import java.util.function.Function;
@@ -31,10 +35,22 @@ import static java.util.Objects.requireNonNull;
 public class EntitlementBootstrap {
-public record BootstrapArgs(Map<String, Policy> pluginPolicies, Function<Class<?>, String> pluginResolver) {
+public record BootstrapArgs(
+Map<String, Policy> pluginPolicies,
+Function<Class<?>, String> pluginResolver,
+Path[] dataDirs,
+Path configDir,
+Path tempDir
+) {
 public BootstrapArgs {
 requireNonNull(pluginPolicies);
 requireNonNull(pluginResolver);
+requireNonNull(dataDirs);
+if (dataDirs.length == 0) {
+throw new IllegalArgumentException("must provide at least one data directory");
+}
+requireNonNull(configDir);
+requireNonNull(tempDir);
 }
 }
@@ -50,13 +66,22 @@ public class EntitlementBootstrap {
 *
 * @param pluginPolicies a map holding policies for plugins (and modules), by plugin (or module) name.
 * @param pluginResolver a functor to map a Java Class to the plugin it belongs to (the plugin name).
+* @param dataDirs data directories for Elasticsearch
+* @param configDir the config directory for Elasticsearch
+* @param tempDir the temp directory for Elasticsearch
 */
-public static void bootstrap(Map<String, Policy> pluginPolicies, Function<Class<?>, String> pluginResolver) {
+public static void bootstrap(
+Map<String, Policy> pluginPolicies,
+Function<Class<?>, String> pluginResolver,
+Path[] dataDirs,
+Path configDir,
+Path tempDir
+) {
 logger.debug("Loading entitlement agent");
 if (EntitlementBootstrap.bootstrapArgs != null) {
 throw new IllegalStateException("plugin data is already set");
 }
-EntitlementBootstrap.bootstrapArgs = new BootstrapArgs(pluginPolicies, pluginResolver);
+EntitlementBootstrap.bootstrapArgs = new BootstrapArgs(pluginPolicies, pluginResolver, dataDirs, configDir, tempDir);
 exportInitializationToAgent();
 loadAgent(findAgentJar());
 selfTest();
@@ -123,30 +148,31 @@ public class EntitlementBootstrap {
 * @throws IllegalStateException if the entitlements system can't prevent an unauthorized action of our choosing
 */
 private static void selfTest() {
-ensureCannotStartProcess();
-ensureCanCreateTempFile();
+ensureCannotStartProcess(ProcessBuilder::start);
+ensureCanCreateTempFile(EntitlementBootstrap::createTempFile);
+// Try again with reflection
+ensureCannotStartProcess(EntitlementBootstrap::reflectiveStartProcess);
+ensureCanCreateTempFile(EntitlementBootstrap::reflectiveCreateTempFile);
 }
-private static void ensureCannotStartProcess() {
+private static void ensureCannotStartProcess(CheckedConsumer<ProcessBuilder, ?> startProcess) {
 try {
 // The command doesn't matter; it doesn't even need to exist
-new ProcessBuilder("").start();
+startProcess.accept(new ProcessBuilder(""));
 } catch (NotEntitledException e) {
 logger.debug("Success: Entitlement protection correctly prevented process creation");
 return;
-} catch (IOException e) {
+} catch (Exception e) {
 throw new IllegalStateException("Failed entitlement protection self-test", e);
 }
 throw new IllegalStateException("Entitlement protection self-test was incorrectly permitted");
 }
-/**
- * Originally {@code Security.selfTest}.
- */
 @SuppressForbidden(reason = "accesses jvm default tempdir as a self-test")
-private static void ensureCanCreateTempFile() {
+private static void ensureCanCreateTempFile(CheckedSupplier<Path, ?> createTempFile) {
 try {
-Path p = Files.createTempFile(null, null);
+Path p = createTempFile.get();
 p.toFile().deleteOnExit();
 // Make an effort to clean up the file immediately; also, deleteOnExit leaves the file if the JVM exits abnormally.
@@ -163,5 +189,24 @@ public class EntitlementBootstrap {
 logger.debug("Success: Entitlement protection correctly permitted temp file creation");
 }
+@SuppressForbidden(reason = "accesses jvm default tempdir as a self-test")
+private static Path createTempFile() throws Exception {
+return Files.createTempFile(null, null);
+}
+private static void reflectiveStartProcess(ProcessBuilder pb) throws Exception {
+try {
+var start = ProcessBuilder.class.getMethod("start");
+start.invoke(pb);
+} catch (InvocationTargetException e) {
+throw (Exception) e.getCause();
+}
+}
+private static Path reflectiveCreateTempFile() throws Exception {
+return (Path) Files.class.getMethod("createTempFile", String.class, String.class, FileAttribute[].class)
+.invoke(null, null, null, new FileAttribute<?>[0]);
+}
 private static final Logger logger = LogManager.getLogger(EntitlementBootstrap.class);
 }
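A sketch of what a caller of the widened bootstrap entry point now supplies (the paths below are invented; in Elasticsearch the real values come from the node's environment):

import org.elasticsearch.entitlement.bootstrap.EntitlementBootstrap;

import java.nio.file.Path;
import java.util.Map;

class BootstrapWiring {
    static void initEntitlements() {
        EntitlementBootstrap.bootstrap(
            Map.of(),                                         // pluginPolicies, keyed by plugin/module name
            clazz -> null,                                    // pluginResolver: Class -> plugin name
            new Path[] { Path.of("/var/lib/elasticsearch") }, // dataDirs: BootstrapArgs rejects an empty array
            Path.of("/etc/elasticsearch"),                    // configDir
            Path.of("/tmp/elasticsearch")                     // tempDir
        );
    }
}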

View file

@@ -9,10 +9,8 @@
 package org.elasticsearch.entitlement.runtime.policy;
-import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.entitlement.runtime.policy.entitlements.FileEntitlement;
-import java.io.File;
 import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -51,20 +49,10 @@ public final class FileAccessTree {
 return checkPath(normalize(path), readPaths);
 }
-@SuppressForbidden(reason = "Explicitly checking File apis")
-boolean canRead(File file) {
-return checkPath(normalize(file.toPath()), readPaths);
-}
 boolean canWrite(Path path) {
 return checkPath(normalize(path), writePaths);
 }
-@SuppressForbidden(reason = "Explicitly checking File apis")
-boolean canWrite(File file) {
-return checkPath(normalize(file.toPath()), writePaths);
-}
 private static String normalize(Path path) {
 return path.toAbsolutePath().normalize().toString();
 }

View file

@@ -169,23 +169,7 @@
 }
 public void checkStartProcess(Class<?> callerClass) {
-neverEntitled(callerClass, "start process");
-}
-private void neverEntitled(Class<?> callerClass, String operationDescription) {
-var requestingClass = requestingClass(callerClass);
-if (isTriviallyAllowed(requestingClass)) {
-return;
-}
-throw new NotEntitledException(
-Strings.format(
-"Not entitled: caller [%s], module [%s], operation [%s]",
-callerClass,
-requestingClass.getModule() == null ? "<none>" : requestingClass.getModule().getName(),
-operationDescription
-)
-);
+neverEntitled(callerClass, () -> "start process");
 }
 /**
@@ -241,31 +225,9 @@
 checkChangeJVMGlobalState(callerClass);
 }
-/**
- * Check for operations that can access sensitive network information, e.g. secrets, tokens or SSL sessions
- */
-public void checkReadSensitiveNetworkInformation(Class<?> callerClass) {
-neverEntitled(callerClass, "access sensitive network information");
-}
 @SuppressForbidden(reason = "Explicitly checking File apis")
 public void checkFileRead(Class<?> callerClass, File file) {
-var requestingClass = requestingClass(callerClass);
-if (isTriviallyAllowed(requestingClass)) {
-return;
-}
-ModuleEntitlements entitlements = getEntitlements(requestingClass);
-if (entitlements.fileAccess().canRead(file) == false) {
-throw new NotEntitledException(
-Strings.format(
-"Not entitled: caller [%s], module [%s], entitlement [file], operation [read], path [%s]",
-callerClass,
-requestingClass.getModule(),
-file
-)
-);
-}
+checkFileRead(callerClass, file.toPath());
 }
 public void checkFileRead(Class<?> callerClass, Path path) {
@@ -289,22 +251,7 @@
 @SuppressForbidden(reason = "Explicitly checking File apis")
 public void checkFileWrite(Class<?> callerClass, File file) {
-var requestingClass = requestingClass(callerClass);
-if (isTriviallyAllowed(requestingClass)) {
-return;
-}
-ModuleEntitlements entitlements = getEntitlements(requestingClass);
-if (entitlements.fileAccess().canWrite(file) == false) {
-throw new NotEntitledException(
-Strings.format(
-"Not entitled: caller [%s], module [%s], entitlement [file], operation [write], path [%s]",
-callerClass,
-requestingClass.getModule(),
-file
-)
-);
-}
+checkFileWrite(callerClass, file.toPath());
 }
 public void checkFileWrite(Class<?> callerClass, Path path) {

View file

@@ -238,7 +238,6 @@ public class PolicyManagerTests extends ESTestCase {
 }
 public void testRequestingModuleWithStackWalk() throws IOException, ClassNotFoundException {
-var agentsClass = new TestAgent();
 var entitlementsClass = makeClassInItsOwnModule(); // A class in the entitlements library itself
 var requestingClass = makeClassInItsOwnModule(); // This guy is always the right answer
 var instrumentedClass = makeClassInItsOwnModule(); // The class that called the check method
@@ -365,13 +364,6 @@ public class PolicyManagerTests extends ESTestCase {
 return layer.findLoader("org.example.plugin").loadClass("q.B");
 }
-private static Class<?> makeClassInItsOwnUnnamedModule() throws IOException, ClassNotFoundException {
-final Path home = createTempDir();
-Path jar = createMockPluginJar(home);
-var layer = createLayerForJar(jar, "org.example.plugin");
-return layer.findLoader("org.example.plugin").loadClass("q.B");
-}
 private static PolicyManager policyManager(String agentsPackageName, Module entitlementsModule) {
 return new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "test", agentsPackageName, entitlementsModule);
 }

View file

@@ -143,9 +143,6 @@ tests:
 - class: org.elasticsearch.datastreams.DataStreamsClientYamlTestSuiteIT
 method: test {p0=data_stream/120_data_streams_stats/Multiple data stream}
 issue: https://github.com/elastic/elasticsearch/issues/118217
-- class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests
-method: testBottomFieldSort
-issue: https://github.com/elastic/elasticsearch/issues/118214
 - class: org.elasticsearch.xpack.searchablesnapshots.RetrySearchIntegTests
 method: testSearcherId
 issue: https://github.com/elastic/elasticsearch/issues/118374
@@ -231,9 +228,6 @@ tests:
 - class: org.elasticsearch.datastreams.DataStreamsClientYamlTestSuiteIT
 method: test {p0=data_stream/140_data_stream_aliases/Create data stream aliases using wildcard expression}
 issue: https://github.com/elastic/elasticsearch/issues/120890
-- class: org.elasticsearch.xpack.test.rest.XPackRestIT
-method: test {p0=ml/inference_crud/*}
-issue: https://github.com/elastic/elasticsearch/issues/120816
 - class: org.elasticsearch.xpack.security.authc.service.ServiceAccountIT
 method: testAuthenticateShouldNotFallThroughInCaseOfFailure
 issue: https://github.com/elastic/elasticsearch/issues/120902
@@ -330,6 +324,10 @@ tests:
 method: testCrossClusterAsyncQueryStop
 issue: https://github.com/elastic/elasticsearch/issues/121249
 - class: org.elasticsearch.xpack.test.rest.XPackRestIT
+method: test {p0=transform/*}
+issue: https://github.com/elastic/elasticsearch/issues/120816
+- class: org.elasticsearch.xpack.test.rest.XPackRestIT
+method: test {p0=ml/*}
 issue: https://github.com/elastic/elasticsearch/issues/120816
 - class: org.elasticsearch.upgrades.VectorSearchIT
 method: testBBQVectorSearch {upgradedNodes=0}
@@ -359,14 +357,6 @@ tests:
 - class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT
 method: test {yaml=indices.get_alias/10_basic/Get aliases via /*/_alias/}
 issue: https://github.com/elastic/elasticsearch/issues/121290
-- class: org.elasticsearch.xpack.inference.action.TransportInferenceActionTests
-method: testRerouting_HandlesTransportException_FromOtherNode
-issue: https://github.com/elastic/elasticsearch/issues/121292
-- class: org.elasticsearch.xpack.inference.action.TransportInferenceActionTests
-method: testRerouting_ToOtherNode
-issue: https://github.com/elastic/elasticsearch/issues/121293
-- class: org.elasticsearch.xpack.inference.common.InferenceServiceNodeLocalRateLimitCalculatorTests
-issue: https://github.com/elastic/elasticsearch/issues/121294
 - class: org.elasticsearch.env.NodeEnvironmentTests
 method: testGetBestDowngradeVersion
 issue: https://github.com/elastic/elasticsearch/issues/121316
@@ -391,6 +381,76 @@ tests:
 - class: org.elasticsearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT
 method: test {yaml=search.vectors/42_knn_search_int4_flat/Vector similarity with filter only}
 issue: https://github.com/elastic/elasticsearch/issues/121350
+- class: org.elasticsearch.test.rest.yaml.RcsCcsCommonYamlTestSuiteIT
+method: test {p0=search.vectors/42_knn_search_int4_flat/KNN Vector similarity search only}
+issue: https://github.com/elastic/elasticsearch/issues/121395
+- class: org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT
+issue: https://github.com/elastic/elasticsearch/issues/121407
+- class: org.elasticsearch.xpack.ml.integration.ClassificationIT
+method: testDependentVariableIsAliasToNested
+issue: https://github.com/elastic/elasticsearch/issues/121415
+- class: org.elasticsearch.xpack.esql.heap_attack.HeapAttackIT
+method: testLookupExplosionBigStringManyMatches
+issue: https://github.com/elastic/elasticsearch/issues/121465
+- class: org.elasticsearch.xpack.security.authc.jwt.JwtRealmSingleNodeTests
+method: testClientSecretRotation
+issue: https://github.com/elastic/elasticsearch/issues/120985
+- class: org.elasticsearch.xpack.security.authc.jwt.JwtRealmSingleNodeTests
+method: testGrantApiKeyForJWT
+issue: https://github.com/elastic/elasticsearch/issues/121039
+- class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT
+method: test {yaml=cluster.health/10_basic/cluster health basic test}
+issue: https://github.com/elastic/elasticsearch/issues/121478
+- class: org.elasticsearch.xpack.esql.heap_attack.HeapAttackIT
+method: testLookupExplosionManyMatches
+issue: https://github.com/elastic/elasticsearch/issues/121481
+- class: org.elasticsearch.xpack.security.profile.ProfileIntegTests
+method: testGetUsersWithProfileUid
+issue: https://github.com/elastic/elasticsearch/issues/121483
+- class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT
+method: test {yaml=cat.aliases/10_basic/Empty cluster}
+issue: https://github.com/elastic/elasticsearch/issues/121484
+- class: org.elasticsearch.xpack.transform.checkpoint.TransformCCSCanMatchIT
+method: testTransformLifecycle_RangeQueryThatMatchesNoShards
+issue: https://github.com/elastic/elasticsearch/issues/121480
+- class: org.elasticsearch.xpack.esql.action.CrossClusterAsyncQueryIT
+method: testStopQueryLocal
+issue: https://github.com/elastic/elasticsearch/issues/121487
+- class: org.elasticsearch.xpack.esql.action.CrossClusterAsyncQueryIT
+method: testSuccessfulPathways
+issue: https://github.com/elastic/elasticsearch/issues/121488
+- class: org.elasticsearch.xpack.esql.action.CrossClusterAsyncQueryIT
+method: testAsyncQueriesWithLimit0
+issue: https://github.com/elastic/elasticsearch/issues/121489
+- class: org.elasticsearch.xpack.security.profile.ProfileIntegTests
+method: testSuggestProfilesWithHint
+issue: https://github.com/elastic/elasticsearch/issues/121116
+- class: org.elasticsearch.xpack.sql.qa.single_node.JdbcDocCsvSpecIT
+method: test {docs.testFilterToday}
+issue: https://github.com/elastic/elasticsearch/issues/121474
+- class: org.elasticsearch.xpack.security.profile.ProfileIntegTests
+method: testSuggestProfileWithData
+issue: https://github.com/elastic/elasticsearch/issues/121258
+- class: org.elasticsearch.ingest.geoip.FullClusterRestartIT
+method: testGeoIpSystemFeaturesMigration {cluster=UPGRADED}
+issue: https://github.com/elastic/elasticsearch/issues/121115
+- class: org.elasticsearch.xpack.core.ilm.SetSingleNodeAllocateStepTests
+method: testPerformActionSomeShardsOnlyOnNewNodesButNewNodesInvalidAttrs
+issue: https://github.com/elastic/elasticsearch/issues/121495
+- class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT
+method: test {yaml=cat.aliases/40_hidden/Test cat aliases output with a visible index with a hidden alias}
+issue: https://github.com/elastic/elasticsearch/issues/121128
+- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
+method: test {p0=search.vectors/42_knn_search_int4_flat/Vector similarity with filter only}
+issue: https://github.com/elastic/elasticsearch/issues/121412
+- class: org.elasticsearch.xpack.inference.common.InferenceServiceNodeLocalRateLimitCalculatorTests
+issue: https://github.com/elastic/elasticsearch/issues/121294
+- class: org.elasticsearch.xpack.ml.integration.ClassificationIT
+method: testDependentVariableIsAliasToKeyword
+issue: https://github.com/elastic/elasticsearch/issues/121492
+- class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT
+method: test {yaml=cat.aliases/10_basic/Complex alias}
+issue: https://github.com/elastic/elasticsearch/issues/121513
 # Examples:
 #

View file

@@ -2,7 +2,7 @@
 "indices.resolve_cluster": {
 "documentation": {
 "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-resolve-cluster-api.html",
-"description":"Resolves the specified index expressions to return information about each cluster, including the local cluster, if included."
+"description": "Resolves the specified index expressions to return information about each cluster. If no index expression is provided, this endpoint will return information about all the remote clusters that are configured on the local cluster."
 },
 "stability": "stable",
 "visibility": "public",
@@ -11,11 +11,13 @@
 },
 "url": {
 "paths": [
+{
+"path": "/_resolve/cluster",
+"methods": ["GET"]
+},
 {
 "path": "/_resolve/cluster/{name}",
-"methods":[
-"GET"
-],
+"methods": ["GET"],
 "parts": {
 "name": {
 "type": "list",
@@ -28,28 +30,27 @@
 "params": {
 "ignore_unavailable": {
 "type": "boolean",
-"description":"Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+"description": "Whether specified concrete indices should be ignored when unavailable (missing or closed). Only allowed when providing an index expression."
 },
 "ignore_throttled": {
 "type": "boolean",
-"description":"Whether specified concrete, expanded or aliased indices should be ignored when throttled"
+"description": "Whether specified concrete, expanded or aliased indices should be ignored when throttled. Only allowed when providing an index expression."
 },
 "allow_no_indices": {
 "type": "boolean",
-"description":"Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+"description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). Only allowed when providing an index expression."
 },
 "expand_wildcards": {
 "type": "enum",
-"options":[
-"open",
-"closed",
-"hidden",
-"none",
-"all"
-],
+"options": ["open", "closed", "hidden", "none", "all"],
 "default": "open",
-"description":"Whether wildcard expressions should get expanded to open or closed indices (default: open)"
+"description": "Whether wildcard expressions should get expanded to open or closed indices (default: open). Only allowed when providing an index expression."
+},
+"timeout": {
+"type": "time",
+"description": "The maximum time to wait for remote clusters to respond"
 }
 }
 }
 }
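To make the spec change concrete, a hedged sketch using the low-level Java REST client (client construction omitted; only the /_resolve/cluster path and the timeout parameter come from the spec above, the rest is illustrative):

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

class ResolveClusterExample {
    // With no index expression, the endpoint reports on all configured remote clusters.
    static Response resolveRemotes(RestClient client) throws Exception {
        Request request = new Request("GET", "/_resolve/cluster");
        request.addParameter("timeout", "30s"); // the parameter added in this change
        return client.performRequest(request);
    }
}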

View file

@@ -39,7 +39,7 @@ setup:
 - do:
 indices.resolve_cluster:
 name: '*'
-expand_wildcards: [closed]
+expand_wildcards: closed
 - match: {(local).connected: true}
 - match: {(local).skip_unavailable: false}
@@ -65,7 +65,7 @@ setup:
 - do:
 indices.resolve_cluster:
 name: 'index2*'
-expand_wildcards: [open,closed]
+expand_wildcards: open,closed
 - match: {(local).connected: true}
 - match: {(local).skip_unavailable: false}
@@ -75,7 +75,7 @@ setup:
 - do:
 indices.resolve_cluster:
 name: 'index2*'
-expand_wildcards: [closed]
+expand_wildcards: closed
 - match: {(local).connected: true}
 - match: {(local).skip_unavailable: false}
@@ -115,7 +115,7 @@ setup:
 - do:
 indices.resolve_cluster:
 name: 'my_alias2,doesnotexist*'
-expand_wildcards: [all]
+expand_wildcards: all
 - match: {(local).connected: true}
 - match: {(local).skip_unavailable: false}
@@ -141,10 +141,10 @@ setup:
 - do:
 indices.resolve_cluster:
 name: '*'
-expand_wildcards: [all]
-ignore_unavailable: [true]
-ignore_throttled: [true]
-allow_no_indices: [true]
+expand_wildcards: all
+ignore_unavailable: true
+ignore_throttled: true
+allow_no_indices: true
 allowed_warnings:
 - "[ignore_throttled] parameter is deprecated because frozen indices have been deprecated. Consider cold or frozen tiers in place of frozen indices."
@@ -157,10 +157,10 @@ setup:
 - do:
 indices.resolve_cluster:
 name: '*'
-expand_wildcards: [open]
-ignore_unavailable: [false]
-ignore_throttled: [false]
-allow_no_indices: [false]
+expand_wildcards: open
+ignore_unavailable: false
+ignore_throttled: false
+allow_no_indices: false
 allowed_warnings:
 - "[ignore_throttled] parameter is deprecated because frozen indices have been deprecated. Consider cold or frozen tiers in place of frozen indices."
@@ -170,3 +170,14 @@ setup:
 - is_false: (local).error # should not be present
 - exists: (local).version.number
+---
+"Resolve cluster with no index expression":
+- requires:
+cluster_features: ["gte_v8.18.0"]
+reason: "resolve cluster with no index expression introduced in 8.18"
+- do:
+indices.resolve_cluster:
+timeout: 400s
+- is_false: (local).error # should not be present - body should be empty since no remotes configured

View file

@@ -395,3 +395,75 @@ setup:
 field: suggest_multi_contexts
 contexts:
 location: []
+---
+"Duplicate suggestions in different contexts":
+- requires:
+cluster_features: [ "search.completion_field.duplicate.support" ]
+reason: "Support for duplicate suggestions in different contexts"
+- do:
+index:
+refresh: true
+index: test
+id: "1"
+body:
+suggest_context:
+-
+input: "foox"
+weight: 2
+contexts:
+color: ["red", "yellow"]
+-
+input: "foox"
+weight: 3
+contexts:
+color: ["blue", "green", "yellow"]
+- do:
+search:
+body:
+suggest:
+result:
+text: "foo"
+completion:
+field: suggest_context
+contexts:
+color: "red"
+- length: { suggest.result: 1 }
+- length: { suggest.result.0.options: 1 }
+- match: { suggest.result.0.options.0.text: "foox" }
+- match: { suggest.result.0.options.0._score: 2 }
+- do:
+search:
+body:
+suggest:
+result:
+text: "foo"
+completion:
+field: suggest_context
+contexts:
+color: "yellow"
+- length: { suggest.result: 1 }
+- length: { suggest.result.0.options: 1 }
+- match: { suggest.result.0.options.0.text: "foox" }
+# the highest weight wins
+- match: { suggest.result.0.options.0._score: 3 }
+- do:
+search:
+body:
+suggest:
+result:
+text: "foo"
+completion:
+field: suggest_context
+contexts:
+color: "blue"
+- length: { suggest.result: 1 }
+- length: { suggest.result.0.options: 1 }
+- match: { suggest.result.0.options.0.text: "foox" }
+- match: { suggest.result.0.options.0._score: 3 }

View file

@@ -268,3 +268,80 @@
 - length: { suggest.result: 1 }
 - length: { suggest.result.0.options: 1 }
+---
+"Duplicate suggestions in different contexts in sub-fields":
+- requires:
+cluster_features: [ "search.completion_field.duplicate.support" ]
+reason: "Support for duplicate suggestions in different contexts"
+- do:
+indices.create:
+index: completion_with_context
+body:
+mappings:
+"properties":
+"suggest_1":
+"type": "completion"
+"contexts":
+-
+"name": "color"
+"type": "category"
+"fields":
+"suggest_2":
+"type": "completion"
+"contexts":
+-
+"name": "color"
+"type": "category"
+- do:
+index:
+refresh: true
+index: completion_with_context
+id: "1"
+body:
+suggest_1:
+-
+input: "foox"
+weight: 2
+contexts:
+color: ["red"]
+-
+input: "foox"
+weight: 3
+contexts:
+color: ["blue", "green"]
+- do:
+search:
+body:
+suggest:
+result:
+text: "foo"
+completion:
+field: suggest_1.suggest_2
+contexts:
+color: "red"
+- length: { suggest.result: 1 }
+- length: { suggest.result.0.options: 1 }
+- match: { suggest.result.0.options.0.text: "foox" }
+- match: { suggest.result.0.options.0._score: 2 }
+- do:
+search:
+body:
+suggest:
+result:
+text: "foo"
+completion:
+field: suggest_1.suggest_2
+contexts:
+color: "blue"
+- length: { suggest.result: 1 }
+- length: { suggest.result.0.options: 1 }
+- match: { suggest.result.0.options.0.text: "foox" }
+- match: { suggest.result.0.options.0._score: 3 }

View file

@@ -14,6 +14,12 @@ setup:
 - synonyms: "bye => goodbye"
 id: "synonym-rule-2"
+# This is to ensure that all index shards (write and read) are available. In serverless this can take some time.
+- do:
+cluster.health:
+index: .synonyms
+wait_for_status: green
 # Create synonyms synonyms_set2
 - do:
 synonyms.put_synonym:
@@ -25,12 +31,6 @@ setup:
 - synonyms: "bye => goodbye"
 id: "synonym-rule-2"
-# This is to ensure that all index shards (write and read) are available. In serverless this can take some time.
-- do:
-cluster.health:
-index: .synonyms
-wait_for_status: green
 # Create my_index1 with synonym_filter that uses synonyms_set1
 - do:
 indices.create:

View file

@@ -0,0 +1,55 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.action.admin.indices.mapping.put;
import org.apache.logging.log4j.Level;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.MockLog;
import org.elasticsearch.test.junit.annotations.TestLogging;
import static org.hamcrest.Matchers.equalTo;
public class PutMappingIT extends ESSingleNodeTestCase {
@TestLogging(
reason = "testing DEBUG logging",
value = "org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction:DEBUG"
)
public void testFailureLogging() {
final var indexName = randomIdentifier();
createIndex(indexName);
final var fieldName = randomIdentifier();
safeGet(client().execute(TransportPutMappingAction.TYPE, new PutMappingRequest(indexName).source(fieldName, "type=keyword")));
MockLog.assertThatLogger(
() -> assertThat(
asInstanceOf(
IllegalArgumentException.class,
safeAwaitFailure(
AcknowledgedResponse.class,
l -> client().execute(
TransportPutMappingAction.TYPE,
new PutMappingRequest(indexName).source(fieldName, "type=long"),
l
)
)
).getMessage(),
equalTo("mapper [" + fieldName + "] cannot be changed from type [keyword] to [long]")
),
TransportPutMappingAction.class,
new MockLog.SeenEventExpectation(
"failure message",
TransportPutMappingAction.class.getCanonicalName(),
Level.DEBUG,
"failed to put mappings on indices [[" + indexName
)
);
}
}

View file

@@ -238,7 +238,6 @@ public class SearchCancellationIT extends AbstractSearchCancellationTestCase {
 }
 }
-@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99929")
 public void testCancelFailedSearchWhenPartialResultDisallowed() throws Exception {
 // Have at least two nodes so that we have parallel execution of two request guaranteed even if max concurrent requests per node
 // are limited to 1

View file

@ -172,6 +172,8 @@ public class TransportVersions {
     public static final TransportVersion LINEAR_RETRIEVER_SUPPORT = def(8_837_00_0);
     public static final TransportVersion TIMEOUT_GET_PARAM_FOR_RESOLVE_CLUSTER = def(8_838_00_0);
     public static final TransportVersion INFERENCE_REQUEST_ADAPTIVE_RATE_LIMITING = def(8_839_00_0);
+    public static final TransportVersion ML_INFERENCE_IBM_WATSONX_RERANK_ADDED = def(8_840_00_0);
+    public static final TransportVersion ELASTICSEARCH_9_0 = def(9_000_00_0);
 
     /*
      * WARNING: DO NOT MERGE INTO MAIN!
@ -241,15 +243,13 @@ public class TransportVersions {
      * Reference to the earliest compatible transport version to this version of the codebase.
      * This should be the transport version used by the highest minor version of the previous major.
      */
-    @UpdateForV9(owner = UpdateForV9.Owner.CORE_INFRA)
-    // This needs to be bumped to the 8.last
-    public static final TransportVersion MINIMUM_COMPATIBLE = V_7_17_0;
+    public static final TransportVersion MINIMUM_COMPATIBLE = BYTE_SIZE_VALUE_ALWAYS_USES_BYTES_1;
 
     /**
      * Reference to the minimum transport version that can be used with CCS.
      * This should be the transport version used by the previous minor release.
      */
-    public static final TransportVersion MINIMUM_CCS_VERSION = V_8_15_0;
+    public static final TransportVersion MINIMUM_CCS_VERSION = BYTE_SIZE_VALUE_ALWAYS_USES_BYTES_1;
 
     /**
      * Sorted list of all versions defined in this class
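The two new constants at the top of this hunk are transport version IDs, which readers and writers use to gate wire-format changes. A minimal hedged sketch of the usual guard (the extraField member is hypothetical, not part of this commit):

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // only serialize the new field when the peer understands it
        if (out.getTransportVersion().onOrAfter(TransportVersions.ML_INFERENCE_IBM_WATSONX_RERANK_ADDED)) {
            out.writeOptionalString(extraField); // hypothetical new field
        }
    }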

View file

@ -39,7 +39,6 @@ import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@ -139,7 +138,7 @@ public class TransportPutMappingAction extends AcknowledgedTransportMasterNodeAc
             performMappingUpdate(concreteIndices, request, listener, metadataMappingService, false);
         } catch (IndexNotFoundException ex) {
-            logger.debug(() -> "failed to put mappings on indices [" + Arrays.asList(request.indices() + "]"), ex);
+            logger.debug(() -> "failed to put mappings on indices " + Arrays.toString(request.indices()), ex);
             throw ex;
         }
     }
@ -179,25 +178,21 @@ public class TransportPutMappingAction extends AcknowledgedTransportMasterNodeAc
         MetadataMappingService metadataMappingService,
         boolean autoUpdate
     ) {
-        final ActionListener<AcknowledgedResponse> wrappedListener = listener.delegateResponse((l, e) -> {
-            logger.debug(() -> "failed to put mappings on indices [" + Arrays.asList(concreteIndices) + "]", e);
-            l.onFailure(e);
-        });
-        final PutMappingClusterStateUpdateRequest updateRequest;
-        try {
-            updateRequest = new PutMappingClusterStateUpdateRequest(
-                request.masterNodeTimeout(),
-                request.ackTimeout(),
-                request.source(),
-                autoUpdate,
-                concreteIndices
-            );
-        } catch (IOException e) {
-            wrappedListener.onFailure(e);
-            return;
-        }
-        metadataMappingService.putMapping(updateRequest, wrappedListener);
+        ActionListener.run(listener.delegateResponse((l, e) -> {
+            logger.debug(() -> "failed to put mappings on indices " + Arrays.toString(concreteIndices), e);
+            l.onFailure(e);
+        }),
+            wrappedListener -> metadataMappingService.putMapping(
+                new PutMappingClusterStateUpdateRequest(
+                    request.masterNodeTimeout(),
+                    request.ackTimeout(),
+                    request.source(),
+                    autoUpdate,
+                    concreteIndices
+                ),
+                wrappedListener
+            )
+        );
     }
 
     static String checkForFailureStoreViolations(ProjectMetadata projectMetadata, Index[] concreteIndices, PutMappingRequest request) {
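The refactor above leans on ActionListener.run, which hands the listener to the given action and, if the action throws, routes the exception into the listener's onFailure instead of letting it propagate. A hedged sketch of the pattern in isolation (mayThrow() is hypothetical):

    ActionListener.run(listener, l -> {
        mayThrow();                              // any exception here becomes l.onFailure(e)
        l.onResponse(AcknowledgedResponse.TRUE); // otherwise complete normally
    });

This is what makes the explicit try/catch around the PutMappingClusterStateUpdateRequest construction unnecessary.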

View file

@ -76,7 +76,7 @@ public abstract class TransportBroadcastUnpromotableAction<Request extends Broad
     @Override
     protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
-        final var unpromotableShards = request.indexShardRoutingTable.unpromotableShards();
+        final var unpromotableShards = request.indexShardRoutingTable.assignedUnpromotableShards();
         final var responses = new ArrayList<Response>(unpromotableShards.size());
         try (var listeners = new RefCountingListener(listener.map(v -> combineUnpromotableShardResponses(responses)))) {

View file

@ -65,7 +65,7 @@ public class PostWriteRefresh {
                 }
             });
             case IMMEDIATE -> immediate(indexShard, listener.delegateFailureAndWrap((l, r) -> {
-                if (indexShard.getReplicationGroup().getRoutingTable().allUnpromotableShards().size() > 0) {
+                if (indexShard.getReplicationGroup().getRoutingTable().unpromotableShards().size() > 0) {
                     sendUnpromotableRequests(indexShard, r.generation(), true, l, postWriteRefreshTimeout);
                 } else {
                     l.onResponse(true);

View file

@ -242,7 +242,13 @@ class Elasticsearch {
             pluginsLoader = PluginsLoader.createPluginsLoader(modulesBundles, pluginsBundles, findPluginsWithNativeAccess(pluginPolicies));
             var pluginsResolver = PluginsResolver.create(pluginsLoader);
 
-            EntitlementBootstrap.bootstrap(pluginPolicies, pluginsResolver::resolveClassToPluginName);
+            EntitlementBootstrap.bootstrap(
+                pluginPolicies,
+                pluginsResolver::resolveClassToPluginName,
+                nodeEnv.dataFiles(),
+                nodeEnv.configFile(),
+                nodeEnv.tmpFile()
+            );
         } else if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
             // no need to explicitly enable native access for legacy code
             pluginsLoader = PluginsLoader.createPluginsLoader(modulesBundles, pluginsBundles, Map.of());

View file

@ -56,7 +56,7 @@ public class MetadataMappingService {
     public MetadataMappingService(ClusterService clusterService, IndicesService indicesService) {
         this.clusterService = clusterService;
         this.indicesService = indicesService;
-        taskQueue = clusterService.createTaskQueue("put-mapping", Priority.HIGH, new PutMappingExecutor());
+        this.taskQueue = clusterService.createTaskQueue("put-mapping", Priority.HIGH, new PutMappingExecutor());
     }
 
     record PutMappingClusterStateUpdateTask(PutMappingClusterStateUpdateRequest request, ActionListener<AcknowledgedResponse> listener)

View file

@ -193,7 +193,7 @@ public class IndexShardRoutingTable {
      *
      * @return a {@link List} of shards
      */
-    public List<ShardRouting> unpromotableShards() {
+    public List<ShardRouting> assignedUnpromotableShards() {
         return this.assignedUnpromotableShards;
     }
@ -202,7 +202,7 @@ public class IndexShardRoutingTable {
      *
      * @return a {@link List} of shards
      */
-    public List<ShardRouting> allUnpromotableShards() {
+    public List<ShardRouting> unpromotableShards() {
         return this.unpromotableShards;
     }
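The two accessors swap names while keeping their bodies, so every call site needs a mechanical rename; a hedged before/after sketch:

    // before this commit                          // after this commit
    table.unpromotableShards();                    // -> table.assignedUnpromotableShards()
    table.allUnpromotableShards();                 // -> table.unpromotableShards()

The call-site updates in TransportBroadcastUnpromotableAction and PostWriteRefresh above follow exactly this mapping.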

View file

@ -22,6 +22,12 @@ public class LoggerFactoryImpl extends LoggerFactory {
     @Override
     public Logger getLogger(Class<?> clazz) {
-        return new LoggerImpl(LogManager.getLogger(clazz));
+        // Elasticsearch configures logging at the root level, it does not support
+        // programmatic configuration at the logger level. Log4j's method for
+        // getting a logger by Class doesn't just use the class name, but also
+        // scans the classloader hierarchy for programmatic configuration. Here we
+        // just delegate to use the String class name so that regardless of which
+        // classloader a class comes from, we will use the root logging config.
+        return getLogger(clazz.getName());
     }
 }
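A hedged illustration of the intent (com.example.Foo and loggerFactory are hypothetical): after this change a lookup by Class reduces to a lookup by name, so the root configuration applies no matter which classloader loaded the class:

    Logger byClass = loggerFactory.getLogger(com.example.Foo.class); // now delegates to...
    Logger byName = loggerFactory.getLogger("com.example.Foo");      // ...this name-based lookup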

View file

@ -614,6 +614,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
         DataStreamLifecycle.CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING,
         IndicesClusterStateService.SHARD_LOCK_RETRY_INTERVAL_SETTING,
         IndicesClusterStateService.SHARD_LOCK_RETRY_TIMEOUT_SETTING,
+        IndicesClusterStateService.CONCURRENT_SHARD_CLOSE_LIMIT,
         IngestSettings.GROK_WATCHDOG_INTERVAL,
         IngestSettings.GROK_WATCHDOG_MAX_EXECUTION_TIME,
         TDigestExecutionHint.SETTING,

View file

@ -392,7 +392,7 @@ public class CompletionFieldMapper extends FieldMapper {
         // parse
         XContentParser parser = context.parser();
         Token token = parser.currentToken();
-        Map<String, CompletionInputMetadata> inputMap = Maps.newMapWithExpectedSize(1);
+        Map<String, CompletionInputMetadataContainer> inputMap = Maps.newMapWithExpectedSize(1);
 
         if (token == Token.VALUE_NULL) { // ignore null values
             return;
@ -405,7 +405,7 @@ public class CompletionFieldMapper extends FieldMapper {
         }
 
         // index
-        for (Map.Entry<String, CompletionInputMetadata> completionInput : inputMap.entrySet()) {
+        for (Map.Entry<String, CompletionInputMetadataContainer> completionInput : inputMap.entrySet()) {
             String input = completionInput.getKey();
             if (input.trim().isEmpty()) {
                 context.addIgnoredField(mappedFieldType.name());
@ -420,22 +420,34 @@ public class CompletionFieldMapper extends FieldMapper {
                 }
                 input = input.substring(0, len);
             }
-            CompletionInputMetadata metadata = completionInput.getValue();
+            CompletionInputMetadataContainer cmc = completionInput.getValue();
             if (fieldType().hasContextMappings()) {
+                for (CompletionInputMetadata metadata : cmc.getValues()) {
                     fieldType().getContextMappings().addField(context.doc(), fieldType().name(), input, metadata.weight, metadata.contexts);
+                }
             } else {
-                context.doc().add(new SuggestField(fieldType().name(), input, metadata.weight));
+                context.doc().add(new SuggestField(fieldType().name(), input, cmc.getWeight()));
             }
         }
         context.addToFieldNames(fieldType().name());
-        for (CompletionInputMetadata metadata : inputMap.values()) {
-            multiFields().parse(
-                this,
-                context,
-                () -> context.switchParser(new MultiFieldParser(metadata, fieldType().name(), context.parser().getTokenLocation()))
-            );
+        for (CompletionInputMetadataContainer cmc : inputMap.values()) {
+            if (fieldType().hasContextMappings()) {
+                for (CompletionInputMetadata metadata : cmc.getValues()) {
+                    multiFields().parse(
+                        this,
+                        context,
+                        () -> context.switchParser(new MultiFieldParser(metadata, fieldType().name(), context.parser().getTokenLocation()))
+                    );
+                }
+            } else {
+                CompletionInputMetadata metadata = cmc.getValue();
+                multiFields().parse(
+                    this,
+                    context,
+                    () -> context.switchParser(new MultiFieldParser(metadata, fieldType().name(), context.parser().getTokenLocation()))
+                );
+            }
+        }
     }
@ -447,11 +459,13 @@ public class CompletionFieldMapper extends FieldMapper {
         DocumentParserContext documentParserContext,
         Token token,
         XContentParser parser,
-        Map<String, CompletionInputMetadata> inputMap
+        Map<String, CompletionInputMetadataContainer> inputMap
     ) throws IOException {
         String currentFieldName = null;
         if (token == Token.VALUE_STRING) {
-            inputMap.put(parser.text(), new CompletionInputMetadata(parser.text(), Collections.<String, Set<String>>emptyMap(), 1));
+            CompletionInputMetadataContainer cmc = new CompletionInputMetadataContainer(fieldType().hasContextMappings());
+            cmc.add(new CompletionInputMetadata(parser.text(), Collections.emptyMap(), 1));
+            inputMap.put(parser.text(), cmc);
         } else if (token == Token.START_OBJECT) {
             Set<String> inputs = new HashSet<>();
             int weight = 1;
@ -531,8 +545,14 @@ public class CompletionFieldMapper extends FieldMapper {
                 }
             }
             for (String input : inputs) {
-                if (inputMap.containsKey(input) == false || inputMap.get(input).weight < weight) {
-                    inputMap.put(input, new CompletionInputMetadata(input, contextsMap, weight));
+                CompletionInputMetadata cm = new CompletionInputMetadata(input, contextsMap, weight);
+                CompletionInputMetadataContainer cmc = inputMap.get(input);
+                if (cmc != null) {
+                    cmc.add(cm);
+                } else {
+                    cmc = new CompletionInputMetadataContainer(fieldType().hasContextMappings());
+                    cmc.add(cm);
+                    inputMap.put(input, cmc);
                 }
             }
         } else {
@ -543,10 +563,46 @@ public class CompletionFieldMapper extends FieldMapper {
         }
     }
 
+    static class CompletionInputMetadataContainer {
+        private final boolean hasContexts;
+        private final List<CompletionInputMetadata> list;
+        private CompletionInputMetadata single;
+
+        CompletionInputMetadataContainer(boolean hasContexts) {
+            this.hasContexts = hasContexts;
+            this.list = hasContexts ? new ArrayList<>() : null;
+        }
+
+        void add(CompletionInputMetadata cm) {
+            if (hasContexts) {
+                list.add(cm);
+            } else {
+                if (single == null || single.weight < cm.weight) {
+                    single = cm;
+                }
+            }
+        }
+
+        List<CompletionInputMetadata> getValues() {
+            assert hasContexts;
+            return list;
+        }
+
+        CompletionInputMetadata getValue() {
+            assert hasContexts == false;
+            return single;
+        }
+
+        int getWeight() {
+            assert hasContexts == false;
+            return single.weight;
+        }
+    }
+
     static class CompletionInputMetadata {
-        public final String input;
-        public final Map<String, Set<String>> contexts;
-        public final int weight;
+        private final String input;
+        private final Map<String, Set<String>> contexts;
+        private final int weight;
 
         CompletionInputMetadata(String input, Map<String, Set<String>> contexts, int weight) {
             this.input = input;
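The container encodes the new duplicate-handling rule: with context mappings every duplicate input is kept, without them only the highest-weight duplicate survives. A hedged sketch of the no-contexts case:

    CompletionInputMetadataContainer cmc = new CompletionInputMetadataContainer(false); // no context mappings
    cmc.add(new CompletionInputMetadata("timmy", Map.of(), 1));
    cmc.add(new CompletionInputMetadata("timmy", Map.of(), 10));
    assert cmc.getWeight() == 10; // only the heavier duplicate is retained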

View file

@ -29,6 +29,7 @@ import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.engine.TranslogOperationAsserter;
 import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.search.lookup.Source;
 
 import java.io.Closeable;
 import java.io.IOException;
@ -298,8 +299,10 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
                     + "], with different data. "
                     + "prvOp ["
                     + prvOp
+                    + (prvOp instanceof Translog.Index index ? " source: " + Source.fromBytes(index.source()).source() : "")
                     + "], newOp ["
                     + newOp
+                    + (newOp instanceof Translog.Index index ? " source: " + Source.fromBytes(index.source()).source() : "")
                     + "]",
                 previous.v2()
             );
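Source.fromBytes(...).source() materializes an operation's raw _source bytes as a Map for the assertion message. The same idiom in a hedged standalone sketch (op is a hypothetical Translog.Operation):

    if (op instanceof Translog.Index index) {
        // render the operation's _source as a Map for human-readable diagnostics
        Map<String, Object> sourceAsMap = Source.fromBytes(index.source()).source();
        logger.debug("conflicting op source: {}", sourceAsMap);
    }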

View file

@ -118,6 +118,18 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
         Setting.Property.NodeScope
     );
 
+    /**
+     * Maximum number of shards to try and close concurrently. Defaults to the smaller of {@code node.processors} and {@code 10}, but can be
+     * set to any positive integer.
+     */
+    public static final Setting<Integer> CONCURRENT_SHARD_CLOSE_LIMIT = Setting.intSetting(
+        "indices.store.max_concurrent_closing_shards",
+        settings -> Integer.toString(Math.min(10, EsExecutors.NODE_PROCESSORS_SETTING.get(settings).roundUp())),
+        1,
+        Integer.MAX_VALUE,
+        Setting.Property.NodeScope
+    );
+
     final AllocatedIndices<? extends Shard, ? extends AllocatedIndex<? extends Shard>> indicesService;
     private final ClusterService clusterService;
     private final ThreadPool threadPool;
@ -1356,7 +1368,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
         }
     }
 
-    private static class ShardCloseExecutor implements Executor {
+    static class ShardCloseExecutor implements Executor {
 
         private final ThrottledTaskRunner throttledTaskRunner;
@ -1369,8 +1381,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
             // can't close the old ones down fast enough. Maybe we could block or throttle new shards starting while old shards are still
             // shutting down, given that starting new shards is already async. Since this seems unlikely in practice, we opt for the simple
             // approach here.
-            final var maxThreads = Math.max(EsExecutors.NODE_PROCESSORS_SETTING.get(settings).roundUp(), 10);
-            throttledTaskRunner = new ThrottledTaskRunner(IndicesClusterStateService.class.getCanonicalName(), maxThreads, delegate);
+            throttledTaskRunner = new ThrottledTaskRunner(
+                IndicesClusterStateService.class.getCanonicalName(),
+                CONCURRENT_SHARD_CLOSE_LIMIT.get(settings),
+                delegate
+            );
         }
 
         @Override
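Since the close throttle is now a registered node-scope setting rather than a hard-coded bound, it can be pinned explicitly; a hedged sketch mirroring what ShardCloseExecutorTests does further down:

    Settings settings = Settings.builder()
        .put(IndicesClusterStateService.CONCURRENT_SHARD_CLOSE_LIMIT.getKey(), 5) // close at most 5 shards at once
        .build();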

View file

@ -1088,7 +1088,7 @@ class NodeConstruction {
             actionModule.getReservedClusterStateService().installClusterStateHandler(new ReservedRepositoryAction(repositoriesService));
             actionModule.getReservedClusterStateService().installProjectStateHandler(new ReservedPipelineAction());
 
-            FileSettingsHealthIndicatorService fileSettingsHealthIndicatorService = new FileSettingsHealthIndicatorService();
+            FileSettingsHealthIndicatorService fileSettingsHealthIndicatorService = new FileSettingsHealthIndicatorService(settings);
             FileSettingsService fileSettingsService = pluginsService.loadSingletonServiceProvider(
                 FileSettingsServiceProvider.class,
                 () -> FileSettingsService::new

View file

@ -22,6 +22,8 @@ import org.elasticsearch.cluster.metadata.ReservedStateMetadata;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.file.MasterNodeFileWatchingService;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.health.HealthIndicatorDetails;
 import org.elasticsearch.health.HealthIndicatorImpact;
@ -235,7 +237,7 @@ public class FileSettingsService extends MasterNodeFileWatchingService implement
     }
 
     @Override
-    protected void processInitialFilesMissing() throws ExecutionException, InterruptedException, IOException {
+    protected void processInitialFilesMissing() throws ExecutionException, InterruptedException {
         PlainActionFuture<ActionResponse.Empty> completion = new PlainActionFuture<>();
         logger().info("setting file [{}] not found, initializing [{}] as empty", watchedFile, NAMESPACE);
         stateService.initEmpty(NAMESPACE, completion);
@ -259,11 +261,29 @@ public class FileSettingsService extends MasterNodeFileWatchingService implement
             )
         );
 
+        /**
+         * We want a length limit so we don't blow past the indexing limit in the case of a long description string.
+         * This is an {@code OperatorDynamic} setting so that if the truncation hampers troubleshooting efforts,
+         * the operator could override it and retry the operation without necessarily restarting the cluster.
+         */
+        public static final String DESCRIPTION_LENGTH_LIMIT_KEY = "fileSettings.descriptionLengthLimit";
+        static final Setting<Integer> DESCRIPTION_LENGTH_LIMIT = Setting.intSetting(
+            DESCRIPTION_LENGTH_LIMIT_KEY,
+            100,
+            1, // Need room for the ellipsis
+            Setting.Property.OperatorDynamic
+        );
+
+        private final Settings settings;
+
         private boolean isActive = false;
         private long changeCount = 0;
         private long failureStreak = 0;
         private String mostRecentFailure = null;
 
+        public FileSettingsHealthIndicatorService(Settings settings) {
+            this.settings = settings;
+        }
+
         public synchronized void startOccurred() {
             isActive = true;
             failureStreak = 0;
@ -285,7 +305,16 @@ public class FileSettingsService extends MasterNodeFileWatchingService implement
         public synchronized void failureOccurred(String description) {
             ++failureStreak;
-            mostRecentFailure = description;
+            mostRecentFailure = limitLength(description);
+        }
+
+        private String limitLength(String description) {
+            int descriptionLengthLimit = DESCRIPTION_LENGTH_LIMIT.get(settings);
+            if (description.length() > descriptionLengthLimit) {
+                return description.substring(0, descriptionLengthLimit - 1) + "…";
+            } else {
+                return description;
+            }
         }
 
         @Override
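A worked example of the truncation arithmetic, matching the unit test added later in this commit: one slot is reserved for the ellipsis, so with the limit at 8 the substring keeps 7 characters:

    // with fileSettings.descriptionLengthLimit = 8
    "123456789".substring(0, 7) + "…"  // -> "1234567…"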

View file

@ -25,9 +25,12 @@ public final class SearchFeatures implements FeatureSpecification {
     }
 
     public static final NodeFeature RETRIEVER_RESCORER_ENABLED = new NodeFeature("search.retriever.rescorer.enabled");
+    public static final NodeFeature COMPLETION_FIELD_SUPPORTS_DUPLICATE_SUGGESTIONS = new NodeFeature(
+        "search.completion_field.duplicate.support"
+    );
 
     @Override
     public Set<NodeFeature> getTestFeatures() {
-        return Set.of(RETRIEVER_RESCORER_ENABLED);
+        return Set.of(RETRIEVER_RESCORER_ENABLED, COMPLETION_FIELD_SUPPORTS_DUPLICATE_SUGGESTIONS);
     }
 }

View file

@ -9,7 +9,6 @@
 
 package org.elasticsearch;
 
-import org.elasticsearch.core.FixForMultiProject;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.TransportVersionUtils;
 
@ -17,7 +16,6 @@ import java.lang.reflect.Modifier;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
-import java.util.TreeSet;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@ -200,44 +198,4 @@ public class TransportVersionTests extends ESTestCase {
         assertEquals("2000099", TransportVersion.fromId(2_00_00_99).toString());
         assertEquals("5000099", TransportVersion.fromId(5_00_00_99).toString());
     }
-
-    /**
-     * Until 9.0 bumps its transport version to 9_000_00_0, all transport changes must be backported to 8.x.
-     * This test ensures transport versions are dense, so that we have confidence backports have not been missed.
-     * Note that it does not ensure patches are not missed, but it should catch the majority of misordered
-     * or missing transport versions.
-     */
-    @FixForMultiProject
-    public void testDenseTransportVersions() {
-        Set<Integer> missingVersions = new TreeSet<>();
-        TransportVersion previous = null;
-        for (var tv : TransportVersion.getAllVersions()) {
-            if (tv.before(TransportVersions.V_8_16_0)) {
-                continue;
-            }
-            if (tv.equals(TransportVersions.MULTI_PROJECT)) {
-                continue; // Temporary work around. Remove this when Multi-Project merges to main (ES-7708)
-            }
-            if (previous == null) {
-                previous = tv;
-                continue;
-            }
-            if (previous.id() + 1000 < tv.id()) {
-                int nextId = previous.id();
-                do {
-                    nextId = (nextId + 1000) / 1000 * 1000;
-                    missingVersions.add(nextId);
-                } while (nextId + 1000 < tv.id());
-            }
-            previous = tv;
-        }
-        if (missingVersions.isEmpty() == false) {
-            StringBuilder msg = new StringBuilder("Missing transport versions:\n");
-            for (Integer id : missingVersions) {
-                msg.append(" " + id + "\n");
-            }
-            fail(msg.toString());
-        }
-    }
 }

View file

@ -20,7 +20,6 @@ import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.AbstractWireSerializingTestCase;
 import org.elasticsearch.test.TransportVersionUtils;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
@ -37,7 +36,6 @@ import static org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseT
 import static org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseTests.randomMappingHashToIndices;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.nullValue;
 
 public class FieldCapabilitiesNodeResponseTests extends AbstractWireSerializingTestCase<FieldCapabilitiesNodeResponse> {
 
@ -145,48 +143,6 @@ public class FieldCapabilitiesNodeResponseTests extends AbstractWireSerializingT
         }
     }
 
-    public void testSerializeNodeResponseBetweenOldNodes() throws IOException {
-        final TransportVersion minCompactVersion = TransportVersions.MINIMUM_COMPATIBLE;
-        assertTrue("Remove this test once minCompactVersion >= 8.2.0", minCompactVersion.before(TransportVersions.V_8_2_0));
-        List<FieldCapabilitiesIndexResponse> indexResponses = CollectionUtils.concatLists(
-            randomIndexResponsesWithMappingHash(randomMappingHashToIndices()),
-            randomIndexResponsesWithoutMappingHash()
-        );
-        Randomness.shuffle(indexResponses);
-        FieldCapabilitiesNodeResponse inResponse = randomNodeResponse(indexResponses);
-        TransportVersion version = TransportVersionUtils.randomVersionBetween(
-            random(),
-            minCompactVersion,
-            TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_2_0)
-        );
-        final FieldCapabilitiesNodeResponse outResponse = copyInstance(inResponse, version);
-        assertThat(outResponse.getFailures().keySet(), equalTo(inResponse.getFailures().keySet()));
-        assertThat(outResponse.getUnmatchedShardIds(), equalTo(inResponse.getUnmatchedShardIds()));
-        final List<FieldCapabilitiesIndexResponse> inList = inResponse.getIndexResponses();
-        final List<FieldCapabilitiesIndexResponse> outList = outResponse.getIndexResponses();
-        assertThat(outList, hasSize(inList.size()));
-        for (int i = 0; i < inList.size(); i++) {
-            assertThat("Responses between old nodes don't have mapping hash", outList.get(i).getIndexMappingHash(), nullValue());
-            assertThat(outList.get(i).getIndexName(), equalTo(inList.get(i).getIndexName()));
-            assertThat(outList.get(i).canMatch(), equalTo(inList.get(i).canMatch()));
-            Map<String, IndexFieldCapabilities> outCap = outList.get(i).get();
-            Map<String, IndexFieldCapabilities> inCap = inList.get(i).get();
-            if (version.onOrAfter(TransportVersions.V_8_0_0)) {
-                assertThat(outCap, equalTo(inCap));
-            } else {
-                // Exclude metric types which was introduced in 8.0
-                assertThat(outCap.keySet(), equalTo(inCap.keySet()));
-                for (String field : outCap.keySet()) {
-                    assertThat(outCap.get(field).name(), equalTo(inCap.get(field).name()));
-                    assertThat(outCap.get(field).type(), equalTo(inCap.get(field).type()));
-                    assertThat(outCap.get(field).isSearchable(), equalTo(inCap.get(field).isSearchable()));
-                    assertThat(outCap.get(field).isAggregatable(), equalTo(inCap.get(field).isAggregatable()));
-                    assertThat(outCap.get(field).meta(), equalTo(inCap.get(field).meta()));
-                }
-            }
-        }
-    }
-
     private static FieldCapabilitiesNodeResponse randomNodeResponse(List<FieldCapabilitiesIndexResponse> indexResponses) {
         int numUnmatched = randomIntBetween(0, 3);
         final Set<ShardId> unmatchedShardIds = new HashSet<>();

View file

@ -40,7 +40,6 @@ import static org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseT
 import static org.elasticsearch.action.fieldcaps.FieldCapabilitiesIndexResponseTests.randomMappingHashToIndices;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.nullValue;
 
 public class FieldCapabilitiesResponseTests extends AbstractWireSerializingTestCase<FieldCapabilitiesResponse> {
 
@ -198,48 +197,4 @@ public class FieldCapabilitiesResponseTests extends AbstractWireSerializingTestC
             }
         }
     }
-
-    public void testSerializeCCSResponseBetweenOldClusters() throws IOException {
-        TransportVersion minCompactVersion = TransportVersions.MINIMUM_COMPATIBLE;
-        assertTrue("Remove this test once minCompactVersion >= 8.2.0", minCompactVersion.before(TransportVersions.V_8_2_0));
-        List<FieldCapabilitiesIndexResponse> indexResponses = CollectionUtils.concatLists(
-            randomIndexResponsesWithMappingHash(randomMappingHashToIndices()),
-            randomIndexResponsesWithoutMappingHash()
-        );
-        Randomness.shuffle(indexResponses);
-        FieldCapabilitiesResponse inResponse = randomCCSResponse(indexResponses);
-        TransportVersion version = TransportVersionUtils.randomVersionBetween(
-            random(),
-            minCompactVersion,
-            TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_2_0)
-        );
-        final FieldCapabilitiesResponse outResponse = copyInstance(inResponse, version);
-        assertThat(
-            outResponse.getFailures().stream().flatMap(f -> Arrays.stream(f.getIndices())).toList(),
-            equalTo(inResponse.getFailures().stream().flatMap(f -> Arrays.stream(f.getIndices())).toList())
-        );
-        final List<FieldCapabilitiesIndexResponse> inList = inResponse.getIndexResponses();
-        final List<FieldCapabilitiesIndexResponse> outList = outResponse.getIndexResponses();
-        assertThat(outList, hasSize(inList.size()));
-        for (int i = 0; i < inList.size(); i++) {
-            assertThat("Responses between old clusters don't have mapping hash", outList.get(i).getIndexMappingHash(), nullValue());
-            assertThat(outList.get(i).getIndexName(), equalTo(inList.get(i).getIndexName()));
-            assertThat(outList.get(i).canMatch(), equalTo(inList.get(i).canMatch()));
-            Map<String, IndexFieldCapabilities> outCap = outList.get(i).get();
-            Map<String, IndexFieldCapabilities> inCap = inList.get(i).get();
-            if (version.onOrAfter(TransportVersions.V_8_0_0)) {
-                assertThat(outCap, equalTo(inCap));
-            } else {
-                // Exclude metric types which was introduced in 8.0
-                assertThat(outCap.keySet(), equalTo(inCap.keySet()));
-                for (String field : outCap.keySet()) {
-                    assertThat(outCap.get(field).name(), equalTo(inCap.get(field).name()));
-                    assertThat(outCap.get(field).type(), equalTo(inCap.get(field).type()));
-                    assertThat(outCap.get(field).isSearchable(), equalTo(inCap.get(field).isSearchable()));
-                    assertThat(outCap.get(field).isAggregatable(), equalTo(inCap.get(field).isAggregatable()));
-                    assertThat(outCap.get(field).meta(), equalTo(inCap.get(field).meta()));
-                }
-            }
-        }
-    }
 }

View file

@ -220,7 +220,6 @@ public class SearchQueryThenFetchAsyncActionTests extends ESTestCase {
             assertFalse(canReturnNullResponse.get());
             assertThat(numWithTopDocs.get(), equalTo(0));
         } else {
-            assertTrue(canReturnNullResponse.get());
             if (withCollapse) {
                 assertThat(numWithTopDocs.get(), equalTo(0));
             } else {

View file

@ -333,7 +333,7 @@ public class TransportBroadcastUnpromotableActionTests extends ESTestCase {
         // We were able to mark shards as stale, so the request finishes successfully
         assertThat(safeAwait(broadcastUnpromotableRequest(wrongRoutingTable, true)), equalTo(ActionResponse.Empty.INSTANCE));
-        for (var shardRouting : wrongRoutingTable.unpromotableShards()) {
+        for (var shardRouting : wrongRoutingTable.assignedUnpromotableShards()) {
             Mockito.verify(shardStateAction)
                 .remoteShardFailed(
                     eq(shardRouting.shardId()),

View file

@ -162,7 +162,7 @@ public class PostWriteRefreshTests extends IndexShardTestCase {
             new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "message"),
             ShardRouting.Role.SEARCH_ONLY
         );
-        when(routingTable.allUnpromotableShards()).thenReturn(List.of(shardRouting));
+        when(routingTable.unpromotableShards()).thenReturn(List.of(shardRouting));
         when(routingTable.shardId()).thenReturn(shardId);
         WriteRequest.RefreshPolicy policy = randomFrom(WriteRequest.RefreshPolicy.IMMEDIATE, WriteRequest.RefreshPolicy.WAIT_UNTIL);
         postWriteRefresh.refreshShard(policy, primary, result.getTranslogLocation(), f, postWriteRefreshTimeout);
@ -238,9 +238,9 @@ public class PostWriteRefreshTests extends IndexShardTestCase {
         );
         // Randomly test scenarios with and without unpromotables
         if (randomBoolean()) {
-            when(routingTable.allUnpromotableShards()).thenReturn(Collections.emptyList());
+            when(routingTable.unpromotableShards()).thenReturn(Collections.emptyList());
         } else {
-            when(routingTable.allUnpromotableShards()).thenReturn(List.of(shardRouting));
+            when(routingTable.unpromotableShards()).thenReturn(List.of(shardRouting));
         }
         WriteRequest.RefreshPolicy policy = WriteRequest.RefreshPolicy.WAIT_UNTIL;
         postWriteRefresh.refreshShard(policy, primary, null, f, postWriteRefreshTimeout);

View file

@ -612,11 +612,7 @@ public class ShardStateActionTests extends ESTestCase {
         final String allocationId = randomRealisticUnicodeOfCodepointLengthBetween(10, 100);
         final long primaryTerm = randomIntBetween(0, 100);
         final String message = randomRealisticUnicodeOfCodepointLengthBetween(10, 100);
-        final TransportVersion version = randomFrom(
-            getFirstVersion(),
-            getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE),
-            getPreviousVersion(TransportVersions.V_8_15_0)
-        );
+        final TransportVersion version = randomFrom(getFirstVersion(), getPreviousVersion(TransportVersions.V_8_15_0));
         final ShardLongFieldRange timestampRange = ShardLongFieldRangeWireTests.randomRange();
         final ShardLongFieldRange eventIngestedRange = ShardLongFieldRangeWireTests.randomRange();
         var startedShardEntry = new StartedShardEntry(shardId, allocationId, primaryTerm, message, timestampRange, eventIngestedRange);

View file

@ -31,8 +31,6 @@ import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static org.elasticsearch.test.NodeRoles.nonRemoteClusterClientNode;
 import static org.elasticsearch.test.NodeRoles.remoteClusterClientNode;
-import static org.elasticsearch.test.TransportVersionUtils.getPreviousVersion;
-import static org.elasticsearch.test.TransportVersionUtils.randomVersionBetween;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@ -274,39 +272,5 @@ public class DiscoveryNodeTests extends ESTestCase {
                 }
             }
         }
-        {
-            var oldVersion = randomVersionBetween(
-                random(),
-                TransportVersions.MINIMUM_COMPATIBLE,
-                getPreviousVersion(TransportVersions.NODE_VERSION_INFORMATION_WITH_MIN_READ_ONLY_INDEX_VERSION)
-            );
-            try (var out = new BytesStreamOutput()) {
-                out.setTransportVersion(oldVersion);
-                node.writeTo(out);
-                try (var in = StreamInput.wrap(out.bytes().array())) {
-                    in.setTransportVersion(oldVersion);
-                    var deserialized = new DiscoveryNode(in);
-                    assertThat(deserialized.getId(), equalTo(node.getId()));
-                    assertThat(deserialized.getAddress(), equalTo(node.getAddress()));
-                    assertThat(deserialized.getMinIndexVersion(), equalTo(node.getMinIndexVersion()));
-                    assertThat(deserialized.getMaxIndexVersion(), equalTo(node.getMaxIndexVersion()));
-                    assertThat(deserialized.getMinReadOnlyIndexVersion(), equalTo(node.getMinIndexVersion()));
-                    assertThat(
-                        deserialized.getVersionInformation(),
-                        equalTo(
-                            new VersionInformation(
-                                node.getBuildVersion(),
-                                node.getMinIndexVersion(),
-                                node.getMinIndexVersion(),
-                                node.getMaxIndexVersion()
-                            )
-                        )
-                    );
-                }
-            }
-        }
     }
 }

View file

@ -303,6 +303,55 @@ public class CompletionFieldMapperTests extends MapperTestCase {
         );
     }
 
+    public void testDuplicateSuggestionsWithContexts() throws IOException {
+        DocumentMapper defaultMapper = createDocumentMapper(fieldMapping(b -> {
+            b.field("type", "completion");
+            b.startArray("contexts");
+            {
+                b.startObject();
+                b.field("name", "place");
+                b.field("type", "category");
+                b.endObject();
+            }
+            b.endArray();
+        }));
+
+        ParsedDocument parsedDocument = defaultMapper.parse(source(b -> {
+            b.startArray("field");
+            {
+                b.startObject();
+                {
+                    b.array("input", "timmy", "starbucks");
+                    b.startObject("contexts").array("place", "cafe", "food").endObject();
+                    b.field("weight", 10);
+                }
+                b.endObject();
+                b.startObject();
+                {
+                    b.array("input", "timmy", "starbucks");
+                    b.startObject("contexts").array("place", "restaurant").endObject();
+                    b.field("weight", 1);
+                }
+                b.endObject();
+            }
+            b.endArray();
+        }));
+
+        List<IndexableField> indexedFields = parsedDocument.rootDoc().getFields("field");
+        assertThat(indexedFields, hasSize(4));
+        assertThat(
+            indexedFields,
+            containsInAnyOrder(
+                contextSuggestField("timmy"),
+                contextSuggestField("timmy"),
+                contextSuggestField("starbucks"),
+                contextSuggestField("starbucks")
+            )
+        );
+    }
+
     public void testCompletionWithContextAndSubCompletion() throws Exception {
         DocumentMapper defaultMapper = createDocumentMapper(fieldMapping(b -> {
             b.field("type", "completion");

View file

@ -22,7 +22,6 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.indices.IndicesModule;
 import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.test.TransportVersionUtils;
 import org.elasticsearch.test.index.IndexVersionUtils;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.hamcrest.CoreMatchers;
@ -327,11 +326,7 @@ public class MappingParserTests extends MapperServiceTestCase {
             IndexVersions.MINIMUM_READONLY_COMPATIBLE,
             IndexVersions.V_8_5_0
         );
-        TransportVersion transportVersion = TransportVersionUtils.randomVersionBetween(
-            random(),
-            TransportVersions.MINIMUM_COMPATIBLE,
-            TransportVersions.V_8_5_0
-        );
+        TransportVersion transportVersion = TransportVersions.V_8_5_0;
         {
             XContentBuilder builder = mapping(b -> b.startObject(" ").field("type", randomFieldType()).endObject());
             MappingParser mappingParser = createMappingParser(Settings.EMPTY, version, transportVersion);

View file

@ -0,0 +1,69 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.indices.cluster;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
import java.util.concurrent.atomic.AtomicInteger;
public class ShardCloseExecutorTests extends ESTestCase {
public void testThrottling() {
// This defaults to the number of CPUs of the machine running the tests which could be either side of 10.
final var defaultProcessors = EsExecutors.NODE_PROCESSORS_SETTING.get(Settings.EMPTY).roundUp();
ensureThrottling(Math.min(10, defaultProcessors), Settings.EMPTY);
if (10 < defaultProcessors) {
ensureThrottling(
10,
Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), between(10, defaultProcessors - 1)).build()
);
} // else we cannot run this check, the machine running the tests doesn't have enough CPUs
if (1 < defaultProcessors) {
final var fewProcessors = between(1, Math.min(10, defaultProcessors - 1));
ensureThrottling(fewProcessors, Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), fewProcessors).build());
} // else we cannot run this check, the machine running the tests has less than 2 whole CPUs (and we already tested the 1 case)
// but in any case we can override the throttle regardless of its default value
final var override = between(1, defaultProcessors * 2);
ensureThrottling(
override,
Settings.builder().put(IndicesClusterStateService.CONCURRENT_SHARD_CLOSE_LIMIT.getKey(), override).build()
);
}
private static void ensureThrottling(int expectedLimit, Settings settings) {
final var tasksToRun = new ArrayList<Runnable>(expectedLimit + 1);
final var executor = new IndicesClusterStateService.ShardCloseExecutor(settings, tasksToRun::add);
final var runCount = new AtomicInteger();
// enqueue one more task than the throttling limit
for (int i = 0; i < expectedLimit + 1; i++) {
executor.execute(runCount::incrementAndGet);
}
// check that we submitted tasks up to the expected limit, holding back the final task behind the throttle for now
assertEquals(expectedLimit, tasksToRun.size());
// now execute all the tasks one by one
for (int i = 0; i < expectedLimit + 1; i++) {
assertEquals(i, runCount.get());
tasksToRun.get(i).run();
assertEquals(i + 1, runCount.get());
// executing the first task enqueues the final task
assertEquals(expectedLimit + 1, tasksToRun.size());
}
}
}

View file

@ -9,6 +9,7 @@
 
 package org.elasticsearch.reservedstate.service;
 
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.health.HealthIndicatorDetails;
 import org.elasticsearch.health.HealthIndicatorResult;
 import org.elasticsearch.health.SimpleHealthIndicatorDetails;
@ -21,6 +22,7 @@ import java.util.Map;
 
 import static org.elasticsearch.health.HealthStatus.GREEN;
 import static org.elasticsearch.health.HealthStatus.YELLOW;
+import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.DESCRIPTION_LENGTH_LIMIT_KEY;
 import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.FAILURE_SYMPTOM;
 import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.INACTIVE_SYMPTOM;
 import static org.elasticsearch.reservedstate.service.FileSettingsService.FileSettingsHealthIndicatorService.NO_CHANGES_SYMPTOM;
@ -37,7 +39,7 @@ public class FileSettingsHealthIndicatorServiceTests extends ESTestCase {
 
     @Before
     public void initialize() {
-        healthIndicatorService = new FileSettingsHealthIndicatorService();
+        healthIndicatorService = new FileSettingsHealthIndicatorService(Settings.EMPTY);
     }
 
     public void testInitiallyGreen() {}
@ -101,4 +103,28 @@ public class FileSettingsHealthIndicatorServiceTests extends ESTestCase {
             healthIndicatorService.calculate(false, null)
         );
     }
+
+    public void testDescriptionIsTruncated() {
+        checkTruncatedDescription(9, "123456789", "123456789");
+        checkTruncatedDescription(8, "123456789", "1234567…");
+        checkTruncatedDescription(1, "12", "…");
+    }
+
+    private void checkTruncatedDescription(int lengthLimit, String description, String expectedTruncatedDescription) {
+        var service = new FileSettingsHealthIndicatorService(Settings.builder().put(DESCRIPTION_LENGTH_LIMIT_KEY, lengthLimit).build());
+        service.startOccurred();
+        service.changeOccurred();
+        service.failureOccurred(description);
+        assertEquals(
+            new HealthIndicatorResult(
+                "file_settings",
+                YELLOW,
+                FAILURE_SYMPTOM,
+                new SimpleHealthIndicatorDetails(Map.of("failure_streak", 1L, "most_recent_failure", expectedTruncatedDescription)),
+                STALE_SETTINGS_IMPACT,
+                List.of()
+            ),
+            service.calculate(false, null)
+        );
+    }
 }

View file

@ -139,7 +139,7 @@ public class FileSettingsServiceTests extends ESTestCase {
                 List.of()
             )
         );
-        healthIndicatorService = spy(new FileSettingsHealthIndicatorService());
+        healthIndicatorService = spy(new FileSettingsHealthIndicatorService(Settings.EMPTY));
         fileSettingsService = spy(new FileSettingsService(clusterService, controller, env, healthIndicatorService));
         watchedFile = fileSettingsService.watchedFile();
     }

View file

@ -628,7 +628,7 @@ public class HeapAttackIT extends ESRestTestCase {
     }
 
     public void testLookupExplosion() throws IOException {
-        int sensorDataCount = 7500;
+        int sensorDataCount = 500;
         int lookupEntries = 10000;
         Map<?, ?> map = lookupExplosion(sensorDataCount, lookupEntries);
         assertMap(map, matchesMap().extraOk().entry("values", List.of(List.of(sensorDataCount * lookupEntries))));
@ -636,19 +636,74 @@ public class HeapAttackIT extends ESRestTestCase {
 
     public void testLookupExplosionManyMatches() throws IOException {
         assertCircuitBreaks(() -> {
-            Map<?, ?> result = lookupExplosion(8500, 10000);
+            Map<?, ?> result = lookupExplosion(1500, 10000);
+            logger.error("should have failed but got {}", result);
+        });
+    }
+
+    public void testLookupExplosionNoFetch() throws IOException {
+        int sensorDataCount = 7500;
+        int lookupEntries = 10000;
+        Map<?, ?> map = lookupExplosionNoFetch(sensorDataCount, lookupEntries);
+        assertMap(map, matchesMap().extraOk().entry("values", List.of(List.of(sensorDataCount * lookupEntries))));
+    }
+
+    public void testLookupExplosionNoFetchManyMatches() throws IOException {
+        assertCircuitBreaks(() -> {
+            Map<?, ?> result = lookupExplosionNoFetch(8500, 10000);
+            logger.error("should have failed but got {}", result);
+        });
+    }
+
+    public void testLookupExplosionBigString() throws IOException {
+        int sensorDataCount = 150;
+        int lookupEntries = 1;
+        Map<?, ?> map = lookupExplosionBigString(sensorDataCount, lookupEntries);
+        assertMap(map, matchesMap().extraOk().entry("values", List.of(List.of(sensorDataCount * lookupEntries))));
+    }
+
+    public void testLookupExplosionBigStringManyMatches() throws IOException {
+        assertCircuitBreaks(() -> {
+            Map<?, ?> result = lookupExplosionBigString(500, 1);
             logger.error("should have failed but got {}", result);
         });
     }
 
     private Map<?, ?> lookupExplosion(int sensorDataCount, int lookupEntries) throws IOException {
-        initSensorData(sensorDataCount, 1);
-        initSensorLookup(lookupEntries, 1, i -> "73.9857 40.7484");
+        lookupExplosionData(sensorDataCount, lookupEntries);
+        StringBuilder query = startQuery();
+        query.append("FROM sensor_data | LOOKUP JOIN sensor_lookup ON id | STATS COUNT(location)\"}");
+        return responseAsMap(query(query.toString(), null));
+    }
+
+    private Map<?, ?> lookupExplosionNoFetch(int sensorDataCount, int lookupEntries) throws IOException {
+        lookupExplosionData(sensorDataCount, lookupEntries);
         StringBuilder query = startQuery();
         query.append("FROM sensor_data | LOOKUP JOIN sensor_lookup ON id | STATS COUNT(*)\"}");
         return responseAsMap(query(query.toString(), null));
     }
+
+    private void lookupExplosionData(int sensorDataCount, int lookupEntries) throws IOException {
+        initSensorData(sensorDataCount, 1);
+        initSensorLookup(lookupEntries, 1, i -> "73.9857 40.7484");
+    }
+
+    private Map<?, ?> lookupExplosionBigString(int sensorDataCount, int lookupEntries) throws IOException {
+        initSensorData(sensorDataCount, 1);
+        initSensorLookupString(lookupEntries, 1, i -> {
+            int target = Math.toIntExact(ByteSizeValue.ofMb(1).getBytes());
+            StringBuilder str = new StringBuilder(Math.toIntExact(ByteSizeValue.ofMb(2).getBytes()));
+            while (str.length() < target) {
+                str.append("Lorem ipsum dolor sit amet, consectetur adipiscing elit.");
+            }
+            logger.info("big string is {} characters", str.length());
+            return str.toString();
+        });
+        StringBuilder query = startQuery();
+        query.append("FROM sensor_data | LOOKUP JOIN sensor_lookup ON id | STATS COUNT(string)\"}");
+        return responseAsMap(query(query.toString(), null));
+    }
 
     public void testEnrichExplosion() throws IOException {
         int sensorDataCount = 1000;
         int lookupEntries = 100;
@ -834,6 +889,31 @@ public class HeapAttackIT extends ESRestTestCase {
         initIndex("sensor_lookup", data.toString());
     }
 
+    private void initSensorLookupString(int lookupEntries, int sensorCount, IntFunction<String> string) throws IOException {
+        logger.info("loading sensor lookup with huge strings");
+        createIndex("sensor_lookup", Settings.builder().put(IndexSettings.MODE.getKey(), IndexMode.LOOKUP.getName()).build(), """
+            {
+                "properties": {
+                    "id": { "type": "long" },
+                    "string": { "type": "text" }
+                }
+            }""");
+        int docsPerBulk = 10;
+        StringBuilder data = new StringBuilder();
+        for (int i = 0; i < lookupEntries; i++) {
+            int sensor = i % sensorCount;
+            data.append(String.format(Locale.ROOT, """
+                {"create":{}}
+                {"id": %d, "string": "%s"}
+                """, sensor, string.apply(sensor)));
+            if (i % docsPerBulk == docsPerBulk - 1) {
+                bulk("sensor_lookup", data.toString());
+                data.setLength(0);
+            }
+        }
+        initIndex("sensor_lookup", data.toString());
+    }
+
     private void initSensorEnrich(int lookupEntries, int sensorCount, IntFunction<String> location) throws IOException {
         initSensorLookup(lookupEntries, sensorCount, location);
         logger.info("loading sensor enrich");

View file

@ -2682,6 +2682,15 @@ public abstract class ESTestCase extends LuceneTestCase {
         );
     }
 
+    /**
+     * Checks a specific exception class with matched message is thrown by the given runnable, and returns it.
+     */
+    public static <T extends Throwable> T expectThrows(Class<T> expectedType, Matcher<String> messageMatcher, ThrowingRunnable runnable) {
+        var e = expectThrows(expectedType, runnable);
+        assertThat(e.getMessage(), messageMatcher);
+        return e;
+    }
+
     /**
      * Same as {@link #runInParallel(int, IntConsumer)} but also attempts to start all tasks at the same time by blocking execution on a
      * barrier until all threads are started and ready to execute their task.
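A hedged usage sketch for the new overload (the lambda body and mapperService are hypothetical):

    var e = expectThrows(
        IllegalArgumentException.class,
        containsString("cannot be changed"),
        () -> mapperService.merge(newMapping) // hypothetical action under test
    );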

View file

@ -23,7 +23,7 @@ import java.util.function.Predicate;
public class LambdaMatchers { public class LambdaMatchers {
private static class TransformMatcher<T, U> extends BaseMatcher<T> { private static class TransformMatcher<T, U> extends TypeSafeMatcher<T> {
private final Matcher<U> matcher; private final Matcher<U> matcher;
private final Function<T, U> transform; private final Function<T, U> transform;
@ -33,24 +33,21 @@ public class LambdaMatchers {
} }
@Override @Override
@SuppressWarnings("unchecked") protected boolean matchesSafely(T item) {
public boolean matches(Object actual) {
U u; U u;
try { try {
u = transform.apply((T) actual); u = transform.apply(item);
} catch (ClassCastException e) { } catch (ClassCastException e) {
throw new AssertionError(e); throw new AssertionError(e);
} }
return matcher.matches(u); return matcher.matches(u);
} }
@Override @Override
@SuppressWarnings("unchecked") protected void describeMismatchSafely(T item, Description description) {
public void describeMismatch(Object item, Description description) {
U u; U u;
try { try {
u = transform.apply((T) item); u = transform.apply(item);
} catch (ClassCastException e) { } catch (ClassCastException e) {
description.appendValue(item).appendText(" is not of the correct type (").appendText(e.getMessage()).appendText(")"); description.appendValue(item).appendText(" is not of the correct type (").appendText(e.getMessage()).appendText(")");
return; return;
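For context on the BaseMatcher to TypeSafeMatcher switch above: TypeSafeMatcher performs the null and runtime-type check before dispatching, so matchesSafely and describeMismatchSafely only ever see a non-null T and the unchecked casts go away. A reduced sketch of that contract (hypothetical matcher, standard Hamcrest API):

    import org.hamcrest.Description;
    import org.hamcrest.TypeSafeMatcher;

    class HasLength extends TypeSafeMatcher<String> {   // illustrative matcher
        private final int expected;

        HasLength(int expected) {
            this.expected = expected;
        }

        @Override
        protected boolean matchesSafely(String item) {  // item is never null here
            return item.length() == expected;
        }

        @Override
        public void describeTo(Description description) {
            description.appendText("a string of length ").appendValue(expected);
        }
    }
    // new HasLength(3).matches(null) -> false, with the mismatch described as "was null"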

View file

@@ -19,11 +19,13 @@ import static org.elasticsearch.test.LambdaMatchers.transformedArrayItemsMatch;
import static org.elasticsearch.test.LambdaMatchers.transformedItemsMatch; import static org.elasticsearch.test.LambdaMatchers.transformedItemsMatch;
import static org.elasticsearch.test.LambdaMatchers.transformedMatch; import static org.elasticsearch.test.LambdaMatchers.transformedMatch;
import static org.elasticsearch.test.LambdaMatchers.trueWith; import static org.elasticsearch.test.LambdaMatchers.trueWith;
import static org.hamcrest.Matchers.anything;
import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.arrayContaining;
import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.emptyString;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.not;
public class LambdaMatchersTests extends ESTestCase { public class LambdaMatchersTests extends ESTestCase {
@@ -56,6 +58,7 @@ public class LambdaMatchersTests extends ESTestCase {
assertThat(new A("1"), transformedMatch(a -> a.str, equalTo("1"))); assertThat(new A("1"), transformedMatch(a -> a.str, equalTo("1")));
assertThat(new B("1"), transformedMatch((A a) -> a.str, equalTo("1"))); assertThat(new B("1"), transformedMatch((A a) -> a.str, equalTo("1")));
assertMismatch((A) null, transformedMatch(A::toString, anything()), is("was null"));
assertMismatch(new A("1"), transformedMatch(a -> a.str, emptyString()), equalTo("transformed value was \"1\"")); assertMismatch(new A("1"), transformedMatch(a -> a.str, emptyString()), equalTo("transformed value was \"1\""));
} }

View file

@@ -120,7 +120,7 @@ public class CoordinatedInferenceActionRequestTests extends AbstractBWCWireSeria
instance.setPrefixType(TrainedModelPrefixStrings.PrefixType.NONE); instance.setPrefixType(TrainedModelPrefixStrings.PrefixType.NONE);
} }
return new CoordinatedInferenceAction.Request( var newInstance = new CoordinatedInferenceAction.Request(
instance.getModelId(), instance.getModelId(),
instance.getInputs(), instance.getInputs(),
instance.getTaskSettings(), instance.getTaskSettings(),
@@ -131,5 +131,7 @@ public class CoordinatedInferenceActionRequestTests extends AbstractBWCWireSeria
instance.getHighPriority(), instance.getHighPriority(),
instance.getRequestModelType() instance.getRequestModelType()
); );
newInstance.setPrefixType(instance.getPrefixType());
return newInstance;
} }
} }

View file

@@ -32,6 +32,7 @@ public class MetadataAttribute extends TypedAttribute {
public static final String TIMESTAMP_FIELD = "@timestamp"; public static final String TIMESTAMP_FIELD = "@timestamp";
public static final String TSID_FIELD = "_tsid"; public static final String TSID_FIELD = "_tsid";
public static final String SCORE = "_score"; public static final String SCORE = "_score";
public static final String INDEX = "_index";
static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(
Attribute.class, Attribute.class,
@@ -42,7 +43,7 @@ public class MetadataAttribute extends TypedAttribute {
private static final Map<String, Tuple<DataType, Boolean>> ATTRIBUTES_MAP = Map.of( private static final Map<String, Tuple<DataType, Boolean>> ATTRIBUTES_MAP = Map.of(
"_version", "_version",
tuple(DataType.LONG, false), // _version field is not searchable tuple(DataType.LONG, false), // _version field is not searchable
"_index", INDEX,
tuple(DataType.KEYWORD, true), tuple(DataType.KEYWORD, true),
IdFieldMapper.NAME, IdFieldMapper.NAME,
tuple(DataType.KEYWORD, false), // actually searchable, but fielddata access on the _id field is disallowed by default tuple(DataType.KEYWORD, false), // actually searchable, but fielddata access on the _id field is disallowed by default

View file

@@ -26,6 +26,16 @@ public class Holder<T> {
this.value = value; this.value = value;
} }
/**
* Sets a value in the holder, but only if none has already been set.
* @param value the new value to set.
*/
public void setIfAbsent(T value) {
if (this.value == null) {
this.value = value;
}
}
public T get() { public T get() {
return value; return value;
} }
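A short usage sketch of setIfAbsent, mirroring how the full-text query rewrite below uses it to remember only the first failure thrown inside a lambda (items and process are hypothetical):

    Holder<IOException> failure = new Holder<>();
    for (String item : items) {
        try {
            process(item);              // hypothetical call that may throw
        } catch (IOException e) {
            failure.setIfAbsent(e);     // keep the first failure, ignore later ones
        }
    }
    if (failure.get() != null) {
        throw failure.get();
    }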

View file

@@ -773,6 +773,33 @@ public abstract class RestEsqlTestCase extends ESRestTestCase {
} }
} }
public void testErrorMessageForMissingParams() throws IOException {
ResponseException re = expectThrows(
ResponseException.class,
() -> runEsql(requestObjectBuilder().query("from idx | where x == ?n1").params("[]"))
);
assertThat(
EntityUtils.toString(re.getResponse().getEntity()).replaceAll("\\\\\n\\s+\\\\", ""),
containsString("line 1:23: Unknown query parameter [n1]")
);
re = expectThrows(
ResponseException.class,
() -> runEsql(requestObjectBuilder().query("from idx | where x == ?n1 and y == ?n2").params("[{\"n\" : \"v\"}]"))
);
assertThat(EntityUtils.toString(re.getResponse().getEntity()).replaceAll("\\\\\n\\s+\\\\", ""), containsString("""
line 1:23: Unknown query parameter [n1], did you mean [n]?; line 1:36: Unknown query parameter [n2], did you mean [n]?"""));
re = expectThrows(
ResponseException.class,
() -> runEsql(requestObjectBuilder().query("from idx | where x == ?n1 and y == ?n2").params("[{\"n1\" : \"v1\"}]"))
);
assertThat(
EntityUtils.toString(re.getResponse().getEntity()).replaceAll("\\\\\n\\s+\\\\", ""),
containsString("line 1:36: Unknown query parameter [n2], did you mean [n1]")
);
}
public void testErrorMessageForLiteralDateMathOverflow() throws IOException { public void testErrorMessageForLiteralDateMathOverflow() throws IOException {
List<String> dateMathOverflowExpressions = List.of( List<String> dateMathOverflowExpressions = List.of(
"2147483647 day + 1 day", "2147483647 day + 1 day",

View file

@@ -11,6 +11,8 @@ import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.sandbox.document.HalfFloatPoint; import org.apache.lucene.sandbox.document.HalfFloatPoint;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker;
@@ -31,9 +33,11 @@ import org.elasticsearch.geo.GeometryTestUtils;
import org.elasticsearch.geo.ShapeTestUtils; import org.elasticsearch.geo.ShapeTestUtils;
import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexMode;
import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskCancelledException;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xcontent.json.JsonXContent;
import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse;
import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.analysis.EnrichResolution;
@@ -72,8 +76,8 @@ import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation;
import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier;
import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.elasticsearch.xpack.esql.plugin.QueryPragmas;
import org.elasticsearch.xpack.esql.plugin.TransportActionServices;
import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.session.Configuration;
import org.elasticsearch.xpack.esql.session.QueryBuilderResolver;
import org.elasticsearch.xpack.esql.stats.SearchStats; import org.elasticsearch.xpack.esql.stats.SearchStats;
import org.elasticsearch.xpack.esql.telemetry.Metrics; import org.elasticsearch.xpack.esql.telemetry.Metrics;
import org.elasticsearch.xpack.versionfield.Version; import org.elasticsearch.xpack.versionfield.Version;
@@ -140,6 +144,7 @@ import static org.elasticsearch.xpack.esql.parser.ParserUtils.ParamClassificatio
import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull; import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.mock;
public final class EsqlTestUtils { public final class EsqlTestUtils {
@@ -360,7 +365,14 @@ public final class EsqlTestUtils {
public static final Verifier TEST_VERIFIER = new Verifier(new Metrics(new EsqlFunctionRegistry()), new XPackLicenseState(() -> 0L)); public static final Verifier TEST_VERIFIER = new Verifier(new Metrics(new EsqlFunctionRegistry()), new XPackLicenseState(() -> 0L));
public static final QueryBuilderResolver MOCK_QUERY_BUILDER_RESOLVER = new MockQueryBuilderResolver(); public static final TransportActionServices MOCK_TRANSPORT_ACTION_SERVICES = new TransportActionServices(
mock(TransportService.class),
mock(SearchService.class),
null,
mock(ClusterService.class),
mock(IndexNameExpressionResolver.class),
null
);
private EsqlTestUtils() {} private EsqlTestUtils() {}

View file

@@ -1,30 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.esql;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.esql.session.QueryBuilderResolver;
import org.elasticsearch.xpack.esql.session.Result;
import java.util.function.BiConsumer;
public class MockQueryBuilderResolver extends QueryBuilderResolver {
public MockQueryBuilderResolver() {
super(null, null, null, null);
}
@Override
public void resolveQueryBuilders(
LogicalPlan plan,
ActionListener<Result> listener,
BiConsumer<LogicalPlan, ActionListener<Result>> callback
) {
callback.accept(plan, listener);
}
}

View file

@@ -39,7 +39,7 @@ max:integer |_index:keyword
; ;
metaIndexAliasedInAggs metaIndexAliasedInAggs
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: metadata_fields_remote_test required_capability: metadata_fields_remote_test
from employees metadata _index | eval _i = _index | stats max = max(emp_no) by _i | SORT _i; from employees metadata _index | eval _i = _index | stats max = max(emp_no) by _i | SORT _i;

View file

@@ -40,7 +40,7 @@ max:integer |_index:keyword
; ;
metaIndexSorted metaIndexSorted
required_capability: metadata_fields required_capability: index_metadata_field
from employees metadata _index | sort _index, emp_no desc | keep emp_no, _index | limit 2; from employees metadata _index | sort _index, emp_no desc | keep emp_no, _index | limit 2;
@@ -50,7 +50,7 @@ emp_no:integer |_index:keyword
; ;
metaIndexWithInPredicate metaIndexWithInPredicate
required_capability: metadata_fields required_capability: index_metadata_field
from employees metadata _index | where _index in ("employees", "foobar") | sort emp_no desc | keep emp_no, _index | limit 2; from employees metadata _index | where _index in ("employees", "foobar") | sort emp_no desc | keep emp_no, _index | limit 2;
@@ -60,7 +60,7 @@ emp_no:integer |_index:keyword
; ;
metaIndexAliasedInAggs metaIndexAliasedInAggs
required_capability: metadata_fields required_capability: index_metadata_field
from employees metadata _index | eval _i = _index | stats max = max(emp_no) by _i; from employees metadata _index | eval _i = _index | stats max = max(emp_no) by _i;

View file

@@ -133,7 +133,7 @@ mc:l | count:l
multiIndexIpString multiIndexIpString
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: casting_operator required_capability: casting_operator
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
@@ -162,7 +162,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexIpStringRename multiIndexIpStringRename
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: casting_operator required_capability: casting_operator
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
@@ -191,7 +191,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexIpStringRenameToString multiIndexIpStringRenameToString
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_str METADATA _index FROM sample_data, sample_data_str METADATA _index
@@ -219,7 +219,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexWhereIpString multiIndexWhereIpString
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_str METADATA _index FROM sample_data, sample_data_str METADATA _index
@@ -237,7 +237,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 3450233 | Connected
multiIndexWhereIpStringLike multiIndexWhereIpStringLike
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_str METADATA _index FROM sample_data, sample_data_str METADATA _index
@@ -445,7 +445,7 @@ count:long | message:keyword
multiIndexMissingIpToString multiIndexMissingIpToString
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_missing_field required_capability: union_types_missing_field
FROM sample_data, sample_data_str, missing_ip_sample_data METADATA _index FROM sample_data, sample_data_str, missing_ip_sample_data METADATA _index
@@ -480,7 +480,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450
multiIndexMissingIpToIp multiIndexMissingIpToIp
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_missing_field required_capability: union_types_missing_field
FROM sample_data, sample_data_str, missing_ip_sample_data METADATA _index FROM sample_data, sample_data_str, missing_ip_sample_data METADATA _index
@@ -515,7 +515,7 @@ sample_data_str | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexTsLong multiIndexTsLong
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_ts_long METADATA _index FROM sample_data, sample_data_ts_long METADATA _index
@@ -543,7 +543,7 @@ sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexTsLongRename multiIndexTsLongRename
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_ts_long METADATA _index FROM sample_data, sample_data_ts_long METADATA _index
@@ -573,7 +573,7 @@ sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexTsNanosRename multiIndexTsNanosRename
required_capability: to_date_nanos required_capability: to_date_nanos
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_ts_nanos METADATA _index FROM sample_data, sample_data_ts_nanos METADATA _index
@@ -602,7 +602,7 @@ sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexTsNanosRenameToNanos multiIndexTsNanosRenameToNanos
required_capability: to_date_nanos required_capability: to_date_nanos
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_ts_nanos METADATA _index FROM sample_data, sample_data_ts_nanos METADATA _index
@@ -631,7 +631,7 @@ sample_data_ts_nanos | 2023-10-23T12:15:03.360123456Z | 172.21.2.162 | 34502
multiIndex sort millis and nanos as nanos multiIndex sort millis and nanos as nanos
required_capability: to_date_nanos required_capability: to_date_nanos
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_ts_nanos METADATA _index FROM sample_data, sample_data_ts_nanos METADATA _index
@@ -660,7 +660,7 @@ sample_data | 2023-10-23T12:15:03.360000000Z | 172.21.2.162 | 34502
multiIndex sort millis and nanos as millis multiIndex sort millis and nanos as millis
required_capability: to_date_nanos required_capability: to_date_nanos
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_ts_nanos METADATA _index FROM sample_data, sample_data_ts_nanos METADATA _index
@@ -691,7 +691,7 @@ multiIndexTsNanosRenameToNanosWithFiltering
required_capability: to_date_nanos required_capability: to_date_nanos
required_capability: date_nanos_binary_comparison required_capability: date_nanos_binary_comparison
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_ts_nanos METADATA _index FROM sample_data, sample_data_ts_nanos METADATA _index
@@ -716,7 +716,7 @@ sample_data_ts_nanos | 2023-10-23T13:33:34.937123456Z | 172.21.0.5 | 12323
multiIndexTsLongRenameToString multiIndexTsLongRenameToString
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_ts_long METADATA _index FROM sample_data, sample_data_ts_long METADATA _index
@@ -744,7 +744,7 @@ sample_data_ts_long | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexWhereTsLong multiIndexWhereTsLong
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
FROM sample_data, sample_data_ts_long METADATA _index FROM sample_data, sample_data_ts_long METADATA _index
@@ -979,7 +979,7 @@ count:long | message:keyword
multiIndexIpStringTsLong multiIndexIpStringTsLong
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
required_capability: to_date_nanos required_capability: to_date_nanos
@@ -1022,7 +1022,7 @@ sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexIpStringTsLongDropped multiIndexIpStringTsLongDropped
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: to_date_nanos required_capability: to_date_nanos
FROM sample_data* METADATA _index FROM sample_data* METADATA _index
@@ -1064,7 +1064,7 @@ sample_data_ts_nanos | 8268153 | Connection error
multiIndexIpStringTsLongRename multiIndexIpStringTsLongRename
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
required_capability: to_date_nanos required_capability: to_date_nanos
@@ -1107,7 +1107,7 @@ sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexIpStringTsLongRenameDropped multiIndexIpStringTsLongRenameDropped
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: to_date_nanos required_capability: to_date_nanos
FROM sample_data* METADATA _index FROM sample_data* METADATA _index
@@ -1149,7 +1149,7 @@ sample_data_ts_nanos | 8268153 | Connection error
multiIndexIpStringTsLongRenameToString multiIndexIpStringTsLongRenameToString
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
required_capability: to_date_nanos required_capability: to_date_nanos
@@ -1192,7 +1192,7 @@ sample_data_ts_nanos | 2023-10-23T12:15:03.360Z | 172.21.2.162 | 3450233
multiIndexWhereIpStringTsLong multiIndexWhereIpStringTsLong
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
required_capability: to_date_nanos required_capability: to_date_nanos
@@ -1226,7 +1226,7 @@ count:long | message:keyword
multiIndexWhereIpStringLikeTsLong multiIndexWhereIpStringLikeTsLong
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
required_capability: to_date_nanos required_capability: to_date_nanos
@@ -1260,7 +1260,7 @@ count:long | message:keyword
multiIndexMultiColumnTypesRename multiIndexMultiColumnTypesRename
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
required_capability: to_date_nanos required_capability: to_date_nanos
@@ -1279,7 +1279,7 @@ null | null | 8268153 | Connectio
multiIndexMultiColumnTypesRenameAndKeep multiIndexMultiColumnTypesRenameAndKeep
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
required_capability: to_date_nanos required_capability: to_date_nanos
@@ -1299,7 +1299,7 @@ sample_data_ts_nanos | 2023-10-23T13:52:55.015Z | 2023-10-23T13:52:55.015123456
multiIndexMultiColumnTypesRenameAndDrop multiIndexMultiColumnTypesRenameAndDrop
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: union_types_remove_fields required_capability: union_types_remove_fields
required_capability: to_date_nanos required_capability: to_date_nanos
@@ -1591,7 +1591,7 @@ FROM sample_data, sample_data_ts_long
shortIntegerWidening shortIntegerWidening
required_capability: union_types required_capability: union_types
required_capability: metadata_fields required_capability: index_metadata_field
required_capability: casting_operator required_capability: casting_operator
required_capability: union_types_numeric_widening required_capability: union_types_numeric_widening

View file

@@ -246,7 +246,7 @@ public class MatchFunctionIT extends AbstractEsqlIntegTestCase {
var error = expectThrows(ElasticsearchException.class, () -> run(query)); var error = expectThrows(ElasticsearchException.class, () -> run(query));
assertThat( assertThat(
error.getMessage(), error.getMessage(),
containsString("[MATCH] function cannot operate on [\"a brown fox\"], which is not a field from an index mapping") containsString("line 2:15: [MATCH] function cannot operate on [content], which is not a field from an index mapping")
); );
} }

View file

@@ -230,7 +230,7 @@ public class MatchOperatorIT extends AbstractEsqlIntegTestCase {
var error = expectThrows(ElasticsearchException.class, () -> run(query)); var error = expectThrows(ElasticsearchException.class, () -> run(query));
assertThat( assertThat(
error.getMessage(), error.getMessage(),
containsString("[:] operator cannot operate on [\"a brown fox\"], which is not a field from an index mapping") containsString("line 2:9: [:] operator cannot operate on [content], which is not a field from an index mapping")
); );
} }

View file

@@ -121,12 +121,17 @@ public class EsqlCapabilities {
* Cast string literals to a desired data type for IN predicate and more types for BinaryComparison. * Cast string literals to a desired data type for IN predicate and more types for BinaryComparison.
*/ */
STRING_LITERAL_AUTO_CASTING_EXTENDED, STRING_LITERAL_AUTO_CASTING_EXTENDED,
/** /**
* Support for metadata fields. * Support for metadata fields.
*/ */
METADATA_FIELDS, METADATA_FIELDS,
/**
* Support specifically for *just* the _index METADATA field. Used by CsvTests, since that is the only metadata field currently
* supported.
*/
INDEX_METADATA_FIELD,
/** /**
* Support for timespan units abbreviations * Support for timespan units abbreviations
*/ */

View file

@@ -21,10 +21,10 @@ import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry;
import org.elasticsearch.xpack.esql.optimizer.LogicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.LogicalOptimizerContext;
import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer;
import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import org.elasticsearch.xpack.esql.planner.mapper.Mapper;
import org.elasticsearch.xpack.esql.plugin.TransportActionServices;
import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.session.Configuration;
import org.elasticsearch.xpack.esql.session.EsqlSession; import org.elasticsearch.xpack.esql.session.EsqlSession;
import org.elasticsearch.xpack.esql.session.IndexResolver; import org.elasticsearch.xpack.esql.session.IndexResolver;
import org.elasticsearch.xpack.esql.session.QueryBuilderResolver;
import org.elasticsearch.xpack.esql.session.Result; import org.elasticsearch.xpack.esql.session.Result;
import org.elasticsearch.xpack.esql.telemetry.Metrics; import org.elasticsearch.xpack.esql.telemetry.Metrics;
import org.elasticsearch.xpack.esql.telemetry.PlanTelemetry; import org.elasticsearch.xpack.esql.telemetry.PlanTelemetry;
@@ -62,7 +62,7 @@ public class PlanExecutor {
EsqlExecutionInfo executionInfo, EsqlExecutionInfo executionInfo,
IndicesExpressionGrouper indicesExpressionGrouper, IndicesExpressionGrouper indicesExpressionGrouper,
EsqlSession.PlanRunner planRunner, EsqlSession.PlanRunner planRunner,
QueryBuilderResolver queryBuilderResolver, TransportActionServices services,
ActionListener<Result> listener ActionListener<Result> listener
) { ) {
final PlanTelemetry planTelemetry = new PlanTelemetry(functionRegistry); final PlanTelemetry planTelemetry = new PlanTelemetry(functionRegistry);
@@ -78,7 +78,7 @@
verifier, verifier,
planTelemetry, planTelemetry,
indicesExpressionGrouper, indicesExpressionGrouper,
queryBuilderResolver services
); );
QueryMetric clientId = QueryMetric.fromString("rest"); QueryMetric clientId = QueryMetric.fromString("rest");
metrics.total(clientId); metrics.total(clientId);

View file

@@ -7,7 +7,7 @@
package org.elasticsearch.xpack.esql.expression.function.fulltext; package org.elasticsearch.xpack.esql.expression.function.fulltext;
import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.compute.lucene.LuceneQueryExpressionEvaluator; import org.elasticsearch.compute.lucene.LuceneQueryExpressionEvaluator;
import org.elasticsearch.compute.lucene.LuceneQueryExpressionEvaluator.ShardConfig; import org.elasticsearch.compute.lucene.LuceneQueryExpressionEvaluator.ShardConfig;
import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator;
@@ -110,11 +110,7 @@ public abstract class FullTextFunction extends Function implements TranslationAw
*/ */
public Object queryAsObject() { public Object queryAsObject() {
Object queryAsObject = query().fold(FoldContext.small() /* TODO remove me */); Object queryAsObject = query().fold(FoldContext.small() /* TODO remove me */);
if (queryAsObject instanceof BytesRef bytesRef) { return BytesRefs.toString(queryAsObject);
return bytesRef.utf8ToString();
}
return queryAsObject;
} }
@Override @Override
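This relies on BytesRefs.toString unwrapping a BytesRef to its UTF-8 string and stringifying anything else (an assumption worth noting: unlike the old branch, non-BytesRef values now also come back as strings rather than untouched). A sketch of the assumed behavior:

    BytesRefs.toString(new BytesRef("brown fox"));   // -> "brown fox" (utf8ToString)
    BytesRefs.toString(42);                          // -> "42" (previously returned as-is)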

View file

@@ -14,7 +14,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.xpack.esql.capabilities.PostOptimizationVerificationAware; import org.elasticsearch.xpack.esql.capabilities.PostAnalysisPlanVerificationAware;
import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.common.Failure;
import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.common.Failures;
import org.elasticsearch.xpack.esql.core.InvalidArgumentException; import org.elasticsearch.xpack.esql.core.InvalidArgumentException;
@@ -30,6 +30,7 @@ import org.elasticsearch.xpack.esql.core.tree.Source;
import org.elasticsearch.xpack.esql.core.type.DataType; import org.elasticsearch.xpack.esql.core.type.DataType;
import org.elasticsearch.xpack.esql.core.type.DataTypeConverter; import org.elasticsearch.xpack.esql.core.type.DataTypeConverter;
import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField; import org.elasticsearch.xpack.esql.core.type.MultiTypeEsField;
import org.elasticsearch.xpack.esql.core.util.Check;
import org.elasticsearch.xpack.esql.core.util.NumericUtils; import org.elasticsearch.xpack.esql.core.util.NumericUtils;
import org.elasticsearch.xpack.esql.expression.function.Example; import org.elasticsearch.xpack.esql.expression.function.Example;
import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
@@ -38,6 +39,7 @@ import org.elasticsearch.xpack.esql.expression.function.OptionalArgument;
import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.Param;
import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction; import org.elasticsearch.xpack.esql.expression.function.scalar.convert.AbstractConvertFunction;
import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.esql.planner.TranslatorHandler; import org.elasticsearch.xpack.esql.planner.TranslatorHandler;
import org.elasticsearch.xpack.esql.querydsl.query.MatchQuery; import org.elasticsearch.xpack.esql.querydsl.query.MatchQuery;
import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter;
@@ -48,6 +50,7 @@ import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.Set; import java.util.Set;
import java.util.function.BiConsumer;
import static java.util.Map.entry; import static java.util.Map.entry;
import static org.elasticsearch.common.logging.LoggerMessageFormat.format; import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
@@ -88,7 +91,7 @@ import static org.elasticsearch.xpack.esql.expression.predicate.operator.compari
/** /**
* Full text function that performs a {@link org.elasticsearch.xpack.esql.querydsl.query.MatchQuery} . * Full text function that performs a {@link org.elasticsearch.xpack.esql.querydsl.query.MatchQuery} .
*/ */
public class Match extends FullTextFunction implements OptionalArgument, PostOptimizationVerificationAware { public class Match extends FullTextFunction implements OptionalArgument, PostAnalysisPlanVerificationAware {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Match", Match::readFrom); public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Match", Match::readFrom);
public static final Set<DataType> FIELD_DATA_TYPES = Set.of( public static final Set<DataType> FIELD_DATA_TYPES = Set.of(
@@ -429,23 +432,23 @@ public class Match extends FullTextFunction implements OptionalArgument, PostOpt
} }
@Override @Override
public void postOptimizationVerification(Failures failures) { public BiConsumer<LogicalPlan, Failures> postAnalysisPlanVerification() {
Expression fieldExpression = field(); return (plan, failures) -> {
// Field may be converted to other data type (field_name :: data_type), so we need to check the original field super.postAnalysisPlanVerification().accept(plan, failures);
if (fieldExpression instanceof AbstractConvertFunction convertFunction) { plan.forEachExpression(Match.class, m -> {
fieldExpression = convertFunction.field(); if (m.fieldAsFieldAttribute() == null) {
}
if (fieldExpression instanceof FieldAttribute == false) {
failures.add( failures.add(
Failure.fail( Failure.fail(
field, m.field(),
"[{}] {} cannot operate on [{}], which is not a field from an index mapping", "[{}] {} cannot operate on [{}], which is not a field from an index mapping",
functionName(), functionName(),
functionType(), functionType(),
field.sourceText() m.field().sourceText()
) )
); );
} }
});
};
} }
@Override @Override
@@ -476,12 +479,8 @@ public class Match extends FullTextFunction implements OptionalArgument, PostOpt
@Override @Override
protected Query translate(TranslatorHandler handler) { protected Query translate(TranslatorHandler handler) {
Expression fieldExpression = field; var fieldAttribute = fieldAsFieldAttribute();
// Field may be converted to other data type (field_name :: data_type), so we need to check the original field Check.notNull(fieldAttribute, "Match must have a field attribute as the first argument");
if (fieldExpression instanceof AbstractConvertFunction convertFunction) {
fieldExpression = convertFunction.field();
}
if (fieldExpression instanceof FieldAttribute fieldAttribute) {
String fieldName = fieldAttribute.name(); String fieldName = fieldAttribute.name();
if (fieldAttribute.field() instanceof MultiTypeEsField multiTypeEsField) { if (fieldAttribute.field() instanceof MultiTypeEsField multiTypeEsField) {
// If we have multiple field types, we allow the query to be done, but getting the underlying field name // If we have multiple field types, we allow the query to be done, but getting the underlying field name
@@ -491,7 +490,13 @@ public class Match extends FullTextFunction implements OptionalArgument, PostOpt
return new MatchQuery(source(), fieldName, queryAsObject(), matchQueryOptions()); return new MatchQuery(source(), fieldName, queryAsObject(), matchQueryOptions());
} }
throw new IllegalArgumentException("Match must have a field attribute as the first argument"); private FieldAttribute fieldAsFieldAttribute() {
Expression fieldExpression = field;
// Field may be converted to other data type (field_name :: data_type), so we need to check the original field
if (fieldExpression instanceof AbstractConvertFunction convertFunction) {
fieldExpression = convertFunction.field();
}
return fieldExpression instanceof FieldAttribute fieldAttribute ? fieldAttribute : null;
} }
@Override @Override
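The net effect of moving this check from postOptimizationVerification to postAnalysisPlanVerification is that a non-field argument now fails during analysis with a source position attached, matching the updated IT expectations above. A test-style sketch (index name and query are illustrative):

    var error = expectThrows(ElasticsearchException.class, () -> run("""
        FROM test
        | WHERE match("a brown fox", "fox")"""));   // first argument is a literal, not a mapped field
    assertThat(error.getMessage(), containsString("which is not a field from an index mapping"));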

View file

@@ -0,0 +1,95 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.esql.expression.function.fulltext;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ResolvedIndices;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.Rewriteable;
import org.elasticsearch.xpack.esql.core.util.Holder;
import org.elasticsearch.xpack.esql.plan.logical.EsRelation;
import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.esql.planner.TranslatorHandler;
import org.elasticsearch.xpack.esql.plugin.TransportActionServices;
import org.elasticsearch.xpack.esql.session.IndexResolver;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
/**
* Some {@link FullTextFunction} implementations such as {@link org.elasticsearch.xpack.esql.expression.function.fulltext.Match}
* will be translated to a {@link QueryBuilder} that requires a rewrite phase on the coordinator.
* {@link QueryBuilderResolver#resolveQueryBuilders(LogicalPlan, TransportActionServices, ActionListener)} will rewrite the plan by
* replacing {@link FullTextFunction} expressions with new ones that hold the rewritten {@link QueryBuilder}s.
*/
public final class QueryBuilderResolver {
private QueryBuilderResolver() {}
public static void resolveQueryBuilders(LogicalPlan plan, TransportActionServices services, ActionListener<LogicalPlan> listener) {
var hasFullTextFunctions = plan.anyMatch(p -> {
Holder<Boolean> hasFullTextFunction = new Holder<>(false);
p.forEachExpression(FullTextFunction.class, unused -> hasFullTextFunction.set(true));
return hasFullTextFunction.get();
});
if (hasFullTextFunctions) {
Rewriteable.rewriteAndFetch(
new FullTextFunctionsRewritable(plan),
queryRewriteContext(services, indexNames(plan)),
listener.delegateFailureAndWrap((l, r) -> l.onResponse(r.plan))
);
} else {
listener.onResponse(plan);
}
}
private static QueryRewriteContext queryRewriteContext(TransportActionServices services, Set<String> indexNames) {
ResolvedIndices resolvedIndices = ResolvedIndices.resolveWithIndexNamesAndOptions(
indexNames.toArray(String[]::new),
IndexResolver.FIELD_CAPS_INDICES_OPTIONS,
services.clusterService().state().metadata().getProject(),
services.indexNameExpressionResolver(),
services.transportService().getRemoteClusterService(),
System.currentTimeMillis()
);
return services.searchService().getRewriteContext(System::currentTimeMillis, resolvedIndices, null);
}
private static Set<String> indexNames(LogicalPlan plan) {
Set<String> indexNames = new HashSet<>();
plan.forEachDown(EsRelation.class, esRelation -> indexNames.addAll(esRelation.concreteIndices()));
return indexNames;
}
private record FullTextFunctionsRewritable(LogicalPlan plan) implements Rewriteable<QueryBuilderResolver.FullTextFunctionsRewritable> {
@Override
public FullTextFunctionsRewritable rewrite(QueryRewriteContext ctx) throws IOException {
Holder<IOException> exceptionHolder = new Holder<>();
Holder<Boolean> updated = new Holder<>(false);
LogicalPlan newPlan = plan.transformExpressionsDown(FullTextFunction.class, f -> {
QueryBuilder builder = f.queryBuilder(), initial = builder;
builder = builder == null ? f.asQuery(TranslatorHandler.TRANSLATOR_HANDLER).asBuilder() : builder;
try {
builder = builder.rewrite(ctx);
} catch (IOException e) {
exceptionHolder.setIfAbsent(e);
}
var rewritten = builder != initial;
updated.set(updated.get() || rewritten);
return rewritten ? f.replaceQueryBuilder(builder) : f;
});
if (exceptionHolder.get() != null) {
throw exceptionHolder.get();
}
return updated.get() ? new FullTextFunctionsRewritable(newPlan) : this;
}
}
}
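A hypothetical call site for the new entry point (the real caller is the PreMapper introduced below); the listener gets the plan with rewritten QueryBuilders, or the original plan unchanged when it contains no full-text functions:

    QueryBuilderResolver.resolveQueryBuilders(analyzedPlan, services, ActionListener.wrap(
        rewrittenPlan -> continueExecution(rewrittenPlan),   // continueExecution is assumed
        listener::onFailure
    ));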

View file

@@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.xpack.esql.capabilities.PostOptimizationVerificationAware; import org.elasticsearch.xpack.esql.capabilities.PostAnalysisPlanVerificationAware;
import org.elasticsearch.xpack.esql.common.Failure; import org.elasticsearch.xpack.esql.common.Failure;
import org.elasticsearch.xpack.esql.common.Failures; import org.elasticsearch.xpack.esql.common.Failures;
import org.elasticsearch.xpack.esql.core.expression.Expression; import org.elasticsearch.xpack.esql.core.expression.Expression;
@@ -26,10 +26,12 @@ import org.elasticsearch.xpack.esql.expression.function.Example;
import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; import org.elasticsearch.xpack.esql.expression.function.FunctionInfo;
import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.esql.expression.function.Param;
import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput;
import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.esql.planner.TranslatorHandler; import org.elasticsearch.xpack.esql.planner.TranslatorHandler;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.function.BiConsumer;
import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.FIRST;
import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND; import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.ParamOrdinal.SECOND;
@@ -39,7 +41,7 @@ import static org.elasticsearch.xpack.esql.core.expression.TypeResolutions.isStr
/** /**
* Full text function that performs a {@link TermQuery} . * Full text function that performs a {@link TermQuery} .
*/ */
public class Term extends FullTextFunction implements PostOptimizationVerificationAware { public class Term extends FullTextFunction implements PostAnalysisPlanVerificationAware {
public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Term", Term::readFrom); public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry(Expression.class, "Term", Term::readFrom);
@@ -104,18 +106,23 @@ public class Term extends FullTextFunction implements PostOptimizationVerificati
} }
@Override @Override
public void postOptimizationVerification(Failures failures) { public BiConsumer<LogicalPlan, Failures> postAnalysisPlanVerification() {
if (field instanceof FieldAttribute == false) { return (plan, failures) -> {
super.postAnalysisPlanVerification().accept(plan, failures);
plan.forEachExpression(Term.class, t -> {
if (t.field() instanceof FieldAttribute == false) { // TODO: is a conversion possible, similar to Match's case?
failures.add( failures.add(
Failure.fail( Failure.fail(
field, t.field(),
"[{}] {} cannot operate on [{}], which is not a field from an index mapping", "[{}] {} cannot operate on [{}], which is not a field from an index mapping",
functionName(), t.functionName(),
functionType(), t.functionType(),
field.sourceText() t.field().sourceText()
) )
); );
} }
});
};
} }
@Override @Override

View file

@@ -108,17 +108,7 @@ public class LogicalPlanBuilder extends ExpressionBuilder {
if (errors.hasNext() == false) { if (errors.hasNext() == false) {
return p; return p;
} else { } else {
StringBuilder message = new StringBuilder(); throw ParsingException.combineParsingExceptions(errors);
int i = 0;
while (errors.hasNext()) {
if (i > 0) {
message.append("; ");
}
message.append(errors.next().getMessage());
i++;
}
throw new ParsingException(message.toString());
} }
} }

View file

@@ -9,6 +9,8 @@ package org.elasticsearch.xpack.esql.parser;
import org.elasticsearch.xpack.esql.EsqlClientException; import org.elasticsearch.xpack.esql.EsqlClientException;
import org.elasticsearch.xpack.esql.core.tree.Source; import org.elasticsearch.xpack.esql.core.tree.Source;
import java.util.Iterator;
import static org.elasticsearch.common.logging.LoggerMessageFormat.format; import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
public class ParsingException extends EsqlClientException { public class ParsingException extends EsqlClientException {
@@ -21,6 +23,10 @@ public class ParsingException extends EsqlClientException {
this.charPositionInLine = charPositionInLine + 1; this.charPositionInLine = charPositionInLine + 1;
} }
/**
* To be used only if the exception cannot be associated with a specific position in the query.
* The error message will start with {@code line -1:-1:} instead of a specific location.
*/
public ParsingException(String message, Object... args) { public ParsingException(String message, Object... args) {
this(Source.EMPTY, message, args); this(Source.EMPTY, message, args);
} }
@@ -37,6 +43,38 @@ public class ParsingException extends EsqlClientException {
this.charPositionInLine = source.source().getColumnNumber(); this.charPositionInLine = source.source().getColumnNumber();
} }
private ParsingException(int line, int charPositionInLine, String message, Object... args) {
super(message, args);
this.line = line;
this.charPositionInLine = charPositionInLine;
}
/**
* Combines multiple {@code ParsingException}s into one; used by {@code LogicalPlanBuilder} to
* consolidate the {@code ParsingException}s raised for multiple named parameters.
*/
public static ParsingException combineParsingExceptions(Iterator<ParsingException> parsingExceptions) {
StringBuilder message = new StringBuilder();
int i = 0;
int line = -1;
int charPositionInLine = -1;
while (parsingExceptions.hasNext()) {
ParsingException e = parsingExceptions.next();
if (i > 0) {
message.append("; ");
message.append(e.getMessage());
} else {
// line and column numbers are those of the first error
line = e.getLineNumber();
charPositionInLine = e.getColumnNumber();
message.append(e.getErrorMessage());
}
i++;
}
return new ParsingException(line, charPositionInLine, message.toString());
}
public int getLineNumber() { public int getLineNumber() {
return line; return line;
} }
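A worked example of the combination rule (inputs and expected output taken from the REST test above): the combined exception keeps the first error's line and column, uses getErrorMessage() for the first entry so the position is not repeated, and joins subsequent full messages with "; ":

    // e1 at line 1:23 -> "Unknown query parameter [n1], did you mean [n]?"
    // e2 at line 1:36 -> "Unknown query parameter [n2], did you mean [n]?"
    ParsingException combined = ParsingException.combineParsingExceptions(List.of(e1, e2).iterator());
    // combined.getMessage() ->
    // "line 1:23: Unknown query parameter [n1], did you mean [n]?; line 1:36: Unknown query parameter [n2], did you mean [n]?"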

View file

@@ -0,0 +1,40 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.esql.planner.premapper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.esql.expression.function.fulltext.QueryBuilderResolver;
import org.elasticsearch.xpack.esql.plan.logical.LogicalPlan;
import org.elasticsearch.xpack.esql.plugin.TransportActionServices;
/**
* The class is responsible for invoking any premapping steps that need to be applied to the logical plan,
* before it is mapped to a physical one.
*/
public class PreMapper {
private final TransportActionServices services;
public PreMapper(TransportActionServices services) {
this.services = services;
}
/**
* Invokes any premapping steps that need to be applied to the logical plan before it is mapped to a physical one.
*/
public void preMapper(LogicalPlan plan, ActionListener<LogicalPlan> listener) {
queryRewrite(plan, listener.delegateFailureAndWrap((l, p) -> {
p.setOptimized();
l.onResponse(p);
}));
}
private void queryRewrite(LogicalPlan plan, ActionListener<LogicalPlan> listener) {
QueryBuilderResolver.resolveQueryBuilders(plan, services, listener);
}
}
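As a reading aid for the flattened EsqlSession diff further below, the new flow is: analyzed plan -> PreMapper (async QueryBuilder rewrite) -> logical optimizer -> physical mapping. The call site, as it reads after that change:

    preMapper.preMapper(
        analyzedPlan,
        listener.delegateFailureAndWrap(
            (l, p) -> executeOptimizedPlan(request, executionInfo, planRunner, optimizedPlan(p), l)
        )
    );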

View file

@@ -0,0 +1,24 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
package org.elasticsearch.xpack.esql.plugin;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.compute.operator.exchange.ExchangeService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.usage.UsageService;
public record TransportActionServices(
TransportService transportService,
SearchService searchService,
ExchangeService exchangeService,
ClusterService clusterService,
IndexNameExpressionResolver indexNameExpressionResolver,
UsageService usageService
) {}

View file

@@ -53,7 +53,6 @@ import org.elasticsearch.xpack.esql.enrich.LookupFromIndexService;
import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.execution.PlanExecutor;
import org.elasticsearch.xpack.esql.session.Configuration; import org.elasticsearch.xpack.esql.session.Configuration;
import org.elasticsearch.xpack.esql.session.EsqlSession.PlanRunner; import org.elasticsearch.xpack.esql.session.EsqlSession.PlanRunner;
import org.elasticsearch.xpack.esql.session.QueryBuilderResolver;
import org.elasticsearch.xpack.esql.session.Result; import org.elasticsearch.xpack.esql.session.Result;
import java.io.IOException; import java.io.IOException;
@@ -81,8 +80,8 @@ public class TransportEsqlQueryAction extends HandledTransportAction<EsqlQueryRe
private final LookupFromIndexService lookupFromIndexService; private final LookupFromIndexService lookupFromIndexService;
private final AsyncTaskManagementService<EsqlQueryRequest, EsqlQueryResponse, EsqlQueryTask> asyncTaskManagementService; private final AsyncTaskManagementService<EsqlQueryRequest, EsqlQueryResponse, EsqlQueryTask> asyncTaskManagementService;
private final RemoteClusterService remoteClusterService; private final RemoteClusterService remoteClusterService;
private final QueryBuilderResolver queryBuilderResolver;
private final UsageService usageService; private final UsageService usageService;
private final TransportActionServices services;
// Listeners for active async queries, key being the async task execution ID // Listeners for active async queries, key being the async task execution ID
private final Map<String, EsqlQueryListener> asyncListeners = ConcurrentCollections.newConcurrentMap(); private final Map<String, EsqlQueryListener> asyncListeners = ConcurrentCollections.newConcurrentMap();
@@ -153,8 +152,16 @@
bigArrays bigArrays
); );
this.remoteClusterService = transportService.getRemoteClusterService(); this.remoteClusterService = transportService.getRemoteClusterService();
this.queryBuilderResolver = new QueryBuilderResolver(searchService, clusterService, transportService, indexNameExpressionResolver);
this.usageService = usageService; this.usageService = usageService;
this.services = new TransportActionServices(
transportService,
searchService,
exchangeService,
clusterService,
indexNameExpressionResolver,
usageService
);
} }
@Override @Override
@@ -258,7 +265,7 @@
executionInfo, executionInfo,
remoteClusterService, remoteClusterService,
planRunner, planRunner,
queryBuilderResolver, services,
ActionListener.wrap(result -> { ActionListener.wrap(result -> {
recordCCSTelemetry(task, executionInfo, request, null); recordCCSTelemetry(task, executionInfo, request, null);
listener.onResponse(toResponse(task, request, configuration, result)); listener.onResponse(toResponse(task, request, configuration, result));

View file

@@ -73,6 +73,8 @@ import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize;
import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; import org.elasticsearch.xpack.esql.plan.physical.FragmentExec;
import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
import org.elasticsearch.xpack.esql.planner.mapper.Mapper; import org.elasticsearch.xpack.esql.planner.mapper.Mapper;
import org.elasticsearch.xpack.esql.planner.premapper.PreMapper;
import org.elasticsearch.xpack.esql.plugin.TransportActionServices;
import org.elasticsearch.xpack.esql.telemetry.PlanTelemetry; import org.elasticsearch.xpack.esql.telemetry.PlanTelemetry;
import java.util.ArrayList; import java.util.ArrayList;
@@ -109,12 +111,12 @@ public class EsqlSession {
private final Verifier verifier; private final Verifier verifier;
private final EsqlFunctionRegistry functionRegistry; private final EsqlFunctionRegistry functionRegistry;
private final LogicalPlanOptimizer logicalPlanOptimizer; private final LogicalPlanOptimizer logicalPlanOptimizer;
private final PreMapper preMapper;
private final Mapper mapper; private final Mapper mapper;
private final PhysicalPlanOptimizer physicalPlanOptimizer; private final PhysicalPlanOptimizer physicalPlanOptimizer;
private final PlanTelemetry planTelemetry; private final PlanTelemetry planTelemetry;
private final IndicesExpressionGrouper indicesExpressionGrouper; private final IndicesExpressionGrouper indicesExpressionGrouper;
private final QueryBuilderResolver queryBuilderResolver;
public EsqlSession( public EsqlSession(
String sessionId, String sessionId,
@@ -128,7 +130,7 @@ public class EsqlSession {
Verifier verifier, Verifier verifier,
PlanTelemetry planTelemetry, PlanTelemetry planTelemetry,
IndicesExpressionGrouper indicesExpressionGrouper, IndicesExpressionGrouper indicesExpressionGrouper,
QueryBuilderResolver queryBuilderResolver TransportActionServices services
) { ) {
this.sessionId = sessionId; this.sessionId = sessionId;
this.configuration = configuration; this.configuration = configuration;
@ -142,7 +144,7 @@ public class EsqlSession {
this.physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(configuration)); this.physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(configuration));
this.planTelemetry = planTelemetry; this.planTelemetry = planTelemetry;
this.indicesExpressionGrouper = indicesExpressionGrouper; this.indicesExpressionGrouper = indicesExpressionGrouper;
this.queryBuilderResolver = queryBuilderResolver; this.preMapper = new PreMapper(services);
} }
public String sessionId() { public String sessionId() {
@ -162,16 +164,12 @@ public class EsqlSession {
new EsqlSessionCCSUtils.CssPartialErrorsActionListener(executionInfo, listener) { new EsqlSessionCCSUtils.CssPartialErrorsActionListener(executionInfo, listener) {
@Override @Override
public void onResponse(LogicalPlan analyzedPlan) { public void onResponse(LogicalPlan analyzedPlan) {
try { preMapper.preMapper(
var optimizedPlan = optimizedPlan(analyzedPlan); analyzedPlan,
queryBuilderResolver.resolveQueryBuilders( listener.delegateFailureAndWrap(
optimizedPlan, (l, p) -> executeOptimizedPlan(request, executionInfo, planRunner, optimizedPlan(p), l)
listener, )
(newPlan, next) -> executeOptimizedPlan(request, executionInfo, planRunner, newPlan, next)
); );
} catch (Exception e) {
listener.onFailure(e);
}
} }
} }
); );
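The explicit try/catch disappears because delegateFailureAndWrap both forwards failures to the wrapped listener and converts exceptions thrown by the lambda into onFailure calls. A minimal, self-contained sketch of that pattern in plain Java (illustrative names only, assuming just the behaviour visible at the call site; this is not the ActionListener source):

interface Listener<T> {
    void onResponse(T value);

    void onFailure(Exception e);

    interface Body<T, U> {
        void accept(Listener<T> delegate, U value) throws Exception;
    }

    // Returns a listener that runs `body` on success, turning both delegated
    // failures and exceptions thrown by `body` into onFailure on this listener,
    // which is why the try/catch around optimizedPlan(...) becomes redundant.
    default <U> Listener<U> delegateFailureAndWrap(Body<T, U> body) {
        Listener<T> outer = this;
        return new Listener<U>() {
            @Override
            public void onResponse(U value) {
                try {
                    body.accept(outer, value);
                } catch (Exception e) {
                    outer.onFailure(e);
                }
            }

            @Override
            public void onFailure(Exception e) {
                outer.onFailure(e);
            }
        };
    }
}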


@@ -21,6 +21,7 @@ import org.elasticsearch.index.mapper.TimeSeriesParams;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.xpack.esql.action.EsqlResolveFieldsAction;
+import org.elasticsearch.xpack.esql.core.expression.MetadataAttribute;
 import org.elasticsearch.xpack.esql.core.type.DataType;
 import org.elasticsearch.xpack.esql.core.type.DateEsField;
 import org.elasticsearch.xpack.esql.core.type.EsField;
@@ -50,7 +51,7 @@ import static org.elasticsearch.xpack.esql.core.type.DataType.UNSUPPORTED;
 public class IndexResolver {
     public static final Set<String> ALL_FIELDS = Set.of("*");
-    public static final Set<String> INDEX_METADATA_FIELD = Set.of("_index");
+    public static final Set<String> INDEX_METADATA_FIELD = Set.of(MetadataAttribute.INDEX);
     public static final String UNMAPPED = "unmapped";
     public static final IndicesOptions FIELD_CAPS_INDICES_OPTIONS = IndicesOptions.builder()
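Small cleanup: the "_index" string literal is replaced by the shared MetadataAttribute.INDEX constant (presumably resolving to the same "_index" value), so the metadata field name is defined in one place.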


@@ -516,7 +516,7 @@ public class CsvTests extends ESTestCase {
             TEST_VERIFIER,
             new PlanTelemetry(functionRegistry),
             null,
-            EsqlTestUtils.MOCK_QUERY_BUILDER_RESOLVER
+            EsqlTestUtils.MOCK_TRANSPORT_ACTION_SERVICES
         );

         TestPhysicalOperationProviders physicalOperationProviders = testOperationProviders(foldCtx, testDatasets);


@@ -1186,9 +1186,9 @@ public class VerifierTests extends ESTestCase {
     public void testMatchInsideEval() throws Exception {
         assumeTrue("Match operator is available just for snapshots", Build.current().isSnapshot());
         assertEquals(
-            "1:36: [:] operator is only supported in WHERE commands",
+            "1:36: [:] operator is only supported in WHERE commands\n"
+                + "line 1:36: [:] operator cannot operate on [title], which is not a field from an index mapping",
             error("row title = \"brown fox\" | eval x = title:\"fox\" ")
         );
     }
@@ -1217,6 +1217,25 @@ public class VerifierTests extends ESTestCase {
         assertEquals("1:24: [:] operator cannot be used after LIMIT", error("from test | limit 10 | where first_name : \"Anna\""));
     }

+    // These should pass eventually once we lift some restrictions on match function
+    public void testMatchWithNonIndexedColumnCurrentlyUnsupported() {
+        assertEquals(
+            "1:67: [MATCH] function cannot operate on [initial], which is not a field from an index mapping",
+            error("from test | eval initial = substring(first_name, 1) | where match(initial, \"A\")")
+        );
+        assertEquals(
+            "1:67: [MATCH] function cannot operate on [text], which is not a field from an index mapping",
+            error("from test | eval text=concat(first_name, last_name) | where match(text, \"cat\")")
+        );
+    }
+
+    public void testMatchFunctionIsNotNullable() {
+        assertEquals(
+            "1:48: [MATCH] function cannot operate on [text::keyword], which is not a field from an index mapping",
+            error("row n = null | eval text = n + 5 | where match(text::keyword, \"Anna\")")
+        );
+    }
+
     public void testQueryStringFunctionsNotAllowedAfterCommands() throws Exception {
         // Source commands
         assertEquals("1:13: [QSTR] function cannot be used after SHOW", error("show info | where qstr(\"8.16.0\")"));


@@ -61,7 +61,7 @@ public abstract class AbstractAggregationTestCase extends AbstractFunctionTestCa
      * Use if possible, as this method may get updated with new checks in the future.
      * </p>
      */
-    protected static Iterable<Object[]> parameterSuppliersFromTypedDataWithDefaultChecks(
+    protected static Iterable<Object[]> parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(
         List<TestCaseSupplier> suppliers,
         boolean entirelyNullPreservesType,
         PositionalErrorMessageSupplier positionalErrorMessageSupplier
@@ -74,13 +74,24 @@ public abstract class AbstractAggregationTestCase extends AbstractFunctionTestCa
         );
     }

-    // TODO: Remove and migrate everything to the method with all the parameters
     /**
-     * @deprecated Use {@link #parameterSuppliersFromTypedDataWithDefaultChecks(List, boolean, PositionalErrorMessageSupplier)} instead.
-     * This method doesn't add all the default checks.
+     * Converts a list of test cases into a list of parameter suppliers.
+     * Also, adds a default set of extra test cases.
+     * <p>
+     * Use if possible, as this method may get updated with new checks in the future.
+     * </p>
+     *
+     * @param entirelyNullPreservesType See {@link #anyNullIsNull(boolean, List)}
      */
-    @Deprecated
-    protected static Iterable<Object[]> parameterSuppliersFromTypedDataWithDefaultChecks(List<TestCaseSupplier> suppliers) {
+    protected static Iterable<Object[]> parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(
+        List<TestCaseSupplier> suppliers,
+        boolean entirelyNullPreservesType
+    ) {
+        return parameterSuppliersFromTypedData(anyNullIsNull(entirelyNullPreservesType, randomizeBytesRefsOffset(suppliers)));
+    }
+
+    // TODO remove after removing parameterSuppliersFromTypedDataWithDefaultChecks rename this to that.
+    protected static Iterable<Object[]> parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(List<TestCaseSupplier> suppliers) {
         return parameterSuppliersFromTypedData(withNoRowsExpectingNull(randomizeBytesRefsOffset(suppliers)));
     }
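For migrating callers, the NoErrors overloads keep the old default checks minus the invalid-signature cases. A hedged sketch of a typical parameters() factory after the migration (the supplier setup is elided and illustrative; the real call shape matches AvgTests below):

// Sketch under assumptions: a parameterized aggregation test in this hierarchy,
// with `suppliers` built the same way AvgTests builds them.
@ParametersFactory
public static Iterable<Object[]> parameters() {
    List<TestCaseSupplier> suppliers = new ArrayList<>();
    // ... add one TestCaseSupplier per supported input type ...
    // `true` means a null input leaves the expected output type unchanged (see
    // anyNullIsNull); type-error cases now live in dedicated *ErrorTests classes.
    return parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(suppliers, true);
}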


@@ -51,33 +51,6 @@ import static org.hamcrest.Matchers.sameInstance;
  * which can be automatically tested against several scenarios (null handling, concurrency, etc).
  */
 public abstract class AbstractScalarFunctionTestCase extends AbstractFunctionTestCase {
-    /**
-     * Converts a list of test cases into a list of parameter suppliers.
-     * Also, adds a default set of extra test cases.
-     * <p>
-     * Use if possible, as this method may get updated with new checks in the future.
-     * </p>
-     *
-     * @param entirelyNullPreservesType See {@link #anyNullIsNull(boolean, List)}
-     * @deprecated use {@link #parameterSuppliersFromTypedDataWithDefaultChecksNoErrors}
-     * and make a subclass of {@link ErrorsForCasesWithoutExamplesTestCase}.
-     * It's a <strong>long</strong> faster.
-     */
-    @Deprecated
-    protected static Iterable<Object[]> parameterSuppliersFromTypedDataWithDefaultChecks(
-        boolean entirelyNullPreservesType,
-        List<TestCaseSupplier> suppliers,
-        PositionalErrorMessageSupplier positionalErrorMessageSupplier
-    ) {
-        return parameterSuppliersFromTypedData(
-            errorsForCasesWithoutExamples(
-                anyNullIsNull(entirelyNullPreservesType, randomizeBytesRefsOffset(suppliers)),
-                positionalErrorMessageSupplier
-            )
-        );
-    }
-
     /**
      * Converts a list of test cases into a list of parameter suppliers.
      * Also, adds a default set of extra test cases.
@@ -113,30 +86,6 @@ public abstract class AbstractScalarFunctionTestCase extends AbstractFunctionTes
         return parameterSuppliersFromTypedData(anyNullIsNull(randomizeBytesRefsOffset(suppliers), nullsExpectedType, evaluatorToString));
     }

-    /**
-     * Converts a list of test cases into a list of parameter suppliers.
-     * Also, adds a default set of extra test cases.
-     * <p>
-     * Use if possible, as this method may get updated with new checks in the future.
-     * </p>
-     *
-     * @param nullsExpectedType See {@link #anyNullIsNull(List, ExpectedType, ExpectedEvaluatorToString)}
-     * @param evaluatorToString See {@link #anyNullIsNull(List, ExpectedType, ExpectedEvaluatorToString)}
-     */
-    protected static Iterable<Object[]> parameterSuppliersFromTypedDataWithDefaultChecks(
-        ExpectedType nullsExpectedType,
-        ExpectedEvaluatorToString evaluatorToString,
-        List<TestCaseSupplier> suppliers,
-        PositionalErrorMessageSupplier positionalErrorMessageSupplier
-    ) {
-        return parameterSuppliersFromTypedData(
-            errorsForCasesWithoutExamples(
-                anyNullIsNull(randomizeBytesRefsOffset(suppliers), nullsExpectedType, evaluatorToString),
-                positionalErrorMessageSupplier
-            )
-        );
-    }
-
     public final void testEvaluate() {
         assumeTrue("Can't build evaluator", testCase.canBuildEvaluator());
         boolean readFloating = randomBoolean();
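Unlike the aggregation base class, the deprecated scalar-function variants of parameterSuppliersFromTypedDataWithDefaultChecks are deleted outright rather than renamed; per the removed @deprecated note, their error-message checking moves to per-function subclasses of ErrorsForCasesWithoutExamplesTestCase, which the note described as much faster.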


@@ -0,0 +1,37 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.aggregate;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.expression.function.ErrorsForCasesWithoutExamplesTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.hamcrest.Matcher;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class AvgErrorTests extends ErrorsForCasesWithoutExamplesTestCase {
+    @Override
+    protected List<TestCaseSupplier> cases() {
+        return paramsToSuppliers(AvgTests.parameters());
+    }
+
+    @Override
+    protected Expression build(Source source, List<Expression> args) {
+        return new Avg(source, args.get(0));
+    }
+
+    @Override
+    protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
+        return equalTo(typeErrorMessage(false, validPerPosition, signature, (v, p) -> "numeric except unsigned_long or counter types"));
+    }
+}
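AvgErrorTests shows the new pattern concretely: it reuses the positive cases from AvgTests.parameters() via paramsToSuppliers and derives the invalid-signature cases from them, so type-error coverage runs once per function class instead of being folded into every parameterized test case. The MaxErrorTests, MinErrorTests, and PercentileErrorTests files below follow the same template, differing only in the constructor and expected error text.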


@@ -53,7 +53,7 @@ public class AvgTests extends AbstractAggregationTestCase {
             )
         );

-        return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers, true, (v, p) -> "numeric except unsigned_long or counter types");
+        return parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(suppliers, true);
     }

     @Override


@@ -93,7 +93,6 @@ public class CountDistinctTests extends AbstractAggregationTestCase {
         }

         // "No rows" expects 0 here instead of null
-        // return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers);
         return parameterSuppliersFromTypedData(randomizeBytesRefsOffset(suppliers));
     }


@@ -82,7 +82,6 @@ public class CountTests extends AbstractAggregationTestCase {
         }

         // "No rows" expects 0 here instead of null
-        // return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers);
         return parameterSuppliersFromTypedData(randomizeBytesRefsOffset(suppliers));
     }


@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.aggregate;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.expression.function.ErrorsForCasesWithoutExamplesTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.hamcrest.Matcher;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class MaxErrorTests extends ErrorsForCasesWithoutExamplesTestCase {
+    @Override
+    protected List<TestCaseSupplier> cases() {
+        return paramsToSuppliers(MaxTests.parameters());
+    }
+
+    @Override
+    protected Expression build(Source source, List<Expression> args) {
+        return new Max(source, args.get(0));
+    }
+
+    @Override
+    protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
+        return equalTo(
+            typeErrorMessage(false, validPerPosition, signature, (v, p) -> "representable except unsigned_long and spatial types")
+        );
+    }
+}


@@ -157,11 +157,7 @@ public class MaxTests extends AbstractAggregationTestCase {
             )
         );

-        return parameterSuppliersFromTypedDataWithDefaultChecks(
-            suppliers,
-            false,
-            (v, p) -> "representable except unsigned_long and spatial types"
-        );
+        return parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(suppliers, false);
     }

     @Override


@@ -39,7 +39,7 @@ public class MedianAbsoluteDeviationTests extends AbstractAggregationTestCase {
             MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true)
         ).flatMap(List::stream).map(MedianAbsoluteDeviationTests::makeSupplier).toList();

-        return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers);
+        return parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(suppliers, true);
     }

     @Override


@@ -73,7 +73,7 @@ public class MedianTests extends AbstractAggregationTestCase {
             )
         );

-        return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers);
+        return parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(suppliers, true);
     }

     @Override


@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.aggregate;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.expression.function.ErrorsForCasesWithoutExamplesTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.hamcrest.Matcher;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class MinErrorTests extends ErrorsForCasesWithoutExamplesTestCase {
+    @Override
+    protected List<TestCaseSupplier> cases() {
+        return paramsToSuppliers(MinTests.parameters());
+    }
+
+    @Override
+    protected Expression build(Source source, List<Expression> args) {
+        return new Min(source, args.get(0));
+    }
+
+    @Override
+    protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
+        return equalTo(
+            typeErrorMessage(false, validPerPosition, signature, (v, p) -> "representable except unsigned_long and spatial types")
+        );
+    }
+}


@@ -157,11 +157,7 @@ public class MinTests extends AbstractAggregationTestCase {
             )
         );

-        return parameterSuppliersFromTypedDataWithDefaultChecks(
-            suppliers,
-            false,
-            (v, p) -> "representable except unsigned_long and spatial types"
-        );
+        return parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(suppliers, false);
     }

     @Override


@@ -0,0 +1,37 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.aggregate;
+
+import org.elasticsearch.xpack.esql.core.expression.Expression;
+import org.elasticsearch.xpack.esql.core.tree.Source;
+import org.elasticsearch.xpack.esql.core.type.DataType;
+import org.elasticsearch.xpack.esql.expression.function.ErrorsForCasesWithoutExamplesTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.hamcrest.Matcher;
+
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class PercentileErrorTests extends ErrorsForCasesWithoutExamplesTestCase {
+    @Override
+    protected List<TestCaseSupplier> cases() {
+        return paramsToSuppliers(PercentileTests.parameters());
+    }
+
+    @Override
+    protected Expression build(Source source, List<Expression> args) {
+        return new Percentile(source, args.get(0), args.get(1));
+    }
+
+    @Override
+    protected Matcher<String> expectedTypeErrorMatcher(List<Set<DataType>> validPerPosition, List<DataType> signature) {
+        return equalTo(typeErrorMessage(true, validPerPosition, signature, (v, p) -> "numeric except unsigned_long"));
+    }
+}


@@ -53,7 +53,7 @@ public class PercentileTests extends AbstractAggregationTestCase {
             }
         }

-        return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers, false, (v, p) -> "numeric except unsigned_long");
+        return parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(suppliers, false);
     }

     @Override


@@ -47,7 +47,6 @@ public class SpatialCentroidTests extends AbstractAggregationTestCase {
         ).flatMap(List::stream).map(SpatialCentroidTests::makeSupplier).toList();

         // The withNoRowsExpectingNull() cases don't work here, as this aggregator doesn't return nulls.
-        // return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers);
         return parameterSuppliersFromTypedData(randomizeBytesRefsOffset(suppliers));
     }


@@ -48,7 +48,6 @@ public class SpatialExtentTests extends AbstractAggregationTestCase {
         ).flatMap(List::stream).map(SpatialExtentTests::makeSupplier).toList();

         // The withNoRowsExpectingNull() cases don't work here, as this aggregator doesn't return nulls.
-        // return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers);
         return parameterSuppliersFromTypedData(randomizeBytesRefsOffset(suppliers));
     }


@@ -41,7 +41,7 @@ public class StdDevTests extends AbstractAggregationTestCase {
             MultiRowTestCaseSupplier.doubleCases(1, 1000, -Double.MAX_VALUE, Double.MAX_VALUE, true)
         ).flatMap(List::stream).map(StdDevTests::makeSupplier).collect(Collectors.toCollection(() -> suppliers));

-        return parameterSuppliersFromTypedDataWithDefaultChecks(suppliers);
+        return parameterSuppliersFromTypedDataWithDefaultChecksNoErrors(suppliers, true);
     }

     @Override

Some files were not shown because too many files have changed in this diff.