mirror of https://github.com/elastic/elasticsearch.git
synced 2025-04-24 23:27:25 -04:00

Merge revision 5c00341c2b into multi-project

commit 680e7a6979
315 changed files with 11381 additions and 4339 deletions
@@ -261,7 +261,8 @@ public class ValuesSourceReaderBenchmark {
                 null,
                 false,
                 null,
-                null
+                null,
+                false
             ).blockLoader(null);
         }
@@ -83,7 +83,7 @@ public class ScriptScoreBenchmark {
     private final ScriptModule scriptModule = new ScriptModule(Settings.EMPTY, pluginsService.filterPlugins(ScriptPlugin.class).toList());

     private final Map<String, MappedFieldType> fieldTypes = Map.ofEntries(
-        Map.entry("n", new NumberFieldType("n", NumberType.LONG, false, false, true, true, null, Map.of(), null, false, null, null))
+        Map.entry("n", new NumberFieldType("n", NumberType.LONG, false, false, true, true, null, Map.of(), null, false, null, null, false))
     );
     private final IndexFieldDataCache fieldDataCache = new IndexFieldDataCache.None();
     private final CircuitBreakerService breakerService = new NoneCircuitBreakerService();
@@ -32,7 +32,9 @@ develocity {
     // Automatically publish scans from Elasticsearch CI
     if (onCI) {
         publishing.onlyIf { true }
-        server = 'https://gradle-enterprise.elastic.co'
+        if(server.isPresent() == false) {
+            server = 'https://gradle-enterprise.elastic.co'
+        }
     } else if( server.isPresent() == false) {
         publishing.onlyIf { false }
     }
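Net effect of the Develocity change: on CI, the build-scan server is now defaulted to https://gradle-enterprise.elastic.co only when no server has been configured yet, so an explicitly configured server is no longer silently overwritten.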
@@ -15,6 +15,7 @@ import com.github.javaparser.ast.NodeList;
 import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration;
 import com.github.javaparser.ast.body.FieldDeclaration;
 import com.github.javaparser.ast.body.VariableDeclarator;
+import com.github.javaparser.ast.expr.Expression;
 import com.github.javaparser.ast.expr.NameExpr;
 import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter;
 import com.google.common.annotations.VisibleForTesting;
@@ -33,6 +34,7 @@ import java.util.NavigableMap;
 import java.util.Objects;
 import java.util.Optional;
+import java.util.TreeMap;
 import java.util.function.Function;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
@@ -51,6 +53,8 @@ public class UpdateVersionsTask extends AbstractVersionsTask {
     private boolean setCurrent;
     @Nullable
     private Version removeVersion;
+    @Nullable
+    private String addTransportVersion;

     @Inject
     public UpdateVersionsTask(BuildLayout layout) {
@@ -62,6 +66,11 @@ public class UpdateVersionsTask extends AbstractVersionsTask {
         this.addVersion = Version.fromString(version);
     }

+    @Option(option = "add-transport-version", description = "Specifies transport version to add")
+    public void addTransportVersion(String transportVersion) {
+        this.addTransportVersion = transportVersion;
+    }
+
     @Option(option = "set-current", description = "Set the 'current' constant to the new version")
     public void setCurrent(boolean setCurrent) {
         this.setCurrent = setCurrent;
@@ -87,15 +96,18 @@ public class UpdateVersionsTask extends AbstractVersionsTask {

     @TaskAction
     public void executeTask() throws IOException {
-        if (addVersion == null && removeVersion == null) {
+        if (addVersion == null && removeVersion == null && addTransportVersion == null) {
             throw new IllegalArgumentException("No versions to add or remove specified");
         }
         if (setCurrent && addVersion == null) {
             throw new IllegalArgumentException("No new version added to set as the current version");
         }
-        if (Objects.equals(addVersion, removeVersion)) {
+        if (addVersion != null && removeVersion != null && Objects.equals(addVersion, removeVersion)) {
             throw new IllegalArgumentException("Same version specified to add and remove");
         }
+        if (addTransportVersion != null && addTransportVersion.split(":").length != 2) {
+            throw new IllegalArgumentException("Transport version specified must be in the format '<constant>:<version-id>'");
+        }

         Path versionJava = rootDir.resolve(VERSION_FILE_PATH);
         CompilationUnit file = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionJava));
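For reviewers wanting to try the new option: going by the @Option declarations and the format check above, an invocation would look roughly like the line below (the exact task name is not visible in this diff, so it is illustrative only):

    ./gradlew updateVersions --add-transport-version=NEXT_TRANSPORT_VERSION:1005000

i.e. a constant name and a raw integer id joined by ':', the same pair the tests below pass straight to addTransportVersionConstant.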
@@ -115,6 +127,18 @@ public class UpdateVersionsTask extends AbstractVersionsTask {
                 modifiedFile = removed;
             }
         }
+        if (addTransportVersion != null) {
+            var constant = addTransportVersion.split(":")[0];
+            var versionId = Integer.parseInt(addTransportVersion.split(":")[1]);
+            LOGGER.lifecycle("Adding transport version constant [{}] with id [{}]", constant, versionId);
+
+            var transportVersionsFile = rootDir.resolve(TRANSPORT_VERSIONS_FILE_PATH);
+            var transportVersions = LexicalPreservingPrinter.setup(StaticJavaParser.parse(transportVersionsFile));
+            var modified = addTransportVersionConstant(transportVersions, constant, versionId);
+            if (modified.isPresent()) {
+                writeOutNewContents(transportVersionsFile, modified.get());
+            }
+        }

         if (modifiedFile.isPresent()) {
             writeOutNewContents(versionJava, modifiedFile.get());
@@ -161,6 +185,51 @@ public class UpdateVersionsTask extends AbstractVersionsTask {
         return Optional.of(versionJava);
     }

+    @VisibleForTesting
+    static Optional<CompilationUnit> addTransportVersionConstant(CompilationUnit transportVersions, String constant, int versionId) {
+        ClassOrInterfaceDeclaration transportVersionsClass = transportVersions.getClassByName("TransportVersions").get();
+        if (transportVersionsClass.getFieldByName(constant).isPresent()) {
+            LOGGER.lifecycle("New transport version constant [{}] already present, skipping", constant);
+            return Optional.empty();
+        }
+
+        TreeMap<Integer, FieldDeclaration> versions = transportVersionsClass.getFields()
+            .stream()
+            .filter(f -> f.getElementType().asString().equals("TransportVersion"))
+            .filter(
+                f -> f.getVariables().stream().limit(1).allMatch(v -> v.getInitializer().filter(Expression::isMethodCallExpr).isPresent())
+            )
+            .filter(f -> f.getVariable(0).getInitializer().get().asMethodCallExpr().getNameAsString().endsWith("def"))
+            .collect(
+                Collectors.toMap(
+                    f -> f.getVariable(0)
+                        .getInitializer()
+                        .get()
+                        .asMethodCallExpr()
+                        .getArgument(0)
+                        .asIntegerLiteralExpr()
+                        .asNumber()
+                        .intValue(),
+                    Function.identity(),
+                    (f1, f2) -> {
+                        throw new IllegalStateException("Duplicate version constant " + f1);
+                    },
+                    TreeMap::new
+                )
+            );
+
+        // find the version this should be inserted after
+        Map.Entry<Integer, FieldDeclaration> previousVersion = versions.lowerEntry(versionId);
+        if (previousVersion == null) {
+            throw new IllegalStateException(String.format("Could not find previous version to [%s]", versionId));
+        }
+
+        FieldDeclaration newTransportVersion = createNewTransportVersionConstant(previousVersion.getValue(), constant, versionId);
+        transportVersionsClass.getMembers().addAfter(newTransportVersion, previousVersion.getValue());
+
+        return Optional.of(transportVersions);
+    }
+
     private static FieldDeclaration createNewVersionConstant(FieldDeclaration lastVersion, String newName, String newExpr) {
         return new FieldDeclaration(
             new NodeList<>(lastVersion.getModifiers()),
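A note on the insertion logic: the versions map is keyed by the integer id parsed out of each def(...) initializer, and lowerEntry(versionId) returns the constant with the largest id strictly below the new one, so the new field is placed immediately after its numeric predecessor rather than at the end of the class. With the ids from the tests below, inserting PATCH_TRANSPORT_VERSION (1_003_0_01) lands right after SOME_OTHER_VERSION (1_003_0_00) and before YET_ANOTHER_VERSION (1_004_0_00).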
@@ -172,6 +241,29 @@ public class UpdateVersionsTask extends AbstractVersionsTask {
         );
     }

+    private static FieldDeclaration createNewTransportVersionConstant(FieldDeclaration lastVersion, String newName, int newId) {
+        return new FieldDeclaration(
+            new NodeList<>(lastVersion.getModifiers()),
+            new VariableDeclarator(
+                lastVersion.getCommonType(),
+                newName,
+                StaticJavaParser.parseExpression(String.format("def(%s)", formatTransportVersionId(newId)))
+            )
+        );
+    }
+
+    private static String formatTransportVersionId(int id) {
+        String idString = Integer.toString(id);
+
+        return new StringBuilder(idString.substring(idString.length() - 2, idString.length())).insert(0, "_")
+            .insert(0, idString.substring(idString.length() - 3, idString.length() - 2))
+            .insert(0, "_")
+            .insert(0, idString.substring(idString.length() - 6, idString.length() - 3))
+            .insert(0, "_")
+            .insert(0, idString.substring(0, idString.length() - 6))
+            .toString();
+    }
+
     @VisibleForTesting
     static Optional<CompilationUnit> removeVersionConstant(CompilationUnit versionJava, Version version) {
         String removeFieldName = toVersionField(version);
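To make the substring arithmetic in formatTransportVersionId easier to verify, here is a worked trace using the id exercised by the tests below (comments only, no new behaviour):

    // formatTransportVersionId(1005000): idString = "1005000", length 7
    //   new StringBuilder(substring(5, 7))  -> "00"
    //   .insert(0, "_")                     -> "_00"
    //   .insert(0, substring(4, 5))         -> "0_00"
    //   .insert(0, "_")                     -> "_0_00"
    //   .insert(0, substring(1, 4))         -> "005_0_00"
    //   .insert(0, "_")                     -> "_005_0_00"
    //   .insert(0, substring(0, 1))         -> "1_005_0_00"
    // The generated constant therefore reads def(1_005_0_00), matching the existing literals.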
@@ -239,6 +239,96 @@ public class UpdateVersionsTaskTests {
         assertThat(field.isPresent(), is(false));
     }

+    @Test
+    public void addTransportVersion() throws Exception {
+        var transportVersions = """
+            public class TransportVersions {
+                public static final TransportVersion V_1_0_0 = def(1_000_0_00);
+                public static final TransportVersion V_1_1_0 = def(1_001_0_00);
+                public static final TransportVersion V_1_2_0 = def(1_002_0_00);
+                public static final TransportVersion V_1_2_1 = def(1_002_0_01);
+                public static final TransportVersion V_1_2_2 = def(1_002_0_02);
+                public static final TransportVersion SOME_OTHER_VERSION = def(1_003_0_00);
+                public static final TransportVersion YET_ANOTHER_VERSION = def(1_004_0_00);
+                public static final TransportVersion MINIMUM_COMPATIBLE = V_1_0_0;
+            }
+            """;
+
+        var expectedTransportVersions = """
+            public class TransportVersions {
+
+                public static final TransportVersion V_1_0_0 = def(1_000_0_00);
+
+                public static final TransportVersion V_1_1_0 = def(1_001_0_00);
+
+                public static final TransportVersion V_1_2_0 = def(1_002_0_00);
+
+                public static final TransportVersion V_1_2_1 = def(1_002_0_01);
+
+                public static final TransportVersion V_1_2_2 = def(1_002_0_02);
+
+                public static final TransportVersion SOME_OTHER_VERSION = def(1_003_0_00);
+
+                public static final TransportVersion YET_ANOTHER_VERSION = def(1_004_0_00);
+
+                public static final TransportVersion NEXT_TRANSPORT_VERSION = def(1_005_0_00);
+
+                public static final TransportVersion MINIMUM_COMPATIBLE = V_1_0_0;
+            }
+            """;
+
+        var unit = StaticJavaParser.parse(transportVersions);
+        var result = UpdateVersionsTask.addTransportVersionConstant(unit, "NEXT_TRANSPORT_VERSION", 1_005_0_00);
+
+        assertThat(result.isPresent(), is(true));
+        assertThat(result.get(), hasToString(expectedTransportVersions));
+    }
+
+    @Test
+    public void addTransportVersionPatch() throws Exception {
+        var transportVersions = """
+            public class TransportVersions {
+                public static final TransportVersion V_1_0_0 = def(1_000_0_00);
+                public static final TransportVersion V_1_1_0 = def(1_001_0_00);
+                public static final TransportVersion V_1_2_0 = def(1_002_0_00);
+                public static final TransportVersion V_1_2_1 = def(1_002_0_01);
+                public static final TransportVersion V_1_2_2 = def(1_002_0_02);
+                public static final TransportVersion SOME_OTHER_VERSION = def(1_003_0_00);
+                public static final TransportVersion YET_ANOTHER_VERSION = def(1_004_0_00);
+                public static final TransportVersion MINIMUM_COMPATIBLE = V_1_0_0;
+            }
+            """;
+
+        var expectedTransportVersions = """
+            public class TransportVersions {
+
+                public static final TransportVersion V_1_0_0 = def(1_000_0_00);
+
+                public static final TransportVersion V_1_1_0 = def(1_001_0_00);
+
+                public static final TransportVersion V_1_2_0 = def(1_002_0_00);
+
+                public static final TransportVersion V_1_2_1 = def(1_002_0_01);
+
+                public static final TransportVersion V_1_2_2 = def(1_002_0_02);
+
+                public static final TransportVersion SOME_OTHER_VERSION = def(1_003_0_00);
+
+                public static final TransportVersion PATCH_TRANSPORT_VERSION = def(1_003_0_01);
+
+                public static final TransportVersion YET_ANOTHER_VERSION = def(1_004_0_00);
+
+                public static final TransportVersion MINIMUM_COMPATIBLE = V_1_0_0;
+            }
+            """;
+
+        var unit = StaticJavaParser.parse(transportVersions);
+        var result = UpdateVersionsTask.addTransportVersionConstant(unit, "PATCH_TRANSPORT_VERSION", 1_003_0_01);
+
+        assertThat(result.isPresent(), is(true));
+        assertThat(result.get(), hasToString(expectedTransportVersions));
+    }
+
     private static Optional<FieldDeclaration> findFirstField(Node node, String name) {
         return node.findFirst(FieldDeclaration.class, f -> f.getVariable(0).getName().getIdentifier().equals(name));
     }
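Note on the expected strings above: hasToString compares against CompilationUnit.toString(), i.e. JavaParser's default pretty printer (the test does not go through the LexicalPreservingPrinter the task itself uses), which is why the expected output gains a blank line between members even though the input text block has none.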
@@ -38,6 +38,7 @@ import org.elasticsearch.core.SuppressForbidden;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.jdk.JarHell;
+import org.elasticsearch.jdk.RuntimeVersionFeature;
 import org.elasticsearch.plugin.scanner.ClassReaders;
 import org.elasticsearch.plugin.scanner.NamedComponentScanner;
 import org.elasticsearch.plugins.Platforms;
@@ -922,10 +923,12 @@ public class InstallPluginAction implements Closeable {
      */
     private PluginDescriptor installPlugin(InstallablePlugin descriptor, Path tmpRoot, List<Path> deleteOnFailure) throws Exception {
         final PluginDescriptor info = loadPluginInfo(tmpRoot);
-        PluginPolicyInfo pluginPolicy = PolicyUtil.getPluginPolicyInfo(tmpRoot, env.tmpDir());
-        if (pluginPolicy != null) {
-            Set<String> permissions = PluginSecurity.getPermissionDescriptions(pluginPolicy, env.tmpDir());
-            PluginSecurity.confirmPolicyExceptions(terminal, permissions, batch);
+        if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
+            PluginPolicyInfo pluginPolicy = PolicyUtil.getPluginPolicyInfo(tmpRoot, env.tmpDir());
+            if (pluginPolicy != null) {
+                Set<String> permissions = PluginSecurity.getPermissionDescriptions(pluginPolicy, env.tmpDir());
+                PluginSecurity.confirmPolicyExceptions(terminal, permissions, batch);
+            }
         }

         // Validate that the downloaded plugin's ID matches what we expect from the descriptor. The
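The upshot of this change: plugin policy parsing and the interactive permission confirmation now run only when RuntimeVersionFeature.isSecurityManagerAvailable() reports that the running JDK still ships a SecurityManager; on newer runtimes, where it is no longer available, the whole block is skipped.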
docs/changelog/119886.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 119886
+summary: Initial support for unmapped fields
+area: ES|QL
+type: feature
+issues: []

docs/changelog/121370.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 121370
+summary: Improve SLM Health Indicator to cover missing snapshot
+area: ILM+SLM
+type: enhancement
+issues: []

docs/changelog/122066.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 122066
+summary: Adding elser default endpoint for EIS
+area: Machine Learning
+type: enhancement
+issues: []

docs/changelog/122074.yaml (new file)
@@ -0,0 +1,8 @@
+pr: 122074
+summary: If the Transform is configured to write to an alias as its destination index,
+  when the delete_dest_index parameter is set to true, then the Delete API will now
+  delete the write index backing the alias
+area: Transform
+type: bug
+issues:
+  - 121913

docs/changelog/122199.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 122199
+summary: Fix issues that prevents using search only snapshots for indices that use index sorting. This is includes Logsdb and time series indices.
+area: Logs
+type: bug
+issues: []

docs/changelog/122224.yaml (new file)
@@ -0,0 +1,6 @@
+pr: 122224
+summary: Enable the use of nested field type with index.mode=time_series
+area: Mapping
+type: enhancement
+issues:
+  - 120874

docs/changelog/122257.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 122257
+summary: Revive inlinestats
+area: ES|QL
+type: bug
+issues: []

docs/changelog/122272.yaml (new file)
@@ -0,0 +1,6 @@
+pr: 122272
+summary: "[Inference API] Rename `model_id` prop to model in EIS sparse inference\
+  \ request body"
+area: Inference
+type: enhancement
+issues: []

docs/changelog/122280.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 122280
+summary: Use `FallbackSyntheticSourceBlockLoader` for number fields
+area: Mapping
+type: enhancement
+issues: []

docs/changelog/122326.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 122326
+summary: System Index Migration Failure Results in a Non-Recoverable State
+area: Infra/Core
+type: bug
+issues: []

docs/changelog/122357.yaml (new file)
@@ -0,0 +1,6 @@
+pr: 122357
+summary: Handle search timeout in `SuggestPhase`
+area: Search
+type: bug
+issues:
+  - 122186

docs/changelog/122365.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 122365
+summary: Fix handling of auto expand replicas for stateless indices
+area: "Search"
+type: bug
+issues: []

docs/changelog/122417.yaml (new file)
@@ -0,0 +1,6 @@
+pr: 122417
+summary: Fix listener leak in exchange service
+area: ES|QL
+type: bug
+issues:
+  - 122271

docs/changelog/122425.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 122425
+summary: Fix synthetic source bug that would mishandle nested `dense_vector` fields
+area: Mapping
+type: bug
+issues: []

docs/changelog/122427.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 122427
+summary: Improve size limiting string message
+area: Infra/Core
+type: enhancement
+issues: []

docs/changelog/122496.yaml (new file)
@@ -0,0 +1,5 @@
+pr: 122496
+summary: Deduplicate `IngestStats` and `IngestStats.Stats` identity records when deserializing
+area: Ingest Node
+type: bug
+issues: []
@@ -23,6 +23,7 @@ public class TimeValue implements Comparable<TimeValue> {
     public static final TimeValue MAX_VALUE = new TimeValue(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
     public static final TimeValue THIRTY_SECONDS = new TimeValue(30, TimeUnit.SECONDS);
     public static final TimeValue ONE_MINUTE = new TimeValue(1, TimeUnit.MINUTES);
+    public static final TimeValue ONE_HOUR = new TimeValue(1, TimeUnit.HOURS);

     private static final long C0 = 1L;
     private static final long C1 = C0 * 1000L;
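A quick illustration of the new constant (assertions written for this note, assuming the usual TimeValue helpers behave as elsewhere in the codebase):

    // ONE_HOUR wraps (1, TimeUnit.HOURS)
    assert TimeValue.ONE_HOUR.millis() == 60 * 60 * 1000L;
    assert TimeValue.ONE_HOUR.equals(TimeValue.timeValueHours(1));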
@@ -44,10 +44,11 @@ public class InstrumentationServiceImpl implements InstrumentationService {
         return InstrumenterImpl.create(clazz, methods);
     }

-    @Override
-    public Map<MethodKey, CheckMethod> lookupMethods(Class<?> checkerClass) throws IOException {
-        Map<MethodKey, CheckMethod> methodsToInstrument = new HashMap<>();
+    private interface CheckerMethodVisitor {
+        void visit(Class<?> currentClass, int access, String checkerMethodName, String checkerMethodDescriptor);
+    }
+
+    private void visitClassAndSupers(Class<?> checkerClass, CheckerMethodVisitor checkerMethodVisitor) throws ClassNotFoundException {
         Set<Class<?>> visitedClasses = new HashSet<>();
         ArrayDeque<Class<?>> classesToVisit = new ArrayDeque<>(Collections.singleton(checkerClass));
         while (classesToVisit.isEmpty() == false) {
@@ -57,52 +58,61 @@ public class InstrumentationServiceImpl implements InstrumentationService {
             }
             visitedClasses.add(currentClass);

-            var classFileInfo = InstrumenterImpl.getClassFileInfo(currentClass);
-            ClassReader reader = new ClassReader(classFileInfo.bytecodes());
-            ClassVisitor visitor = new ClassVisitor(Opcodes.ASM9) {
-
-                @Override
-                public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) {
-                    super.visit(version, access, name, signature, superName, interfaces);
-                    try {
-                        if (OBJECT_INTERNAL_NAME.equals(superName) == false) {
-                            classesToVisit.add(Class.forName(Type.getObjectType(superName).getClassName()));
-                        }
-                        for (var interfaceName : interfaces) {
-                            classesToVisit.add(Class.forName(Type.getObjectType(interfaceName).getClassName()));
-                        }
-                    } catch (ClassNotFoundException e) {
-                        throw new IllegalArgumentException("Cannot inspect checker class " + checkerClass.getName(), e);
-                    }
-                }
-
-                @Override
-                public MethodVisitor visitMethod(
-                    int access,
-                    String checkerMethodName,
-                    String checkerMethodDescriptor,
-                    String signature,
-                    String[] exceptions
-                ) {
-                    var mv = super.visitMethod(access, checkerMethodName, checkerMethodDescriptor, signature, exceptions);
-                    if (checkerMethodName.startsWith(InstrumentationService.CHECK_METHOD_PREFIX)) {
-                        var checkerMethodArgumentTypes = Type.getArgumentTypes(checkerMethodDescriptor);
-                        var methodToInstrument = parseCheckerMethodSignature(checkerMethodName, checkerMethodArgumentTypes);
-
-                        var checkerParameterDescriptors = Arrays.stream(checkerMethodArgumentTypes).map(Type::getDescriptor).toList();
-                        var checkMethod = new CheckMethod(
-                            Type.getInternalName(currentClass),
-                            checkerMethodName,
-                            checkerParameterDescriptors
-                        );
-
-                        methodsToInstrument.putIfAbsent(methodToInstrument, checkMethod);
-                    }
-                    return mv;
-                }
-            };
-            reader.accept(visitor, 0);
+            try {
+                var classFileInfo = InstrumenterImpl.getClassFileInfo(currentClass);
+                ClassReader reader = new ClassReader(classFileInfo.bytecodes());
+                ClassVisitor visitor = new ClassVisitor(Opcodes.ASM9) {
+
+                    @Override
+                    public void visit(int version, int access, String name, String signature, String superName, String[] interfaces) {
+                        super.visit(version, access, name, signature, superName, interfaces);
+                        try {
+                            if (OBJECT_INTERNAL_NAME.equals(superName) == false) {
+                                classesToVisit.add(Class.forName(Type.getObjectType(superName).getClassName()));
+                            }
+                            for (var interfaceName : interfaces) {
+                                classesToVisit.add(Class.forName(Type.getObjectType(interfaceName).getClassName()));
+                            }
+                        } catch (ClassNotFoundException e) {
+                            throw new IllegalArgumentException("Cannot inspect checker class " + currentClass.getName(), e);
+                        }
+                    }
+
+                    @Override
+                    public MethodVisitor visitMethod(
+                        int access,
+                        String checkerMethodName,
+                        String checkerMethodDescriptor,
+                        String signature,
+                        String[] exceptions
+                    ) {
+                        var mv = super.visitMethod(access, checkerMethodName, checkerMethodDescriptor, signature, exceptions);
+                        checkerMethodVisitor.visit(currentClass, access, checkerMethodName, checkerMethodDescriptor);
+                        return mv;
+                    }
+                };
+                reader.accept(visitor, 0);
+            } catch (IOException e) {
+                throw new ClassNotFoundException("Cannot find a definition for class [" + checkerClass.getName() + "]", e);
+            }
         }
     }

+    @Override
+    public Map<MethodKey, CheckMethod> lookupMethods(Class<?> checkerClass) throws ClassNotFoundException {
+        Map<MethodKey, CheckMethod> methodsToInstrument = new HashMap<>();
+
+        visitClassAndSupers(checkerClass, (currentClass, access, checkerMethodName, checkerMethodDescriptor) -> {
+            if (checkerMethodName.startsWith(InstrumentationService.CHECK_METHOD_PREFIX)) {
+                var checkerMethodArgumentTypes = Type.getArgumentTypes(checkerMethodDescriptor);
+                var methodToInstrument = parseCheckerMethodSignature(checkerMethodName, checkerMethodArgumentTypes);
+
+                var checkerParameterDescriptors = Arrays.stream(checkerMethodArgumentTypes).map(Type::getDescriptor).toList();
+                var checkMethod = new CheckMethod(Type.getInternalName(currentClass), checkerMethodName, checkerParameterDescriptors);
+                methodsToInstrument.putIfAbsent(methodToInstrument, checkMethod);
+            }
+        });
+
+        return methodsToInstrument;
+    }
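A note on the traversal contract introduced here: visitClassAndSupers seeds classesToVisit with the checker class itself and, as each class header is visited, enqueues its superclass (until java.lang.Object) and its interfaces, so check methods declared anywhere up the hierarchy are found. In lookupMethods, putIfAbsent then means the first declaration encountered — the most derived one — wins for a given target method. The new testLookupImplementationMethodWithInheritanceOnChecker below depends on this: it looks up checkInstanceMethodManual via TestCheckerDerived3 even though the method is declared on TestCheckerMixed.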
@@ -110,14 +120,14 @@ public class InstrumentationServiceImpl implements InstrumentationService {
     @Override
     public InstrumentationInfo lookupImplementationMethod(
         Class<?> targetSuperclass,
-        String methodName,
+        String targetMethodName,
         Class<?> implementationClass,
         Class<?> checkerClass,
         String checkMethodName,
         Class<?>... parameterTypes
     ) throws NoSuchMethodException, ClassNotFoundException {

-        var targetMethod = targetSuperclass.getDeclaredMethod(methodName, parameterTypes);
+        var targetMethod = targetSuperclass.getDeclaredMethod(targetMethodName, parameterTypes);
         var implementationMethod = implementationClass.getMethod(targetMethod.getName(), targetMethod.getParameterTypes());
         validateTargetMethod(implementationClass, targetMethod, implementationMethod);
@@ -128,33 +138,15 @@ public class InstrumentationServiceImpl implements InstrumentationService {

         CheckMethod[] checkMethod = new CheckMethod[1];

-        try {
-            InstrumenterImpl.ClassFileInfo classFileInfo = InstrumenterImpl.getClassFileInfo(checkerClass);
-            ClassReader reader = new ClassReader(classFileInfo.bytecodes());
-            ClassVisitor visitor = new ClassVisitor(Opcodes.ASM9) {
-                @Override
-                public MethodVisitor visitMethod(
-                    int access,
-                    String methodName,
-                    String methodDescriptor,
-                    String signature,
-                    String[] exceptions
-                ) {
-                    var mv = super.visitMethod(access, methodName, methodDescriptor, signature, exceptions);
-                    if (methodName.equals(checkMethodName)) {
-                        var methodArgumentTypes = Type.getArgumentTypes(methodDescriptor);
-                        if (Arrays.equals(methodArgumentTypes, checkMethodArgumentTypes)) {
-                            var checkerParameterDescriptors = Arrays.stream(methodArgumentTypes).map(Type::getDescriptor).toList();
-                            checkMethod[0] = new CheckMethod(Type.getInternalName(checkerClass), methodName, checkerParameterDescriptors);
-                        }
-                    }
-                    return mv;
-                }
-            };
-            reader.accept(visitor, 0);
-        } catch (IOException e) {
-            throw new ClassNotFoundException("Cannot find a definition for class [" + checkerClass.getName() + "]", e);
-        }
+        visitClassAndSupers(checkerClass, (currentClass, access, methodName, methodDescriptor) -> {
+            if (methodName.equals(checkMethodName)) {
+                var methodArgumentTypes = Type.getArgumentTypes(methodDescriptor);
+                if (Arrays.equals(methodArgumentTypes, checkMethodArgumentTypes)) {
+                    var checkerParameterDescriptors = Arrays.stream(methodArgumentTypes).map(Type::getDescriptor).toList();
+                    checkMethod[0] = new CheckMethod(Type.getInternalName(currentClass), methodName, checkerParameterDescriptors);
+                }
+            }
+        });

         if (checkMethod[0] == null) {
             throw new NoSuchMethodException(
@@ -152,14 +152,13 @@ public class InstrumenterImpl implements Instrumenter {
         if (isAnnotationPresent == false) {
             boolean isStatic = (access & ACC_STATIC) != 0;
             boolean isCtor = "<init>".equals(name);
-            boolean hasReceiver = (isStatic || isCtor) == false;
             var key = new MethodKey(className, name, Stream.of(Type.getArgumentTypes(descriptor)).map(Type::getInternalName).toList());
             var instrumentationMethod = checkMethods.get(key);
             if (instrumentationMethod != null) {
-                // LOGGER.debug("Will instrument method {}", key);
+                // System.out.println("Will instrument method " + key);
                 return new EntitlementMethodVisitor(Opcodes.ASM9, mv, isStatic, isCtor, descriptor, instrumentationMethod);
             } else {
-                // LOGGER.trace("Will not instrument method {}", key);
+                // System.out.println("Will not instrument method " + key);
             }
         }
         return mv;
@@ -15,7 +15,6 @@ import org.elasticsearch.entitlement.instrumentation.MethodKey;
 import org.elasticsearch.test.ESTestCase;
 import org.objectweb.asm.Type;

-import java.io.IOException;
 import java.util.List;
 import java.util.Map;
@@ -90,7 +89,9 @@ public class InstrumentationServiceImplTests extends ESTestCase {
         void checkInstanceMethodManual(Class<?> clazz, TestTargetBaseClass that, int x, String y);
     }

-    public void testInstrumentationTargetLookup() throws IOException {
+    interface TestCheckerDerived3 extends TestCheckerMixed {}
+
+    public void testInstrumentationTargetLookup() throws ClassNotFoundException {
         Map<MethodKey, CheckMethod> checkMethods = instrumentationService.lookupMethods(TestChecker.class);

         assertThat(checkMethods, aMapWithSize(3));
@@ -143,7 +144,7 @@ public class InstrumentationServiceImplTests extends ESTestCase {
         );
     }

-    public void testInstrumentationTargetLookupWithOverloads() throws IOException {
+    public void testInstrumentationTargetLookupWithOverloads() throws ClassNotFoundException {
         Map<MethodKey, CheckMethod> checkMethods = instrumentationService.lookupMethods(TestCheckerOverloads.class);

         assertThat(checkMethods, aMapWithSize(2));
@@ -175,7 +176,7 @@ public class InstrumentationServiceImplTests extends ESTestCase {
         );
     }

-    public void testInstrumentationTargetLookupWithDerivedClass() throws IOException {
+    public void testInstrumentationTargetLookupWithDerivedClass() throws ClassNotFoundException {
         Map<MethodKey, CheckMethod> checkMethods = instrumentationService.lookupMethods(TestCheckerDerived2.class);

         assertThat(checkMethods, aMapWithSize(4));
@@ -244,7 +245,7 @@ public class InstrumentationServiceImplTests extends ESTestCase {
         );
     }

-    public void testInstrumentationTargetLookupWithCtors() throws IOException {
+    public void testInstrumentationTargetLookupWithCtors() throws ClassNotFoundException {
         Map<MethodKey, CheckMethod> checkMethods = instrumentationService.lookupMethods(TestCheckerCtors.class);

         assertThat(checkMethods, aMapWithSize(2));
@@ -276,7 +277,7 @@ public class InstrumentationServiceImplTests extends ESTestCase {
         );
     }

-    public void testInstrumentationTargetLookupWithExtraMethods() throws IOException {
+    public void testInstrumentationTargetLookupWithExtraMethods() throws ClassNotFoundException {
         Map<MethodKey, CheckMethod> checkMethods = instrumentationService.lookupMethods(TestCheckerMixed.class);

         assertThat(checkMethods, aMapWithSize(1));
@@ -371,7 +372,7 @@ public class InstrumentationServiceImplTests extends ESTestCase {
         );
     }

-    public void testLookupImplementationMethodWithInheritance() throws ClassNotFoundException, NoSuchMethodException {
+    public void testLookupImplementationMethodWithInheritanceOnTarget() throws ClassNotFoundException, NoSuchMethodException {
         var info = instrumentationService.lookupImplementationMethod(
             TestTargetBaseClass.class,
             "instanceMethod2",
@@ -409,6 +410,44 @@ public class InstrumentationServiceImplTests extends ESTestCase {
         );
     }

+    public void testLookupImplementationMethodWithInheritanceOnChecker() throws ClassNotFoundException, NoSuchMethodException {
+        var info = instrumentationService.lookupImplementationMethod(
+            TestTargetBaseClass.class,
+            "instanceMethod2",
+            TestTargetImplementationClass.class,
+            TestCheckerDerived3.class,
+            "checkInstanceMethodManual",
+            int.class,
+            String.class
+        );
+
+        assertThat(
+            info.targetMethod(),
+            equalTo(
+                new MethodKey(
+                    "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetIntermediateClass",
+                    "instanceMethod2",
+                    List.of("I", "java/lang/String")
+                )
+            )
+        );
+        assertThat(
+            info.checkMethod(),
+            equalTo(
+                new CheckMethod(
+                    "org/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestCheckerMixed",
+                    "checkInstanceMethodManual",
+                    List.of(
+                        "Ljava/lang/Class;",
+                        "Lorg/elasticsearch/entitlement/instrumentation/impl/InstrumentationServiceImplTests$TestTargetBaseClass;",
+                        "I",
+                        "Ljava/lang/String;"
+                    )
+                )
+            )
+        );
+    }
+
     public void testParseCheckerMethodSignatureStaticMethod() {
         var methodKey = InstrumentationServiceImpl.parseCheckerMethodSignature(
             "check$org_example_TestClass$$staticMethod",
@@ -35,6 +35,7 @@ import java.net.ServerSocket;
 import java.net.Socket;
 import java.net.SocketAddress;
 import java.net.SocketImplFactory;
+import java.net.URI;
 import java.net.URL;
 import java.net.URLStreamHandler;
 import java.net.URLStreamHandlerFactory;
@@ -50,16 +51,25 @@ import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
 import java.nio.channels.spi.SelectorProvider;
 import java.nio.charset.Charset;
 import java.nio.file.AccessMode;
 import java.nio.file.CopyOption;
 import java.nio.file.DirectoryStream;
 import java.nio.file.FileStore;
 import java.nio.file.LinkOption;
 import java.nio.file.OpenOption;
 import java.nio.file.Path;
 import java.nio.file.attribute.FileAttribute;
 import java.nio.file.attribute.UserPrincipal;
 import java.nio.file.spi.FileSystemProvider;
 import java.security.cert.CertStoreParameters;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 import java.util.TimeZone;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ForkJoinPool;
 import java.util.function.Consumer;

 import javax.net.ssl.HostnameVerifier;
@@ -500,6 +510,36 @@ public interface EntitlementChecker {
     //

+    // old io (ie File)
+    void check$java_io_File$createNewFile(Class<?> callerClass, File file);
+
+    void check$java_io_File$$createTempFile(Class<?> callerClass, String prefix, String suffix, File directory);
+
+    void check$java_io_File$delete(Class<?> callerClass, File file);
+
+    void check$java_io_File$deleteOnExit(Class<?> callerClass, File file);
+
+    void check$java_io_File$mkdir(Class<?> callerClass, File file);
+
+    void check$java_io_File$mkdirs(Class<?> callerClass, File file);
+
+    void check$java_io_File$renameTo(Class<?> callerClass, File file, File dest);
+
+    void check$java_io_File$setExecutable(Class<?> callerClass, File file, boolean executable);
+
+    void check$java_io_File$setExecutable(Class<?> callerClass, File file, boolean executable, boolean ownerOnly);
+
+    void check$java_io_File$setLastModified(Class<?> callerClass, File file, long time);
+
+    void check$java_io_File$setReadable(Class<?> callerClass, File file, boolean readable);
+
+    void check$java_io_File$setReadable(Class<?> callerClass, File file, boolean readable, boolean ownerOnly);
+
+    void check$java_io_File$setReadOnly(Class<?> callerClass, File file);
+
+    void check$java_io_File$setWritable(Class<?> callerClass, File file, boolean writable);
+
+    void check$java_io_File$setWritable(Class<?> callerClass, File file, boolean writable, boolean ownerOnly);
+
     void check$java_io_FileOutputStream$(Class<?> callerClass, File file);

     void check$java_io_FileOutputStream$(Class<?> callerClass, File file, boolean append);
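For readers new to this interface: the checker method names encode the instrumented target (this is the convention parseCheckerMethodSignature parses, exercised elsewhere in this diff with "check$org_example_TestClass$$staticMethod"). A few annotated examples from the methods above — the interpretation is inferred from those tests, so treat it as a reviewer's note rather than spec:

    // check$java_io_File$createNewFile(Class<?> callerClass, File file)
    //   instance method java.io.File#createNewFile(); the receiver is passed
    //   as the first argument after callerClass.
    // check$java_io_File$$createTempFile(Class<?> callerClass, String prefix, String suffix, File directory)
    //   a double '$$' marks a static method: java.io.File.createTempFile(String, String, File).
    // check$java_io_FileOutputStream$(Class<?> callerClass, File file)
    //   a trailing '$' with no method name marks a constructor: new FileOutputStream(File).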
@@ -522,5 +562,117 @@ public interface EntitlementChecker {
     void check$java_nio_file_Files$$setOwner(Class<?> callerClass, Path path, UserPrincipal principal);

+    // file system providers
+    void check$java_nio_file_spi_FileSystemProvider$(Class<?> callerClass);
+
+    void checkNewFileSystem(Class<?> callerClass, FileSystemProvider that, URI uri, Map<String, ?> env);
+
+    void checkNewFileSystem(Class<?> callerClass, FileSystemProvider that, Path path, Map<String, ?> env);
+
+    void checkNewInputStream(Class<?> callerClass, FileSystemProvider that, Path path, OpenOption... options);
+
+    void checkNewOutputStream(Class<?> callerClass, FileSystemProvider that, Path path, OpenOption... options);
+
+    void checkNewFileChannel(
+        Class<?> callerClass,
+        FileSystemProvider that,
+        Path path,
+        Set<? extends OpenOption> options,
+        FileAttribute<?>... attrs
+    );
+
+    void checkNewAsynchronousFileChannel(
+        Class<?> callerClass,
+        FileSystemProvider that,
+        Path path,
+        Set<? extends OpenOption> options,
+        ExecutorService executor,
+        FileAttribute<?>... attrs
+    );
+
+    void checkNewByteChannel(
+        Class<?> callerClass,
+        FileSystemProvider that,
+        Path path,
+        Set<? extends OpenOption> options,
+        FileAttribute<?>... attrs
+    );
+
+    void checkNewDirectoryStream(Class<?> callerClass, FileSystemProvider that, Path dir, DirectoryStream.Filter<? super Path> filter);
+
+    void checkCreateDirectory(Class<?> callerClass, FileSystemProvider that, Path dir, FileAttribute<?>... attrs);
+
+    void checkCreateSymbolicLink(Class<?> callerClass, FileSystemProvider that, Path link, Path target, FileAttribute<?>... attrs);
+
+    void checkCreateLink(Class<?> callerClass, FileSystemProvider that, Path link, Path existing);
+
+    void checkDelete(Class<?> callerClass, FileSystemProvider that, Path path);
+
+    void checkDeleteIfExists(Class<?> callerClass, FileSystemProvider that, Path path);
+
+    void checkReadSymbolicLink(Class<?> callerClass, FileSystemProvider that, Path link);
+
+    void checkCopy(Class<?> callerClass, FileSystemProvider that, Path source, Path target, CopyOption... options);
+
+    void checkMove(Class<?> callerClass, FileSystemProvider that, Path source, Path target, CopyOption... options);
+
+    void checkIsSameFile(Class<?> callerClass, FileSystemProvider that, Path path, Path path2);
+
+    void checkIsHidden(Class<?> callerClass, FileSystemProvider that, Path path);
+
+    void checkGetFileStore(Class<?> callerClass, FileSystemProvider that, Path path);
+
+    void checkCheckAccess(Class<?> callerClass, FileSystemProvider that, Path path, AccessMode... modes);
+
+    void checkGetFileAttributeView(Class<?> callerClass, FileSystemProvider that, Path path, Class<?> type, LinkOption... options);
+
+    void checkReadAttributes(Class<?> callerClass, FileSystemProvider that, Path path, Class<?> type, LinkOption... options);
+
+    void checkReadAttributes(Class<?> callerClass, FileSystemProvider that, Path path, String attributes, LinkOption... options);
+
+    void checkReadAttributesIfExists(Class<?> callerClass, FileSystemProvider that, Path path, Class<?> type, LinkOption... options);
+
+    void checkSetAttribute(Class<?> callerClass, FileSystemProvider that, Path path, String attribute, Object value, LinkOption... options);
+
+    void checkExists(Class<?> callerClass, FileSystemProvider that, Path path, LinkOption... options);
+
+    // file store
+    void checkGetFileStoreAttributeView(Class<?> callerClass, FileStore that, Class<?> type);
+
+    void checkGetAttribute(Class<?> callerClass, FileStore that, String attribute);
+
+    void checkGetBlockSize(Class<?> callerClass, FileStore that);
+
+    void checkGetTotalSpace(Class<?> callerClass, FileStore that);
+
+    void checkGetUnallocatedSpace(Class<?> callerClass, FileStore that);
+
+    void checkGetUsableSpace(Class<?> callerClass, FileStore that);
+
+    void checkIsReadOnly(Class<?> callerClass, FileStore that);
+
+    void checkName(Class<?> callerClass, FileStore that);
+
+    void checkType(Class<?> callerClass, FileStore that);
+
+    ////////////////////
+    //
+    // Thread management
+    //
+
+    void check$java_lang_Thread$start(Class<?> callerClass, Thread thread);
+
+    void check$java_lang_Thread$setDaemon(Class<?> callerClass, Thread thread, boolean on);
+
+    void check$java_lang_ThreadGroup$setDaemon(Class<?> callerClass, ThreadGroup threadGroup, boolean daemon);
+
+    void check$java_util_concurrent_ForkJoinPool$setParallelism(Class<?> callerClass, ForkJoinPool forkJoinPool, int size);
+
+    void check$java_lang_Thread$setName(Class<?> callerClass, Thread thread, String name);
+
+    void check$java_lang_Thread$setPriority(Class<?> callerClass, Thread thread, int newPriority);
+
+    void check$java_lang_Thread$setUncaughtExceptionHandler(Class<?> callerClass, Thread thread, Thread.UncaughtExceptionHandler ueh);
+
+    void check$java_lang_ThreadGroup$setMaxPriority(Class<?> callerClass, ThreadGroup threadGroup, int pri);
 }
@@ -14,17 +14,47 @@ import org.elasticsearch.core.SuppressForbidden;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.nio.file.attribute.UserPrincipal;
 import java.security.SecureRandom;

 @SuppressForbidden(reason = "Exposes forbidden APIs for testing purposes")
 public final class EntitledActions {
     private EntitledActions() {}

-    @SuppressForbidden(reason = "Exposes forbidden APIs for testing purposes")
-    static void System_clearProperty(String key) {
-        System.clearProperty(key);
-    }
+    private static final SecureRandom random = new SecureRandom();
+
+    private static final Path testRootDir = Paths.get(System.getProperty("es.entitlements.testdir"));
+
+    private static Path readDir() {
+        return testRootDir.resolve("read_dir");
+    }
+
+    private static Path readWriteDir() {
+        return testRootDir.resolve("read_write_dir");
+    }
+
+    public static UserPrincipal getFileOwner(Path path) throws IOException {
+        return Files.getOwner(path);
+    }
+
+    public static void createFile(Path path) throws IOException {
+        Files.createFile(path);
+    }
+
+    public static Path createTempFileForRead() throws IOException {
+        return Files.createFile(readDir().resolve("entitlements-" + random.nextLong() + ".tmp"));
+    }
+
+    public static Path createTempFileForWrite() throws IOException {
+        return Files.createFile(readWriteDir().resolve("entitlements-" + random.nextLong() + ".tmp"));
+    }
+
+    public static Path createTempDirectoryForWrite() throws IOException {
+        return Files.createDirectory(readWriteDir().resolve("entitlements-dir-" + random.nextLong()));
+    }
+
+    public static Path createTempSymbolicLink() throws IOException {
+        return Files.createSymbolicLink(readDir().resolve("entitlements-link-" + random.nextLong()), readWriteDir());
+    }
 }
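Context for the rewrite above: EntitledActions now acts as a fixture factory that runs with entitlements granted, so the not-entitled test code only performs the single operation under test. For example, FileCheckActions.fileDelete() in this same PR calls EntitledActions.createFile(toDelete) to set up the file and then exercises only the toFile().delete() call that the instrumentation is supposed to intercept.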
@@ -15,7 +15,7 @@ import org.elasticsearch.logging.Logger;
 import org.elasticsearch.plugins.ExtensiblePlugin;
 import org.elasticsearch.plugins.Plugin;

-import static org.elasticsearch.entitlement.qa.entitled.EntitledActions.System_clearProperty;
+import java.util.concurrent.atomic.AtomicBoolean;

 public class EntitledPlugin extends Plugin implements ExtensiblePlugin {
@@ -28,11 +28,19 @@ public class EntitledPlugin extends Plugin implements ExtensiblePlugin {
         selfTestNotEntitled();
     }

-    private static final String SELF_TEST_PROPERTY = "org.elasticsearch.entitlement.qa.selfTest";
-
     private static void selfTestEntitled() {
         logger.debug("selfTestEntitled");
-        System_clearProperty(SELF_TEST_PROPERTY);
+        AtomicBoolean threadRan = new AtomicBoolean(false);
+        try {
+            Thread testThread = new Thread(() -> threadRan.set(true), "testThread");
+            testThread.start();
+            testThread.join();
+        } catch (InterruptedException e) {
+            throw new AssertionError(e);
+        }
+        if (threadRan.get() == false) {
+            throw new AssertionError("Self-test thread did not run");
+        }
     }

     private static void selfTestNotEntitled() {
@@ -23,11 +23,13 @@ import java.net.Socket;
 import java.net.SocketAddress;
 import java.net.SocketException;
 import java.net.SocketImpl;
+import java.net.URI;
 import java.nio.channels.AsynchronousChannelGroup;
 import java.nio.channels.AsynchronousServerSocketChannel;
 import java.nio.channels.AsynchronousSocketChannel;
 import java.nio.channels.DatagramChannel;
 import java.nio.channels.Pipe;
+import java.nio.channels.SeekableByteChannel;
 import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.SocketChannel;
 import java.nio.channels.spi.AbstractSelector;
@@ -35,6 +37,18 @@ import java.nio.channels.spi.AsynchronousChannelProvider;
 import java.nio.channels.spi.SelectorProvider;
 import java.nio.charset.Charset;
 import java.nio.charset.spi.CharsetProvider;
+import java.nio.file.AccessMode;
+import java.nio.file.CopyOption;
+import java.nio.file.DirectoryStream;
+import java.nio.file.FileStore;
+import java.nio.file.FileSystem;
+import java.nio.file.LinkOption;
+import java.nio.file.OpenOption;
+import java.nio.file.Path;
+import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileAttribute;
+import java.nio.file.attribute.FileAttributeView;
+import java.nio.file.spi.FileSystemProvider;
 import java.security.cert.Certificate;
 import java.text.BreakIterator;
 import java.text.Collator;
@@ -51,6 +65,7 @@ import java.text.spi.NumberFormatProvider;
 import java.util.Iterator;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ThreadFactory;
 import java.util.spi.CalendarDataProvider;
@@ -568,4 +583,97 @@ class DummyImplementations {
             return null;
         }
     }
+
+    static class DummyFileSystemProvider extends FileSystemProvider {
+        @Override
+        public String getScheme() {
+            return "";
+        }
+
+        @Override
+        public FileSystem newFileSystem(URI uri, Map<String, ?> env) throws IOException {
+            return null;
+        }
+
+        @Override
+        public FileSystem getFileSystem(URI uri) {
+            return null;
+        }
+
+        @Override
+        public Path getPath(URI uri) {
+            return null;
+        }
+
+        @Override
+        public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> options, FileAttribute<?>... attrs)
+            throws IOException {
+            return null;
+        }
+
+        @Override
+        public DirectoryStream<Path> newDirectoryStream(Path dir, DirectoryStream.Filter<? super Path> filter) throws IOException {
+            return null;
+        }
+
+        @Override
+        public void createDirectory(Path dir, FileAttribute<?>... attrs) throws IOException {
+
+        }
+
+        @Override
+        public void delete(Path path) throws IOException {
+
+        }
+
+        @Override
+        public void copy(Path source, Path target, CopyOption... options) throws IOException {
+
+        }
+
+        @Override
+        public void move(Path source, Path target, CopyOption... options) throws IOException {
+
+        }
+
+        @Override
+        public boolean isSameFile(Path path, Path path2) throws IOException {
+            return false;
+        }
+
+        @Override
+        public boolean isHidden(Path path) throws IOException {
+            return false;
+        }
+
+        @Override
+        public FileStore getFileStore(Path path) throws IOException {
+            return null;
+        }
+
+        @Override
+        public void checkAccess(Path path, AccessMode... modes) throws IOException {
+
+        }
+
+        @Override
+        public <V extends FileAttributeView> V getFileAttributeView(Path path, Class<V> type, LinkOption... options) {
+            return null;
+        }
+
+        @Override
+        public <A extends BasicFileAttributes> A readAttributes(Path path, Class<A> type, LinkOption... options) throws IOException {
+            return null;
+        }
+
+        @Override
+        public Map<String, Object> readAttributes(Path path, String attributes, LinkOption... options) throws IOException {
+            return Map.of();
+        }
+
+        @Override
+        public void setAttribute(Path path, String attribute, Object value, LinkOption... options) throws IOException {
+
+        }
+    }
 }
@@ -12,6 +12,7 @@ package org.elasticsearch.entitlement.qa.test;
 import org.elasticsearch.core.SuppressForbidden;
+import org.elasticsearch.entitlement.qa.entitled.EntitledActions;

 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
@@ -27,24 +28,109 @@ import static org.elasticsearch.entitlement.qa.test.EntitlementTest.ExpectedAccess
 @SuppressForbidden(reason = "Explicitly checking APIs that are forbidden")
 class FileCheckActions {

-    private static Path testRootDir = Paths.get(System.getProperty("es.entitlements.testdir"));
+    static Path testRootDir = Paths.get(System.getProperty("es.entitlements.testdir"));

-    private static Path readDir() {
+    static Path readDir() {
         return testRootDir.resolve("read_dir");
     }

-    private static Path readWriteDir() {
+    static Path readWriteDir() {
         return testRootDir.resolve("read_write_dir");
     }

-    private static Path readFile() {
+    static Path readFile() {
         return testRootDir.resolve("read_file");
     }

-    private static Path readWriteFile() {
+    static Path readWriteFile() {
         return testRootDir.resolve("read_write_file");
     }

+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileCreateNewFile() throws IOException {
+        readWriteDir().resolve("new_file").toFile().createNewFile();
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileCreateTempFile() throws IOException {
+        File.createTempFile("prefix", "suffix", readWriteDir().toFile());
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileDelete() throws IOException {
+        Path toDelete = readWriteDir().resolve("to_delete");
+        EntitledActions.createFile(toDelete);
+        toDelete.toFile().delete();
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileDeleteOnExit() throws IOException {
+        Path toDelete = readWriteDir().resolve("to_delete_on_exit");
+        EntitledActions.createFile(toDelete);
+        toDelete.toFile().deleteOnExit();
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileMkdir() throws IOException {
+        Path mkdir = readWriteDir().resolve("mkdir");
+        mkdir.toFile().mkdir();
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileMkdirs() throws IOException {
+        Path mkdir = readWriteDir().resolve("mkdirs");
+        mkdir.toFile().mkdirs();
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileRenameTo() throws IOException {
+        Path toRename = readWriteDir().resolve("to_rename");
+        EntitledActions.createFile(toRename);
+        toRename.toFile().renameTo(readWriteDir().resolve("renamed").toFile());
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileSetExecutable() throws IOException {
+        readWriteFile().toFile().setExecutable(false);
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileSetExecutableOwner() throws IOException {
+        readWriteFile().toFile().setExecutable(false, false);
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileSetLastModified() throws IOException {
+        readWriteFile().toFile().setLastModified(System.currentTimeMillis());
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileSetReadable() throws IOException {
+        readWriteFile().toFile().setReadable(true);
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileSetReadableOwner() throws IOException {
+        readWriteFile().toFile().setReadable(true, false);
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileSetReadOnly() throws IOException {
+        Path readOnly = readWriteDir().resolve("read_only");
+        EntitledActions.createFile(readOnly);
+        readOnly.toFile().setReadOnly();
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileSetWritable() throws IOException {
+        readWriteFile().toFile().setWritable(true);
+    }
+
+    @EntitlementTest(expectedAccess = PLUGINS)
+    static void fileSetWritableOwner() throws IOException {
+        readWriteFile().toFile().setWritable(true, false);
+    }
+
     @EntitlementTest(expectedAccess = PLUGINS)
     static void createScannerFile() throws FileNotFoundException {
         new Scanner(readFile().toFile());
FileStoreActions.java (new file)
@@ -0,0 +1,71 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.entitlement.qa.test;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.attribute.FileStoreAttributeView;
+
+import static org.elasticsearch.entitlement.qa.test.EntitlementTest.ExpectedAccess.ALWAYS_DENIED;
+import static org.elasticsearch.entitlement.qa.test.EntitlementTest.ExpectedAccess.SERVER_ONLY;
+
+class FileStoreActions {
+
+    @EntitlementTest(expectedAccess = ALWAYS_DENIED)
+    static void checkGetFileStoreAttributeView() throws IOException {
+        Files.getFileStore(FileCheckActions.readWriteFile()).getFileStoreAttributeView(FileStoreAttributeView.class);
+    }
+
+    @EntitlementTest(expectedAccess = SERVER_ONLY)
+    static void checkGetAttribute() throws IOException {
+        try {
+            Files.getFileStore(FileCheckActions.readFile()).getAttribute("zfs:compression");
+        } catch (UnsupportedOperationException e) {
+            // It's OK if the attribute view is not available or it does not support reading the attribute
+        }
+    }
+
+    @EntitlementTest(expectedAccess = SERVER_ONLY)
+    static void checkGetBlockSize() throws IOException {
+        Files.getFileStore(FileCheckActions.readWriteFile()).getBlockSize();
+    }
+
+    @EntitlementTest(expectedAccess = SERVER_ONLY)
+    static void checkGetTotalSpace() throws IOException {
+        Files.getFileStore(FileCheckActions.readWriteFile()).getTotalSpace();
+    }
+
+    @EntitlementTest(expectedAccess = SERVER_ONLY)
+    static void checkGetUnallocatedSpace() throws IOException {
+        Files.getFileStore(FileCheckActions.readWriteFile()).getUnallocatedSpace();
+    }
+
+    @EntitlementTest(expectedAccess = SERVER_ONLY)
+    static void checkGetUsableSpace() throws IOException {
+        Files.getFileStore(FileCheckActions.readFile()).getUsableSpace();
+    }
+
+    @EntitlementTest(expectedAccess = SERVER_ONLY)
+    static void checkIsReadOnly() throws IOException {
+        Files.getFileStore(FileCheckActions.readFile()).isReadOnly();
+    }
+
+    @EntitlementTest(expectedAccess = SERVER_ONLY)
+    static void checkName() throws IOException {
+        Files.getFileStore(FileCheckActions.readFile()).name();
+    }
+
+    @EntitlementTest(expectedAccess = SERVER_ONLY)
+    static void checkType() throws IOException {
+        Files.getFileStore(FileCheckActions.readFile()).type();
+    }
+
+    private FileStoreActions() {}
+}
@@ -0,0 +1,69 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.entitlement.qa.test;

import org.elasticsearch.core.SuppressForbidden;

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicBoolean;

import static java.lang.Thread.currentThread;
import static org.elasticsearch.entitlement.qa.test.EntitlementTest.ExpectedAccess.PLUGINS;

@SuppressForbidden(reason = "testing entitlements")
@SuppressWarnings("unused") // used via reflection
class ManageThreadsActions {
    private ManageThreadsActions() {}

    @EntitlementTest(expectedAccess = PLUGINS)
    static void java_lang_Thread$start() throws InterruptedException {
        AtomicBoolean threadRan = new AtomicBoolean(false);
        Thread thread = new Thread(() -> threadRan.set(true), "test");
        thread.start();
        thread.join();
        assert threadRan.get();
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void java_lang_Thread$setDaemon() {
        new Thread().setDaemon(true);
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void java_lang_ThreadGroup$setDaemon() {
        currentThread().getThreadGroup().setDaemon(currentThread().getThreadGroup().isDaemon());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void java_util_concurrent_ForkJoinPool$setParallelism() {
        ForkJoinPool.commonPool().setParallelism(ForkJoinPool.commonPool().getParallelism());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void java_lang_Thread$setName() {
        currentThread().setName(currentThread().getName());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void java_lang_Thread$setPriority() {
        currentThread().setPriority(currentThread().getPriority());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void java_lang_Thread$setUncaughtExceptionHandler() {
        currentThread().setUncaughtExceptionHandler(currentThread().getUncaughtExceptionHandler());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void java_lang_ThreadGroup$setMaxPriority() {
        currentThread().getThreadGroup().setMaxPriority(currentThread().getThreadGroup().getMaxPriority());
    }

}

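Each method above performs exactly one thread-management operation, so the harness can assert that it succeeds for callers holding the manage_threads entitlement and fails otherwise. A hedged sketch of what a denial looks like from the caller's side (the plugin name and message shape are illustrative, inferred from the checker and PolicyManager changes later in this diff):

    // Hypothetical: plugin code whose policy does not grant "manage_threads".
    try {
        new Thread(() -> {}).start(); // rerouted through check$java_lang_Thread$start
    } catch (NotEntitledException e) {
        // message resembles:
        // Not entitled: component [my-plugin], module [my.module], class [...], entitlement [manage_threads]
    }
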
@@ -0,0 +1,230 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the "Elastic License
 * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
 * Public License v 1"; you may not use this file except in compliance with, at
 * your election, the "Elastic License 2.0", the "GNU Affero General Public
 * License v3.0 only", or the "Server Side Public License, v 1".
 */

package org.elasticsearch.entitlement.qa.test;

import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.entitlement.qa.entitled.EntitledActions;

import java.io.IOException;
import java.net.URI;
import java.nio.file.FileSystemException;
import java.nio.file.FileSystems;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.FileOwnerAttributeView;
import java.util.Map;
import java.util.Set;

import static org.elasticsearch.entitlement.qa.test.EntitlementTest.ExpectedAccess.ALWAYS_DENIED;
import static org.elasticsearch.entitlement.qa.test.EntitlementTest.ExpectedAccess.PLUGINS;
import static org.elasticsearch.entitlement.qa.test.EntitlementTest.ExpectedAccess.SERVER_ONLY;

class NioFileSystemActions {

    @EntitlementTest(expectedAccess = SERVER_ONLY)
    static void createFileSystemProvider() {
        new DummyImplementations.DummyFileSystemProvider();
    }

    @EntitlementTest(expectedAccess = ALWAYS_DENIED)
    static void checkNewFileSystemFromUri() throws IOException {
        try (var fs = FileSystems.getDefault().provider().newFileSystem(URI.create("/dummy/path"), Map.of())) {}
    }

    @EntitlementTest(expectedAccess = ALWAYS_DENIED)
    static void checkNewFileSystemFromPath() {
        var fs = FileSystems.getDefault().provider();
        try (var newFs = fs.newFileSystem(Path.of("/dummy/path"), Map.of())) {} catch (IOException e) {
            // When entitled, we expect to throw IOException, as the path is not valid - we don't really want to create a FS
        }
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkNewInputStream() throws IOException {
        var fs = FileSystems.getDefault().provider();
        try (var is = fs.newInputStream(FileCheckActions.readFile())) {}
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkNewOutputStream() throws IOException {
        var fs = FileSystems.getDefault().provider();
        try (var os = fs.newOutputStream(FileCheckActions.readWriteFile())) {}
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkNewFileChannelRead() throws IOException {
        var fs = FileSystems.getDefault().provider();
        try (var fc = fs.newFileChannel(FileCheckActions.readFile(), Set.of(StandardOpenOption.READ))) {}
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkNewFileChannelWrite() throws IOException {
        var fs = FileSystems.getDefault().provider();
        try (var fc = fs.newFileChannel(FileCheckActions.readWriteFile(), Set.of(StandardOpenOption.WRITE))) {}
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkNewAsynchronousFileChannel() throws IOException {
        var fs = FileSystems.getDefault().provider();
        try (
            var fc = fs.newAsynchronousFileChannel(
                FileCheckActions.readWriteFile(),
                Set.of(StandardOpenOption.WRITE),
                EsExecutors.DIRECT_EXECUTOR_SERVICE
            )
        ) {}
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkNewByteChannel() throws IOException {
        var fs = FileSystems.getDefault().provider();
        try (var bc = fs.newByteChannel(FileCheckActions.readWriteFile(), Set.of(StandardOpenOption.WRITE))) {}
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkNewDirectoryStream() throws IOException {
        var fs = FileSystems.getDefault().provider();
        try (var bc = fs.newDirectoryStream(FileCheckActions.readDir(), entry -> false)) {}
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkCreateDirectory() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var directory = EntitledActions.createTempDirectoryForWrite();
        fs.createDirectory(directory.resolve("subdir"));
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkCreateSymbolicLink() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var directory = EntitledActions.createTempDirectoryForWrite();
        try {
            fs.createSymbolicLink(directory.resolve("link"), FileCheckActions.readFile());
        } catch (UnsupportedOperationException | FileSystemException e) {
            // OK not to implement symbolic link in the filesystem
        }
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkCreateLink() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var directory = EntitledActions.createTempDirectoryForWrite();
        try {
            fs.createLink(directory.resolve("link"), FileCheckActions.readFile());
        } catch (UnsupportedOperationException | FileSystemException e) {
            // OK not to implement hard links in the filesystem
        }
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkDelete() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var file = EntitledActions.createTempFileForWrite();
        fs.delete(file);
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkDeleteIfExists() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var file = EntitledActions.createTempFileForWrite();
        fs.deleteIfExists(file);
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkReadSymbolicLink() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var link = EntitledActions.createTempSymbolicLink();
        fs.readSymbolicLink(link);
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkCopy() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var directory = EntitledActions.createTempDirectoryForWrite();
        fs.copy(FileCheckActions.readFile(), directory.resolve("copied"));
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkMove() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var directory = EntitledActions.createTempDirectoryForWrite();
        var file = EntitledActions.createTempFileForWrite();
        fs.move(file, directory.resolve("moved"));
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkIsSameFile() throws IOException {
        var fs = FileSystems.getDefault().provider();
        fs.isSameFile(FileCheckActions.readWriteFile(), FileCheckActions.readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkIsHidden() throws IOException {
        var fs = FileSystems.getDefault().provider();
        fs.isHidden(FileCheckActions.readFile());
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkGetFileStore() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var file = EntitledActions.createTempFileForRead();
        var store = fs.getFileStore(file);
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkCheckAccess() throws IOException {
        var fs = FileSystems.getDefault().provider();
        fs.checkAccess(FileCheckActions.readFile());
    }

    @EntitlementTest(expectedAccess = ALWAYS_DENIED)
    static void checkGetFileAttributeView() {
        var fs = FileSystems.getDefault().provider();
        fs.getFileAttributeView(FileCheckActions.readFile(), FileOwnerAttributeView.class);
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkReadAttributesWithClass() throws IOException {
        var fs = FileSystems.getDefault().provider();
        fs.readAttributes(FileCheckActions.readFile(), BasicFileAttributes.class);
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkReadAttributesWithString() throws IOException {
        var fs = FileSystems.getDefault().provider();
        fs.readAttributes(FileCheckActions.readFile(), "*");
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkReadAttributesIfExists() throws IOException {
        var fs = FileSystems.getDefault().provider();
        fs.readAttributesIfExists(FileCheckActions.readFile(), BasicFileAttributes.class);
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkSetAttribute() throws IOException {
        var fs = FileSystems.getDefault().provider();
        var file = EntitledActions.createTempFileForWrite();
        try {
            fs.setAttribute(file, "dos:hidden", true);
        } catch (UnsupportedOperationException | IllegalArgumentException | FileSystemException e) {
            // OK if the file does not have/does not support the attribute
        }
    }

    @EntitlementTest(expectedAccess = PLUGINS)
    static void checkExists() {
        var fs = FileSystems.getDefault().provider();
        fs.exists(FileCheckActions.readFile());
    }

    private NioFileSystemActions() {}
}

@@ -181,11 +181,17 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
            entry("runtime_load_library", forPlugins(LoadNativeLibrariesCheckActions::runtimeLoadLibrary)),
            entry("system_load", forPlugins(LoadNativeLibrariesCheckActions::systemLoad)),
            entry("system_load_library", forPlugins(LoadNativeLibrariesCheckActions::systemLoadLibrary))

            // MAINTENANCE NOTE: Please don't add any more entries to this map.
            // Put new tests into their own "Actions" class using the @EntitlementTest annotation.
        ),
        getTestEntries(FileCheckActions.class),
        getTestEntries(FileStoreActions.class),
        getTestEntries(ManageThreadsActions.class),
        getTestEntries(NativeActions.class),
        getTestEntries(NioFileSystemActions.class),
        getTestEntries(SpiActions.class),
        getTestEntries(SystemActions.class),
        getTestEntries(NativeActions.class)
        getTestEntries(SystemActions.class)
    )
        .flatMap(Function.identity())
        .filter(entry -> entry.getValue().fromJavaVersion() == null || Runtime.version().feature() >= entry.getValue().fromJavaVersion())

@@ -422,7 +428,9 @@ public class RestEntitlementsCheckAction extends BaseRestHandler {
        return channel -> {
            logger.info("Calling check action [{}]", actionName);
            checkAction.action().run();
            logger.debug("Check action [{}] returned", actionName);
            channel.sendResponse(new RestResponse(RestStatus.OK, Strings.format("Successfully executed action [%s]", actionName)));
        };
    }

}

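Per the maintenance note above, new checks go into dedicated classes that getTestEntries discovers through the @EntitlementTest annotation. A minimal sketch of such a class (the class and method names here are illustrative, not part of this commit):

    class MyFeatureActions {

        @EntitlementTest(expectedAccess = PLUGINS)
        static void myFeatureCheck() {
            // perform exactly one sensitive operation here
        }

        private MyFeatureActions() {}
    }
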
@@ -29,6 +29,7 @@ public abstract class AbstractEntitlementsIT extends ESRestTestCase {
        builder.value("inbound_network");
        builder.value("outbound_network");
        builder.value("load_native_libraries");
        builder.value("manage_threads");
        builder.value(
            Map.of(
                "write_system_properties",

@@ -33,7 +33,7 @@ class EntitlementsTestRule implements TestRule {

    // entitlements that test methods may use, see EntitledActions
    private static final PolicyBuilder ENTITLED_POLICY = (builder, tempDir) -> {
        builder.value(Map.of("write_system_properties", Map.of("properties", List.of("org.elasticsearch.entitlement.qa.selfTest"))));
        builder.value("manage_threads");
        builder.value(
            Map.of(
                "files",

@@ -74,6 +74,8 @@ class EntitlementsTestRule implements TestRule {
            .systemProperty("es.entitlements.enabled", "true")
            .systemProperty("es.entitlements.testdir", () -> testDir.getRoot().getAbsolutePath())
            .setting("xpack.security.enabled", "false")
            // Logs in libs/entitlement/qa/build/test-results/javaRestTest/TEST-org.elasticsearch.entitlement.qa.EntitlementsXXX.xml
            // .setting("logger.org.elasticsearch.entitlement", "DEBUG")
            .build();
        ruleChain = RuleChain.outerRule(testDir).around(tempDirSetup).around(cluster);
    }

@@ -15,7 +15,6 @@ import com.sun.tools.attach.AttachNotSupportedException;
import com.sun.tools.attach.VirtualMachine;

import org.elasticsearch.core.CheckedConsumer;
import org.elasticsearch.core.CheckedSupplier;
import org.elasticsearch.core.SuppressForbidden;
import org.elasticsearch.entitlement.initialization.EntitlementInitialization;
import org.elasticsearch.entitlement.runtime.api.NotEntitledException;

@@ -27,7 +26,6 @@ import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.FileAttribute;
import java.util.Map;
import java.util.function.Function;

@@ -149,11 +147,8 @@ public class EntitlementBootstrap {
     */
    private static void selfTest() {
        ensureCannotStartProcess(ProcessBuilder::start);
        ensureCanCreateTempFile(EntitlementBootstrap::createTempFile);

        // Try again with reflection
        ensureCannotStartProcess(EntitlementBootstrap::reflectiveStartProcess);
        ensureCanCreateTempFile(EntitlementBootstrap::reflectiveCreateTempFile);
    }

    private static void ensureCannotStartProcess(CheckedConsumer<ProcessBuilder, ?> startProcess) {

@@ -169,31 +164,6 @@ public class EntitlementBootstrap {
        throw new IllegalStateException("Entitlement protection self-test was incorrectly permitted");
    }

    @SuppressForbidden(reason = "accesses jvm default tempdir as a self-test")
    private static void ensureCanCreateTempFile(CheckedSupplier<Path, ?> createTempFile) {
        try {
            Path p = createTempFile.get();
            p.toFile().deleteOnExit();

            // Make an effort to clean up the file immediately; also, deleteOnExit leaves the file if the JVM exits abnormally.
            try {
                Files.delete(p);
            } catch (IOException ignored) {
                // Can be caused by virus scanner
            }
        } catch (NotEntitledException e) {
            throw new IllegalStateException("Entitlement protection self-test was incorrectly forbidden", e);
        } catch (Exception e) {
            throw new IllegalStateException("Unable to perform entitlement protection self-test", e);
        }
        logger.debug("Success: Entitlement protection correctly permitted temp file creation");
    }

    @SuppressForbidden(reason = "accesses jvm default tempdir as a self-test")
    private static Path createTempFile() throws Exception {
        return Files.createTempFile(null, null);
    }

    private static void reflectiveStartProcess(ProcessBuilder pb) throws Exception {
        try {
            var start = ProcessBuilder.class.getMethod("start");

@@ -203,10 +173,5 @@ public class EntitlementBootstrap {
        }
    }

    private static Path reflectiveCreateTempFile() throws Exception {
        return (Path) Files.class.getMethod("createTempFile", String.class, String.class, FileAttribute[].class)
            .invoke(null, null, null, new FileAttribute<?>[0]);
    }

    private static final Logger logger = LogManager.getLogger(EntitlementBootstrap.class);
}

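The self-test probes the instrumentation both directly and through reflection: the agent rewrites the JDK methods themselves, so the checks must fire no matter how a call site reaches them. A self-contained sketch of the same double probe (the helper name and structure are assumptions for illustration, not code from this commit):

    // Hypothetical: verify a forbidden action is blocked directly and reflectively.
    static void assertProcessStartBlockedBothWays() throws Exception {
        try {
            new ProcessBuilder("true").start(); // direct call hits the instrumented check
            throw new IllegalStateException("direct call was not blocked");
        } catch (NotEntitledException expected) {}

        try {
            ProcessBuilder.class.getMethod("start").invoke(new ProcessBuilder("true"));
            throw new IllegalStateException("reflective call was not blocked");
        } catch (InvocationTargetException e) {
            if ((e.getCause() instanceof NotEntitledException) == false) {
                throw e; // failed for an unrelated reason
            }
        }
    }
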
@@ -24,25 +24,42 @@ import org.elasticsearch.entitlement.runtime.policy.Scope;
import org.elasticsearch.entitlement.runtime.policy.entitlements.CreateClassLoaderEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.Entitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.ExitVMEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.FilesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.FilesEntitlement.FileData;
import org.elasticsearch.entitlement.runtime.policy.entitlements.InboundNetworkEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.LoadNativeLibrariesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.ManageThreadsEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.OutboundNetworkEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.ReadStoreAttributesEntitlement;

import java.lang.instrument.Instrumentation;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.nio.channels.spi.SelectorProvider;
import java.nio.file.AccessMode;
import java.nio.file.CopyOption;
import java.nio.file.DirectoryStream;
import java.nio.file.FileStore;
import java.nio.file.FileSystems;
import java.nio.file.LinkOption;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.spi.FileSystemProvider;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

import static org.elasticsearch.entitlement.runtime.policy.entitlements.FilesEntitlement.Mode.READ_WRITE;

/**
 * Called by the agent during {@code agentmain} to configure the entitlement system,

@@ -58,6 +75,11 @@ public class EntitlementInitialization {

    private static ElasticsearchEntitlementChecker manager;

    interface InstrumentationInfoFactory {
        InstrumentationService.InstrumentationInfo of(String methodName, Class<?>... parameterTypes) throws ClassNotFoundException,
            NoSuchMethodException;
    }

    // Note: referenced by bridge reflectively
    public static EntitlementChecker checker() {
        return manager;

@@ -70,25 +92,21 @@ public class EntitlementInitialization {
        var latestCheckerInterface = getVersionSpecificCheckerClass(EntitlementChecker.class);

        Map<MethodKey, CheckMethod> checkMethods = new HashMap<>(INSTRUMENTATION_SERVICE.lookupMethods(latestCheckerInterface));
        var fileSystemProviderClass = FileSystems.getDefault().provider().getClass();
        Stream.of(
            INSTRUMENTATION_SERVICE.lookupImplementationMethod(
                FileSystemProvider.class,
                "newInputStream",
                fileSystemProviderClass,
                EntitlementChecker.class,
                "checkNewInputStream",
                Path.class,
                OpenOption[].class
            ),
            INSTRUMENTATION_SERVICE.lookupImplementationMethod(
                SelectorProvider.class,
                "inheritedChannel",
                SelectorProvider.provider().getClass(),
                EntitlementChecker.class,
                "checkSelectorProviderInheritedChannel"
            fileSystemProviderChecks(),
            fileStoreChecks(),
            Stream.of(
                INSTRUMENTATION_SERVICE.lookupImplementationMethod(
                    SelectorProvider.class,
                    "inheritedChannel",
                    SelectorProvider.provider().getClass(),
                    EntitlementChecker.class,
                    "checkSelectorProviderInheritedChannel"
                )
            )
        ).forEach(instrumentation -> checkMethods.put(instrumentation.targetMethod(), instrumentation.checkMethod()));
        )
            .flatMap(Function.identity())
            .forEach(instrumentation -> checkMethods.put(instrumentation.targetMethod(), instrumentation.checkMethod()));

        var classesToTransform = checkMethods.keySet().stream().map(MethodKey::className).collect(Collectors.toSet());

@@ -109,6 +127,8 @@ public class EntitlementInitialization {

    private static PolicyManager createPolicyManager() {
        Map<String, Policy> pluginPolicies = EntitlementBootstrap.bootstrapArgs().pluginPolicies();
        Path[] dataDirs = EntitlementBootstrap.bootstrapArgs().dataDirs();
        Path tempDir = EntitlementBootstrap.bootstrapArgs().tempDir();

        // TODO(ES-10031): Decide what goes in the elasticsearch default policy and extend it
        var serverPolicy = new Policy(

@@ -120,23 +140,131 @@ public class EntitlementInitialization {
                "org.elasticsearch.server",
                List.of(
                    new ExitVMEntitlement(),
                    new ReadStoreAttributesEntitlement(),
                    new CreateClassLoaderEntitlement(),
                    new InboundNetworkEntitlement(),
                    new OutboundNetworkEntitlement(),
                    new LoadNativeLibrariesEntitlement()
                    new LoadNativeLibrariesEntitlement(),
                    new ManageThreadsEntitlement(),
                    new FilesEntitlement(
                        List.of(new FilesEntitlement.FileData(EntitlementBootstrap.bootstrapArgs().tempDir().toString(), READ_WRITE))
                    )
                )
            ),
            new Scope("org.apache.httpcomponents.httpclient", List.of(new OutboundNetworkEntitlement())),
            new Scope("io.netty.transport", List.of(new InboundNetworkEntitlement(), new OutboundNetworkEntitlement())),
            new Scope("org.apache.lucene.core", List.of(new LoadNativeLibrariesEntitlement())),
            new Scope("org.elasticsearch.nativeaccess", List.of(new LoadNativeLibrariesEntitlement()))
            new Scope("org.apache.lucene.core", List.of(new LoadNativeLibrariesEntitlement(), new ManageThreadsEntitlement())),
            new Scope("org.apache.logging.log4j.core", List.of(new ManageThreadsEntitlement())),
            new Scope(
                "org.elasticsearch.nativeaccess",
                List.of(
                    new LoadNativeLibrariesEntitlement(),
                    new FilesEntitlement(Arrays.stream(dataDirs).map(d -> new FileData(d.toString(), READ_WRITE)).toList())
                )
            )
        )
    );
        // agents run without a module, so this is a special hack for the apm agent
        // this should be removed once https://github.com/elastic/elasticsearch/issues/109335 is completed
        List<Entitlement> agentEntitlements = List.of(new CreateClassLoaderEntitlement());
        List<Entitlement> agentEntitlements = List.of(new CreateClassLoaderEntitlement(), new ManageThreadsEntitlement());
        var resolver = EntitlementBootstrap.bootstrapArgs().pluginResolver();
        return new PolicyManager(serverPolicy, agentEntitlements, pluginPolicies, resolver, AGENTS_PACKAGE_NAME, ENTITLEMENTS_MODULE);
        return new PolicyManager(
            serverPolicy,
            agentEntitlements,
            pluginPolicies,
            resolver,
            AGENTS_PACKAGE_NAME,
            ENTITLEMENTS_MODULE,
            tempDir
        );
    }

    private static Stream<InstrumentationService.InstrumentationInfo> fileSystemProviderChecks() throws ClassNotFoundException,
        NoSuchMethodException {
        var fileSystemProviderClass = FileSystems.getDefault().provider().getClass();

        var instrumentation = new InstrumentationInfoFactory() {
            @Override
            public InstrumentationService.InstrumentationInfo of(String methodName, Class<?>... parameterTypes)
                throws ClassNotFoundException, NoSuchMethodException {
                return INSTRUMENTATION_SERVICE.lookupImplementationMethod(
                    FileSystemProvider.class,
                    methodName,
                    fileSystemProviderClass,
                    EntitlementChecker.class,
                    "check" + Character.toUpperCase(methodName.charAt(0)) + methodName.substring(1),
                    parameterTypes
                );
            }
        };

        return Stream.of(
            instrumentation.of("newFileSystem", URI.class, Map.class),
            instrumentation.of("newFileSystem", Path.class, Map.class),
            instrumentation.of("newInputStream", Path.class, OpenOption[].class),
            instrumentation.of("newOutputStream", Path.class, OpenOption[].class),
            instrumentation.of("newFileChannel", Path.class, Set.class, FileAttribute[].class),
            instrumentation.of("newAsynchronousFileChannel", Path.class, Set.class, ExecutorService.class, FileAttribute[].class),
            instrumentation.of("newByteChannel", Path.class, Set.class, FileAttribute[].class),
            instrumentation.of("newDirectoryStream", Path.class, DirectoryStream.Filter.class),
            instrumentation.of("createDirectory", Path.class, FileAttribute[].class),
            instrumentation.of("createSymbolicLink", Path.class, Path.class, FileAttribute[].class),
            instrumentation.of("createLink", Path.class, Path.class),
            instrumentation.of("delete", Path.class),
            instrumentation.of("deleteIfExists", Path.class),
            instrumentation.of("readSymbolicLink", Path.class),
            instrumentation.of("copy", Path.class, Path.class, CopyOption[].class),
            instrumentation.of("move", Path.class, Path.class, CopyOption[].class),
            instrumentation.of("isSameFile", Path.class, Path.class),
            instrumentation.of("isHidden", Path.class),
            instrumentation.of("getFileStore", Path.class),
            instrumentation.of("checkAccess", Path.class, AccessMode[].class),
            instrumentation.of("getFileAttributeView", Path.class, Class.class, LinkOption[].class),
            instrumentation.of("readAttributes", Path.class, Class.class, LinkOption[].class),
            instrumentation.of("readAttributes", Path.class, String.class, LinkOption[].class),
            instrumentation.of("readAttributesIfExists", Path.class, Class.class, LinkOption[].class),
            instrumentation.of("setAttribute", Path.class, String.class, Object.class, LinkOption[].class),
            instrumentation.of("exists", Path.class, LinkOption[].class)
        );
    }

    private static Stream<InstrumentationService.InstrumentationInfo> fileStoreChecks() {
        var fileStoreClasses = StreamSupport.stream(FileSystems.getDefault().getFileStores().spliterator(), false)
            .map(FileStore::getClass)
            .distinct();
        return fileStoreClasses.flatMap(fileStoreClass -> {
            var instrumentation = new InstrumentationInfoFactory() {
                @Override
                public InstrumentationService.InstrumentationInfo of(String methodName, Class<?>... parameterTypes)
                    throws ClassNotFoundException, NoSuchMethodException {
                    return INSTRUMENTATION_SERVICE.lookupImplementationMethod(
                        FileStore.class,
                        methodName,
                        fileStoreClass,
                        EntitlementChecker.class,
                        "check" + Character.toUpperCase(methodName.charAt(0)) + methodName.substring(1),
                        parameterTypes
                    );
                }
            };

            try {
                return Stream.of(
                    instrumentation.of("getFileStoreAttributeView", Class.class),
                    instrumentation.of("getAttribute", String.class),
                    instrumentation.of("getBlockSize"),
                    instrumentation.of("getTotalSpace"),
                    instrumentation.of("getUnallocatedSpace"),
                    instrumentation.of("getUsableSpace"),
                    instrumentation.of("isReadOnly"),
                    instrumentation.of("name"),
                    instrumentation.of("type")
                );
            } catch (NoSuchMethodException | ClassNotFoundException e) {
                throw new RuntimeException(e);
            }
        });
    }

    /**

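Both factories above derive the checker method name mechanically: "check" plus the instrumented method name with its first letter upper-cased, with overloads disambiguated by the explicit parameter types. A tiny illustration of the mapping (restating the expression in the code above, not an additional API):

    // "newInputStream" on FileSystemProvider resolves to the checker method
    // "checkNewInputStream"; "getBlockSize" on FileStore to "checkGetBlockSize".
    String methodName = "newInputStream";
    String checkName = "check" + Character.toUpperCase(methodName.charAt(0)) + methodName.substring(1);
    assert checkName.equals("checkNewInputStream");
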
@@ -9,7 +9,6 @@

package org.elasticsearch.entitlement.instrumentation;

import java.io.IOException;
import java.util.Map;

/**

@@ -23,7 +22,7 @@ public interface InstrumentationService {

    Instrumenter newInstrumenter(Class<?> clazz, Map<MethodKey, CheckMethod> methods);

    Map<MethodKey, CheckMethod> lookupMethods(Class<?> clazz) throws IOException;
    Map<MethodKey, CheckMethod> lookupMethods(Class<?> clazz) throws ClassNotFoundException;

    InstrumentationInfo lookupImplementationMethod(
        Class<?> targetSuperclass,

@@ -40,6 +40,7 @@ import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketImplFactory;
import java.net.URI;
import java.net.URL;
import java.net.URLStreamHandler;
import java.net.URLStreamHandlerFactory;

@@ -55,16 +56,26 @@ import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.nio.channels.spi.SelectorProvider;
import java.nio.charset.Charset;
import java.nio.file.AccessMode;
import java.nio.file.CopyOption;
import java.nio.file.DirectoryStream;
import java.nio.file.FileStore;
import java.nio.file.LinkOption;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.UserPrincipal;
import java.nio.file.spi.FileSystemProvider;
import java.security.cert.CertStoreParameters;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.ForkJoinPool;
import java.util.function.Consumer;

import javax.net.ssl.HostnameVerifier;

@@ -940,6 +951,82 @@ public class ElasticsearchEntitlementChecker implements EntitlementChecker {

    // old io (ie File)

    @Override
    public void check$java_io_File$createNewFile(Class<?> callerClass, File file) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$$createTempFile(Class<?> callerClass, String prefix, String suffix, File directory) {
        policyManager.checkFileWrite(callerClass, directory);
    }

    @Override
    public void check$java_io_File$delete(Class<?> callerClass, File file) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$deleteOnExit(Class<?> callerClass, File file) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$mkdir(Class<?> callerClass, File file) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$mkdirs(Class<?> callerClass, File file) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$renameTo(Class<?> callerClass, File file, File dest) {
        policyManager.checkFileRead(callerClass, file);
        policyManager.checkFileWrite(callerClass, dest);
    }

    @Override
    public void check$java_io_File$setExecutable(Class<?> callerClass, File file, boolean executable) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$setExecutable(Class<?> callerClass, File file, boolean executable, boolean ownerOnly) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$setLastModified(Class<?> callerClass, File file, long time) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$setReadable(Class<?> callerClass, File file, boolean readable) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$setReadable(Class<?> callerClass, File file, boolean readable, boolean ownerOnly) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$setReadOnly(Class<?> callerClass, File file) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$setWritable(Class<?> callerClass, File file, boolean writable) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_File$setWritable(Class<?> callerClass, File file, boolean writable, boolean ownerOnly) {
        policyManager.checkFileWrite(callerClass, file);
    }

    @Override
    public void check$java_io_FileOutputStream$(Class<?> callerClass, String name) {
        policyManager.checkFileWrite(callerClass, new File(name));

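    // Editor's note (observation, not part of this commit): the check$ names
    // appear to encode the instrumented call site: the target class with '_'
    // in place of '.', then the method name. A double '$$' (as in
    // check$java_io_File$$createTempFile) marks a static method, and a trailing
    // '$' with no method name (as in check$java_io_FileOutputStream$) marks a
    // constructor.
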
@@ -994,8 +1081,292 @@ public class ElasticsearchEntitlementChecker implements EntitlementChecker {

    // file system providers

    @Override
    public void check$java_nio_file_spi_FileSystemProvider$(Class<?> callerClass) {
        policyManager.checkChangeJVMGlobalState(callerClass);
    }

    @Override
    public void checkNewFileSystem(Class<?> callerClass, FileSystemProvider that, URI uri, Map<String, ?> env) {
        policyManager.checkChangeJVMGlobalState(callerClass);
    }

    @Override
    public void checkNewFileSystem(Class<?> callerClass, FileSystemProvider that, Path path, Map<String, ?> env) {
        policyManager.checkChangeJVMGlobalState(callerClass);
    }

    @Override
    public void checkNewInputStream(Class<?> callerClass, FileSystemProvider that, Path path, OpenOption... options) {
        // TODO: policyManager.checkFileSystemRead(path);
        policyManager.checkFileRead(callerClass, path);
    }

    @Override
    public void checkNewOutputStream(Class<?> callerClass, FileSystemProvider that, Path path, OpenOption... options) {
        policyManager.checkFileWrite(callerClass, path);
    }

    private static boolean isOpenForWrite(Set<? extends OpenOption> options) {
        return options.contains(StandardOpenOption.WRITE)
            || options.contains(StandardOpenOption.APPEND)
            || options.contains(StandardOpenOption.CREATE)
            || options.contains(StandardOpenOption.CREATE_NEW)
            || options.contains(StandardOpenOption.DELETE_ON_CLOSE);
    }

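    // Editor's note (illustration, not part of this commit): isOpenForWrite
    // folds NIO open options into the coarser read/write entitlement model:
    //   Set.of(READ)            -> read check
    //   Set.of(WRITE, CREATE)   -> write check
    //   Set.of(CREATE_NEW)      -> write check (creating a file mutates the filesystem)
    //   Set.of(DELETE_ON_CLOSE) -> write check (deletion on close mutates the filesystem)
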
    @Override
    public void checkNewFileChannel(
        Class<?> callerClass,
        FileSystemProvider that,
        Path path,
        Set<? extends OpenOption> options,
        FileAttribute<?>... attrs
    ) {
        if (isOpenForWrite(options)) {
            policyManager.checkFileWrite(callerClass, path);
        } else {
            policyManager.checkFileRead(callerClass, path);
        }
    }

    @Override
    public void checkNewAsynchronousFileChannel(
        Class<?> callerClass,
        FileSystemProvider that,
        Path path,
        Set<? extends OpenOption> options,
        ExecutorService executor,
        FileAttribute<?>... attrs
    ) {
        if (isOpenForWrite(options)) {
            policyManager.checkFileWrite(callerClass, path);
        } else {
            policyManager.checkFileRead(callerClass, path);
        }
    }

    @Override
    public void checkNewByteChannel(
        Class<?> callerClass,
        FileSystemProvider that,
        Path path,
        Set<? extends OpenOption> options,
        FileAttribute<?>... attrs
    ) {
        if (isOpenForWrite(options)) {
            policyManager.checkFileWrite(callerClass, path);
        } else {
            policyManager.checkFileRead(callerClass, path);
        }
    }

    @Override
    public void checkNewDirectoryStream(
        Class<?> callerClass,
        FileSystemProvider that,
        Path dir,
        DirectoryStream.Filter<? super Path> filter
    ) {
        policyManager.checkFileRead(callerClass, dir);
    }

    @Override
    public void checkCreateDirectory(Class<?> callerClass, FileSystemProvider that, Path dir, FileAttribute<?>... attrs) {
        policyManager.checkFileWrite(callerClass, dir);
    }

    @Override
    public void checkCreateSymbolicLink(Class<?> callerClass, FileSystemProvider that, Path link, Path target, FileAttribute<?>... attrs) {
        policyManager.checkFileWrite(callerClass, link);
        policyManager.checkFileRead(callerClass, target);
    }

    @Override
    public void checkCreateLink(Class<?> callerClass, FileSystemProvider that, Path link, Path existing) {
        policyManager.checkFileWrite(callerClass, link);
        policyManager.checkFileRead(callerClass, existing);
    }

    @Override
    public void checkDelete(Class<?> callerClass, FileSystemProvider that, Path path) {
        policyManager.checkFileWrite(callerClass, path);
    }

    @Override
    public void checkDeleteIfExists(Class<?> callerClass, FileSystemProvider that, Path path) {
        policyManager.checkFileWrite(callerClass, path);
    }

    @Override
    public void checkReadSymbolicLink(Class<?> callerClass, FileSystemProvider that, Path link) {
        policyManager.checkFileRead(callerClass, link);
    }

    @Override
    public void checkCopy(Class<?> callerClass, FileSystemProvider that, Path source, Path target, CopyOption... options) {
        policyManager.checkFileWrite(callerClass, target);
        policyManager.checkFileRead(callerClass, source);
    }

    @Override
    public void checkMove(Class<?> callerClass, FileSystemProvider that, Path source, Path target, CopyOption... options) {
        policyManager.checkFileWrite(callerClass, target);
        policyManager.checkFileWrite(callerClass, source);
    }

    @Override
    public void checkIsSameFile(Class<?> callerClass, FileSystemProvider that, Path path, Path path2) {
        policyManager.checkFileRead(callerClass, path);
        policyManager.checkFileRead(callerClass, path2);
    }

    @Override
    public void checkIsHidden(Class<?> callerClass, FileSystemProvider that, Path path) {
        policyManager.checkFileRead(callerClass, path);
    }

    @Override
    public void checkGetFileStore(Class<?> callerClass, FileSystemProvider that, Path path) {
        policyManager.checkFileRead(callerClass, path);
    }

    @Override
    public void checkCheckAccess(Class<?> callerClass, FileSystemProvider that, Path path, AccessMode... modes) {
        policyManager.checkFileRead(callerClass, path);
    }

    @Override
    public void checkGetFileAttributeView(Class<?> callerClass, FileSystemProvider that, Path path, Class<?> type, LinkOption... options) {
        policyManager.checkGetFileAttributeView(callerClass);
    }

    @Override
    public void checkReadAttributes(Class<?> callerClass, FileSystemProvider that, Path path, Class<?> type, LinkOption... options) {
        policyManager.checkFileRead(callerClass, path);
    }

    @Override
    public void checkReadAttributes(Class<?> callerClass, FileSystemProvider that, Path path, String attributes, LinkOption... options) {
        policyManager.checkFileRead(callerClass, path);
    }

    @Override
    public void checkReadAttributesIfExists(
        Class<?> callerClass,
        FileSystemProvider that,
        Path path,
        Class<?> type,
        LinkOption... options
    ) {
        policyManager.checkFileRead(callerClass, path);
    }

    @Override
    public void checkSetAttribute(
        Class<?> callerClass,
        FileSystemProvider that,
        Path path,
        String attribute,
        Object value,
        LinkOption... options
    ) {
        policyManager.checkFileWrite(callerClass, path);
    }

    @Override
    public void checkExists(Class<?> callerClass, FileSystemProvider that, Path path, LinkOption... options) {
        policyManager.checkFileRead(callerClass, path);
    }

    // Thread management

    @Override
    public void check$java_lang_Thread$start(Class<?> callerClass, Thread thread) {
        policyManager.checkManageThreadsEntitlement(callerClass);
    }

    @Override
    public void check$java_lang_Thread$setDaemon(Class<?> callerClass, Thread thread, boolean on) {
        policyManager.checkManageThreadsEntitlement(callerClass);
    }

    @Override
    public void check$java_lang_ThreadGroup$setDaemon(Class<?> callerClass, ThreadGroup threadGroup, boolean daemon) {
        policyManager.checkManageThreadsEntitlement(callerClass);
    }

    @Override
    public void check$java_util_concurrent_ForkJoinPool$setParallelism(Class<?> callerClass, ForkJoinPool forkJoinPool, int size) {
        policyManager.checkManageThreadsEntitlement(callerClass);
    }

    @Override
    public void check$java_lang_Thread$setName(Class<?> callerClass, Thread thread, String name) {
        policyManager.checkManageThreadsEntitlement(callerClass);
    }

    @Override
    public void check$java_lang_Thread$setPriority(Class<?> callerClass, Thread thread, int newPriority) {
        policyManager.checkManageThreadsEntitlement(callerClass);
    }

    @Override
    public void check$java_lang_Thread$setUncaughtExceptionHandler(
        Class<?> callerClass,
        Thread thread,
        Thread.UncaughtExceptionHandler ueh
    ) {
        policyManager.checkManageThreadsEntitlement(callerClass);
    }

    @Override
    public void check$java_lang_ThreadGroup$setMaxPriority(Class<?> callerClass, ThreadGroup threadGroup, int pri) {
        policyManager.checkManageThreadsEntitlement(callerClass);
    }

    @Override
    public void checkGetFileStoreAttributeView(Class<?> callerClass, FileStore that, Class<?> type) {
        policyManager.checkWriteStoreAttributes(callerClass);
    }

    @Override
    public void checkGetAttribute(Class<?> callerClass, FileStore that, String attribute) {
        policyManager.checkReadStoreAttributes(callerClass);
    }

    @Override
    public void checkGetBlockSize(Class<?> callerClass, FileStore that) {
        policyManager.checkReadStoreAttributes(callerClass);
    }

    @Override
    public void checkGetTotalSpace(Class<?> callerClass, FileStore that) {
        policyManager.checkReadStoreAttributes(callerClass);
    }

    @Override
    public void checkGetUnallocatedSpace(Class<?> callerClass, FileStore that) {
        policyManager.checkReadStoreAttributes(callerClass);
    }

    @Override
    public void checkGetUsableSpace(Class<?> callerClass, FileStore that) {
        policyManager.checkReadStoreAttributes(callerClass);
    }

    @Override
    public void checkIsReadOnly(Class<?> callerClass, FileStore that) {
        policyManager.checkReadStoreAttributes(callerClass);
    }

    @Override
    public void checkName(Class<?> callerClass, FileStore that) {
        policyManager.checkReadStoreAttributes(callerClass);
    }

    @Override
    public void checkType(Class<?> callerClass, FileStore that) {
        policyManager.checkReadStoreAttributes(callerClass);
    }
}

@@ -20,13 +20,12 @@ import java.util.Objects;
import static org.elasticsearch.core.PathUtils.getDefaultFileSystem;

public final class FileAccessTree {
    public static final FileAccessTree EMPTY = new FileAccessTree(FilesEntitlement.EMPTY);
    private static final String FILE_SEPARATOR = getDefaultFileSystem().getSeparator();

    private final String[] readPaths;
    private final String[] writePaths;

    private FileAccessTree(FilesEntitlement filesEntitlement) {
    private FileAccessTree(FilesEntitlement filesEntitlement, Path tempDir) {
        List<String> readPaths = new ArrayList<>();
        List<String> writePaths = new ArrayList<>();
        for (FilesEntitlement.FileData fileData : filesEntitlement.filesData()) {

@@ -38,6 +37,10 @@ public final class FileAccessTree {
            readPaths.add(path);
        }

        // everything has access to the temp dir
        readPaths.add(tempDir.toString());
        writePaths.add(tempDir.toString());

        readPaths.sort(String::compareTo);
        writePaths.sort(String::compareTo);

@@ -45,8 +48,8 @@ public final class FileAccessTree {
        this.writePaths = writePaths.toArray(new String[0]);
    }

    public static FileAccessTree of(FilesEntitlement filesEntitlement) {
        return new FileAccessTree(filesEntitlement);
    public static FileAccessTree of(FilesEntitlement filesEntitlement, Path tempDir) {
        return new FileAccessTree(filesEntitlement, tempDir);
    }

    boolean canRead(Path path) {

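With the temp dir unconditionally added and both lists sorted, canRead and canWrite reduce to checking the queried path against the granted roots. The lookup itself falls outside this hunk; a minimal sketch of the idea, assuming plain prefix matching (illustrative, not the actual implementation):

    // Hypothetical: a path is permitted when it falls under any granted root.
    private static boolean underAnyRoot(String[] sortedRoots, Path path) {
        String p = path.toAbsolutePath().toString();
        for (String root : sortedRoots) {
            if (p.startsWith(root)) {
                return true; // covered by a granted read or write root
            }
        }
        return false;
    }
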
@@ -19,7 +19,9 @@ import org.elasticsearch.entitlement.runtime.policy.entitlements.ExitVMEntitleme
import org.elasticsearch.entitlement.runtime.policy.entitlements.FilesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.InboundNetworkEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.LoadNativeLibrariesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.ManageThreadsEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.OutboundNetworkEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.ReadStoreAttributesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.SetHttpsConnectionPropertiesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.WriteSystemPropertiesEntitlement;
import org.elasticsearch.logging.LogManager;

@@ -68,24 +70,6 @@ public class PolicyManager {
        entitlementsByType = Map.copyOf(entitlementsByType);
    }

    public static ModuleEntitlements none(String componentName) {
        return new ModuleEntitlements(componentName, Map.of(), FileAccessTree.EMPTY);
    }

    public static ModuleEntitlements from(String componentName, List<Entitlement> entitlements) {
        FilesEntitlement filesEntitlement = FilesEntitlement.EMPTY;
        for (Entitlement entitlement : entitlements) {
            if (entitlement instanceof FilesEntitlement) {
                filesEntitlement = (FilesEntitlement) entitlement;
            }
        }
        return new ModuleEntitlements(
            componentName,
            entitlements.stream().collect(groupingBy(Entitlement::getClass)),
            FileAccessTree.of(filesEntitlement)
        );
    }

    public boolean hasEntitlement(Class<? extends Entitlement> entitlementClass) {
        return entitlementsByType.containsKey(entitlementClass);
    }

@@ -99,12 +83,34 @@ public class PolicyManager {
        }
    }

    // pkg private for testing
    ModuleEntitlements defaultEntitlements(String componentName) {
        return new ModuleEntitlements(componentName, Map.of(), defaultFileAccess);
    }

    // pkg private for testing
    ModuleEntitlements policyEntitlements(String componentName, List<Entitlement> entitlements) {
        FilesEntitlement filesEntitlement = FilesEntitlement.EMPTY;
        for (Entitlement entitlement : entitlements) {
            if (entitlement instanceof FilesEntitlement) {
                filesEntitlement = (FilesEntitlement) entitlement;
            }
        }
        return new ModuleEntitlements(
            componentName,
            entitlements.stream().collect(groupingBy(Entitlement::getClass)),
            FileAccessTree.of(filesEntitlement, tempDir)
        );
    }

    final Map<Module, ModuleEntitlements> moduleEntitlementsMap = new ConcurrentHashMap<>();

    protected final Map<String, List<Entitlement>> serverEntitlements;
    protected final List<Entitlement> apmAgentEntitlements;
    protected final Map<String, Map<String, List<Entitlement>>> pluginsEntitlements;
    private final Map<String, List<Entitlement>> serverEntitlements;
    private final List<Entitlement> apmAgentEntitlements;
    private final Map<String, Map<String, List<Entitlement>>> pluginsEntitlements;
    private final Function<Class<?>, String> pluginResolver;
    private final Path tempDir;
    private final FileAccessTree defaultFileAccess;

    public static final String ALL_UNNAMED = "ALL-UNNAMED";

@@ -139,7 +145,8 @@ public class PolicyManager {
        Map<String, Policy> pluginPolicies,
        Function<Class<?>, String> pluginResolver,
        String apmAgentPackageName,
        Module entitlementsModule
        Module entitlementsModule,
        Path tempDir
    ) {
        this.serverEntitlements = buildScopeEntitlementsMap(requireNonNull(serverPolicy));
        this.apmAgentEntitlements = apmAgentEntitlements;

@@ -149,6 +156,9 @@ public class PolicyManager {
        this.pluginResolver = pluginResolver;
        this.apmAgentPackageName = apmAgentPackageName;
        this.entitlementsModule = entitlementsModule;
        this.defaultFileAccess = FileAccessTree.of(FilesEntitlement.EMPTY, tempDir);

        this.tempDir = tempDir;

        for (var e : serverEntitlements.entrySet()) {
            validateEntitlementsPerModule(SERVER_COMPONENT_NAME, e.getKey(), e.getValue());

@@ -181,6 +191,14 @@ public class PolicyManager {
        neverEntitled(callerClass, () -> "start process");
    }

    public void checkWriteStoreAttributes(Class<?> callerClass) {
        neverEntitled(callerClass, () -> "change file store attributes");
    }

    public void checkReadStoreAttributes(Class<?> callerClass) {
        checkEntitlementPresent(callerClass, ReadStoreAttributesEntitlement.class);
    }

    /**
     * @param operationDescription is only called when the operation is not trivially allowed, meaning the check is about to fail;
     *        therefore, its performance is not a major concern.

@@ -191,7 +209,7 @@ public class PolicyManager {
            return;
        }

        throw new NotEntitledException(
        notEntitled(
            Strings.format(
                "Not entitled: component [%s], module [%s], class [%s], operation [%s]",
                getEntitlements(requestingClass).componentName(),

@@ -215,17 +233,19 @@ public class PolicyManager {
    }

    public void checkChangeJVMGlobalState(Class<?> callerClass) {
        neverEntitled(callerClass, () -> {
            // Look up the check$ method to compose an informative error message.
            // This way, we don't need to painstakingly describe every individual global-state change.
            Optional<String> checkMethodName = StackWalker.getInstance()
                .walk(
                    frames -> frames.map(StackFrame::getMethodName)
                        .dropWhile(not(methodName -> methodName.startsWith(InstrumentationService.CHECK_METHOD_PREFIX)))
                        .findFirst()
                );
            return checkMethodName.map(this::operationDescription).orElse("change JVM global state");
        });
        neverEntitled(callerClass, () -> walkStackForCheckMethodName().orElse("change JVM global state"));
    }

    private Optional<String> walkStackForCheckMethodName() {
        // Look up the check$ method to compose an informative error message.
        // This way, we don't need to painstakingly describe every individual global-state change.
        return StackWalker.getInstance()
            .walk(
                frames -> frames.map(StackFrame::getMethodName)
                    .dropWhile(not(methodName -> methodName.startsWith(InstrumentationService.CHECK_METHOD_PREFIX)))
                    .findFirst()
            )
            .map(this::operationDescription);
    }

/**
|
||||
|
@ -248,11 +268,11 @@ public class PolicyManager {
|
|||
|
||||
ModuleEntitlements entitlements = getEntitlements(requestingClass);
|
||||
if (entitlements.fileAccess().canRead(path) == false) {
|
||||
throw new NotEntitledException(
|
||||
notEntitled(
|
||||
Strings.format(
|
||||
"Not entitled: component [%s], module [%s], class [%s], entitlement [file], operation [read], path [%s]",
|
||||
entitlements.componentName(),
|
||||
requestingClass.getModule(),
|
||||
requestingClass.getModule().getName(),
|
||||
requestingClass,
|
||||
path
|
||||
)
|
||||
|
@ -273,11 +293,11 @@ public class PolicyManager {
|
|||
|
||||
ModuleEntitlements entitlements = getEntitlements(requestingClass);
|
||||
if (entitlements.fileAccess().canWrite(path) == false) {
|
||||
throw new NotEntitledException(
|
||||
notEntitled(
|
||||
Strings.format(
|
||||
"Not entitled: component [%s], module [%s], class [%s], entitlement [file], operation [write], path [%s]",
|
||||
entitlements.componentName(),
|
||||
requestingClass.getModule(),
|
||||
requestingClass.getModule().getName(),
|
||||
requestingClass,
|
||||
path
|
||||
)
|
||||
|
@ -285,6 +305,15 @@ public class PolicyManager {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Invoked when we try to get an arbitrary {@code FileAttributeView} class. Such a class can modify attributes, like owner etc.;
|
||||
* we could think about introducing checks for each of the operations, but for now we over-approximate this and simply deny when it is
|
||||
* used directly.
|
||||
*/
|
||||
public void checkGetFileAttributeView(Class<?> callerClass) {
|
||||
neverEntitled(callerClass, () -> "get file attribute view");
|
||||
}
|
||||
|
||||
/**
|
||||
* Check for operations that can access sensitive network information, e.g. secrets, tokens or SSL sessions
|
||||
*/
|
||||
|
@ -322,7 +351,7 @@ public class PolicyManager {
|
|||
Class<?> requestingClass
|
||||
) {
|
||||
if (classEntitlements.hasEntitlement(entitlementClass) == false) {
|
||||
throw new NotEntitledException(
|
||||
notEntitled(
|
||||
Strings.format(
|
||||
"Not entitled: component [%s], module [%s], class [%s], entitlement [%s]",
|
||||
classEntitlements.componentName(),
|
||||
|
@ -362,7 +391,7 @@ public class PolicyManager {
|
|||
);
|
||||
return;
|
||||
}
|
||||
throw new NotEntitledException(
|
||||
notEntitled(
|
||||
Strings.format(
|
||||
"Not entitled: component [%s], module [%s], class [%s], entitlement [write_system_properties], property [%s]",
|
||||
entitlements.componentName(),
|
||||
|
@ -373,6 +402,14 @@ public class PolicyManager {
|
|||
);
|
||||
}
|
||||
|
||||
private static void notEntitled(String message) {
|
||||
throw new NotEntitledException(message);
|
||||
}
|
||||
|
||||
public void checkManageThreadsEntitlement(Class<?> callerClass) {
|
||||
checkEntitlementPresent(callerClass, ManageThreadsEntitlement.class);
|
||||
}
|
||||
|
||||
private void checkEntitlementPresent(Class<?> callerClass, Class<? extends Entitlement> entitlementClass) {
|
||||
var requestingClass = requestingClass(callerClass);
|
||||
if (isTriviallyAllowed(requestingClass)) {
|
||||
|
@ -396,7 +433,7 @@ public class PolicyManager {
|
|||
if (pluginName != null) {
|
||||
var pluginEntitlements = pluginsEntitlements.get(pluginName);
|
||||
if (pluginEntitlements == null) {
|
||||
return ModuleEntitlements.none(pluginName);
|
||||
return defaultEntitlements(pluginName);
|
||||
} else {
|
||||
final String scopeName;
|
||||
if (requestingModule.isNamed() == false) {
|
||||
|
@ -410,10 +447,10 @@ public class PolicyManager {
|
|||
|
||||
if (requestingModule.isNamed() == false && requestingClass.getPackageName().startsWith(apmAgentPackageName)) {
|
||||
// The APM agent is the only thing running non-modular in the system classloader
|
||||
return ModuleEntitlements.from(APM_AGENT_COMPONENT_NAME, apmAgentEntitlements);
|
||||
return policyEntitlements(APM_AGENT_COMPONENT_NAME, apmAgentEntitlements);
|
||||
}
|
||||
|
||||
return ModuleEntitlements.none(UNKNOWN_COMPONENT_NAME);
|
||||
return defaultEntitlements(UNKNOWN_COMPONENT_NAME);
|
||||
}
|
||||
|
||||
private ModuleEntitlements getModuleScopeEntitlements(
|
||||
|
@ -423,9 +460,9 @@ public class PolicyManager {
|
|||
) {
|
||||
var entitlements = scopeEntitlements.get(moduleName);
|
||||
if (entitlements == null) {
|
||||
return ModuleEntitlements.none(componentName);
|
||||
return defaultEntitlements(componentName);
|
||||
}
|
||||
return ModuleEntitlements.from(componentName, entitlements);
|
||||
return policyEntitlements(componentName, entitlements);
|
||||
}
|
||||
|
||||
private static boolean isServerModule(Module requestingModule) {
|
||||
|
|
|
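Note: the hunk above extracts the stack walk from checkChangeJVMGlobalState into the new walkStackForCheckMethodName helper so other never-entitled checks can reuse it. Below is a minimal standalone sketch of the same technique; the check$ prefix constant is an assumption standing in for InstrumentationService.CHECK_METHOD_PREFIX, and the operationDescription mapping is omitted.

import java.lang.StackWalker.StackFrame;
import java.util.Optional;

import static java.util.function.Predicate.not;

public class CheckMethodNameSketch {
    // Assumption: mirrors InstrumentationService.CHECK_METHOD_PREFIX in the real code.
    private static final String CHECK_METHOD_PREFIX = "check$";

    // Walk the current stack, skip frames until the first instrumented check$... method,
    // and return its name so an informative error message can be composed from it.
    static Optional<String> walkStackForCheckMethodName() {
        return StackWalker.getInstance()
            .walk(
                frames -> frames.map(StackFrame::getMethodName)
                    .dropWhile(not(name -> name.startsWith(CHECK_METHOD_PREFIX)))
                    .findFirst()
            );
    }

    public static void main(String[] args) {
        // Outside an instrumented check$ call chain the walk finds nothing, so callers
        // fall back to a generic description, exactly as checkChangeJVMGlobalState does.
        System.out.println(walkStackForCheckMethodName().orElse("change JVM global state"));
    }
}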
@ -14,8 +14,10 @@ import org.elasticsearch.entitlement.runtime.policy.entitlements.Entitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.FilesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.InboundNetworkEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.LoadNativeLibrariesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.ManageThreadsEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.OutboundNetworkEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.SetHttpsConnectionPropertiesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.WriteAllSystemPropertiesEntitlement;
import org.elasticsearch.entitlement.runtime.policy.entitlements.WriteSystemPropertiesEntitlement;
import org.elasticsearch.xcontent.XContentLocation;
import org.elasticsearch.xcontent.XContentParser;

@ -45,20 +47,22 @@ import java.util.stream.Stream;
*/
public class PolicyParser {

private static final Map<String, Class<?>> EXTERNAL_ENTITLEMENTS = Stream.of(
FilesEntitlement.class,
private static final Map<String, Class<? extends Entitlement>> EXTERNAL_ENTITLEMENTS = Stream.of(
CreateClassLoaderEntitlement.class,
SetHttpsConnectionPropertiesEntitlement.class,
OutboundNetworkEntitlement.class,
FilesEntitlement.class,
InboundNetworkEntitlement.class,
WriteSystemPropertiesEntitlement.class,
LoadNativeLibrariesEntitlement.class
LoadNativeLibrariesEntitlement.class,
ManageThreadsEntitlement.class,
OutboundNetworkEntitlement.class,
SetHttpsConnectionPropertiesEntitlement.class,
WriteAllSystemPropertiesEntitlement.class,
WriteSystemPropertiesEntitlement.class
).collect(Collectors.toUnmodifiableMap(PolicyParser::getEntitlementTypeName, Function.identity()));

protected final XContentParser policyParser;
protected final String policyName;
private final boolean isExternalPlugin;
private final Map<String, Class<?>> externalEntitlements;
private final Map<String, Class<? extends Entitlement>> externalEntitlements;

static String getEntitlementTypeName(Class<? extends Entitlement> entitlementClass) {
var entitlementClassName = entitlementClass.getSimpleName();

@ -81,8 +85,12 @@ public class PolicyParser {
}

// package private for tests
PolicyParser(InputStream inputStream, String policyName, boolean isExternalPlugin, Map<String, Class<?>> externalEntitlements)
throws IOException {
PolicyParser(
InputStream inputStream,
String policyName,
boolean isExternalPlugin,
Map<String, Class<? extends Entitlement>> externalEntitlements
) throws IOException {
this.policyParser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, Objects.requireNonNull(inputStream));
this.policyName = policyName;
this.isExternalPlugin = isExternalPlugin;
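The EXTERNAL_ENTITLEMENTS change above tightens the map's value type to Class<? extends Entitlement> and sorts the registered classes alphabetically. A self-contained sketch of the same registry pattern follows; the snake_case name derivation here is an assumption modeled on getEntitlementTypeName, not the verbatim ES implementation.

import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class EntitlementRegistrySketch {
    interface Entitlement {}
    record ManageThreadsEntitlement() implements Entitlement {}
    record OutboundNetworkEntitlement() implements Entitlement {}

    // Assumption: derive the external name from the class name, e.g.
    // ManageThreadsEntitlement -> manage_threads.
    static String typeName(Class<? extends Entitlement> clazz) {
        String simple = clazz.getSimpleName().replace("Entitlement", "");
        return simple.replaceAll("([a-z])([A-Z])", "$1_$2").toLowerCase();
    }

    // Unmodifiable name -> class registry, built the same way as EXTERNAL_ENTITLEMENTS.
    static final Map<String, Class<? extends Entitlement>> REGISTRY = Stream.<Class<? extends Entitlement>>of(
        ManageThreadsEntitlement.class,
        OutboundNetworkEntitlement.class
    ).collect(Collectors.toUnmodifiableMap(EntitlementRegistrySketch::typeName, Function.identity()));

    public static void main(String[] args) {
        System.out.println(REGISTRY.keySet()); // [manage_threads, outbound_network] (order unspecified)
    }
}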
@ -0,0 +1,17 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/

package org.elasticsearch.entitlement.runtime.policy.entitlements;

import org.elasticsearch.entitlement.runtime.policy.ExternalEntitlement;

public record ManageThreadsEntitlement() implements Entitlement {
@ExternalEntitlement(esModulesOnly = false)
public ManageThreadsEntitlement {}
}
@ -7,8 +7,9 @@
* License v3.0 only", or the "Server Side Public License, v 1".
*/

package org.elasticsearch.common.util;
package org.elasticsearch.entitlement.runtime.policy.entitlements;

public interface Countable {
int size();
}
/**
* Describes an entitlement for reading file store attributes (e.g. disk space)
*/
public record ReadStoreAttributesEntitlement() implements Entitlement {}
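Both new entitlements above are empty records implementing the Entitlement marker interface. A sketch of that pattern and of how a presence check can be expressed as a simple type test follows; hasEntitlement here is an illustrative stand-in that mirrors the spirit of ModuleEntitlements.hasEntitlement, not its actual code.

import java.util.List;

public class MarkerEntitlementSketch {
    interface Entitlement {}
    record ManageThreadsEntitlement() implements Entitlement {}
    record ReadStoreAttributesEntitlement() implements Entitlement {}

    // Any granted instance of the requested type satisfies the check.
    static boolean hasEntitlement(List<Entitlement> granted, Class<? extends Entitlement> type) {
        return granted.stream().anyMatch(type::isInstance);
    }

    public static void main(String[] args) {
        List<Entitlement> granted = List.of(new ManageThreadsEntitlement());
        System.out.println(hasEntitlement(granted, ManageThreadsEntitlement.class));       // true
        System.out.println(hasEntitlement(granted, ReadStoreAttributesEntitlement.class)); // false
    }
}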
@ -36,13 +36,13 @@ public class FileAccessTreeTests extends ESTestCase {
}

public void testEmpty() {
var tree = FileAccessTree.of(FilesEntitlement.EMPTY);
var tree = accessTree(FilesEntitlement.EMPTY);
assertThat(tree.canRead(path("path")), is(false));
assertThat(tree.canWrite(path("path")), is(false));
}

public void testRead() {
var tree = FileAccessTree.of(entitlement("foo", "read"));
var tree = accessTree(entitlement("foo", "read"));
assertThat(tree.canRead(path("foo")), is(true));
assertThat(tree.canRead(path("foo/subdir")), is(true));
assertThat(tree.canRead(path("food")), is(false));

@ -54,7 +54,7 @@ public class FileAccessTreeTests extends ESTestCase {
}

public void testWrite() {
var tree = FileAccessTree.of(entitlement("foo", "read_write"));
var tree = accessTree(entitlement("foo", "read_write"));
assertThat(tree.canWrite(path("foo")), is(true));
assertThat(tree.canWrite(path("foo/subdir")), is(true));
assertThat(tree.canWrite(path("food")), is(false));

@ -66,7 +66,7 @@ public class FileAccessTreeTests extends ESTestCase {
}

public void testTwoPaths() {
var tree = FileAccessTree.of(entitlement("foo", "read", "bar", "read"));
var tree = accessTree(entitlement("foo", "read", "bar", "read"));
assertThat(tree.canRead(path("a")), is(false));
assertThat(tree.canRead(path("bar")), is(true));
assertThat(tree.canRead(path("bar/subdir")), is(true));

@ -77,7 +77,7 @@ public class FileAccessTreeTests extends ESTestCase {
}

public void testReadWriteUnderRead() {
var tree = FileAccessTree.of(entitlement("foo", "read", "foo/bar", "read_write"));
var tree = accessTree(entitlement("foo", "read", "foo/bar", "read_write"));
assertThat(tree.canRead(path("foo")), is(true));
assertThat(tree.canWrite(path("foo")), is(false));
assertThat(tree.canRead(path("foo/bar")), is(true));

@ -85,7 +85,7 @@ public class FileAccessTreeTests extends ESTestCase {
}

public void testNormalizePath() {
var tree = FileAccessTree.of(entitlement("foo/../bar", "read"));
var tree = accessTree(entitlement("foo/../bar", "read"));
assertThat(tree.canRead(path("foo/../bar")), is(true));
assertThat(tree.canRead(path("foo")), is(false));
assertThat(tree.canRead(path("")), is(false));

@ -93,7 +93,7 @@ public class FileAccessTreeTests extends ESTestCase {

public void testForwardSlashes() {
String sep = getDefaultFileSystem().getSeparator();
var tree = FileAccessTree.of(entitlement("a/b", "read", "m" + sep + "n", "read"));
var tree = accessTree(entitlement("a/b", "read", "m" + sep + "n", "read"));

// Native separators work
assertThat(tree.canRead(path("a" + sep + "b")), is(true));

@ -104,6 +104,18 @@ public class FileAccessTreeTests extends ESTestCase {
assertThat(tree.canRead(path("m/n")), is(true));
}

public void testTempDirAccess() {
Path tempDir = createTempDir();
var tree = FileAccessTree.of(FilesEntitlement.EMPTY, tempDir);

assertThat(tree.canRead(tempDir), is(true));
assertThat(tree.canWrite(tempDir), is(true));
}

FileAccessTree accessTree(FilesEntitlement entitlement) {
return FileAccessTree.of(entitlement, createTempDir());
}

FilesEntitlement entitlement(String... values) {
List<Object> filesData = new ArrayList<>();
for (int i = 0; i < values.length; i += 2) {
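The tests above pin down the access-tree semantics: paths are normalized, a grant on a directory covers everything beneath it, prefix matching is path-component-wise (so "foo" does not cover "food"), and read_write implies read. A toy model illustrating exactly those properties follows; it is a sketch for intuition, not the real FileAccessTree.

import java.nio.file.Path;
import java.util.List;

public class AccessTreeSketch {
    record Grant(Path root, boolean writable) {}

    private final List<Grant> grants;

    AccessTreeSketch(List<Grant> grants) {
        // Normalize grant roots up front, mirroring testNormalizePath.
        this.grants = grants.stream().map(g -> new Grant(g.root().normalize(), g.writable())).toList();
    }

    boolean canRead(Path p)  { return grants.stream().anyMatch(g -> p.normalize().startsWith(g.root())); }
    boolean canWrite(Path p) { return grants.stream().anyMatch(g -> g.writable() && p.normalize().startsWith(g.root())); }

    public static void main(String[] args) {
        var tree = new AccessTreeSketch(List.of(new Grant(Path.of("foo"), false), new Grant(Path.of("foo/bar"), true)));
        System.out.println(tree.canRead(Path.of("foo/baz")));    // true: read grant on foo covers subpaths
        System.out.println(tree.canWrite(Path.of("foo/baz")));   // false: foo is read-only
        System.out.println(tree.canWrite(Path.of("foo/bar/x"))); // true: read_write under foo/bar
        System.out.println(tree.canRead(Path.of("food")));       // false: Path.startsWith is component-wise
    }
}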
@ -71,16 +71,21 @@ public class PolicyManagerTests extends ESTestCase {
Map.of("plugin1", createPluginPolicy("plugin.module")),
c -> "plugin1",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);

// Any class from the current module (unnamed) will do
var callerClass = this.getClass();
var requestingModule = callerClass.getModule();

assertEquals("No policy for the unnamed module", ModuleEntitlements.none("plugin1"), policyManager.getEntitlements(callerClass));
assertEquals(
"No policy for the unnamed module",
policyManager.defaultEntitlements("plugin1"),
policyManager.getEntitlements(callerClass)
);

assertEquals(Map.of(requestingModule, ModuleEntitlements.none("plugin1")), policyManager.moduleEntitlementsMap);
assertEquals(Map.of(requestingModule, policyManager.defaultEntitlements("plugin1")), policyManager.moduleEntitlementsMap);
}

public void testGetEntitlementsThrowsOnMissingPolicyForPlugin() {

@ -90,16 +95,17 @@ public class PolicyManagerTests extends ESTestCase {
Map.of(),
c -> "plugin1",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);

// Any class from the current module (unnamed) will do
var callerClass = this.getClass();
var requestingModule = callerClass.getModule();

assertEquals("No policy for this plugin", ModuleEntitlements.none("plugin1"), policyManager.getEntitlements(callerClass));
assertEquals("No policy for this plugin", policyManager.defaultEntitlements("plugin1"), policyManager.getEntitlements(callerClass));

assertEquals(Map.of(requestingModule, ModuleEntitlements.none("plugin1")), policyManager.moduleEntitlementsMap);
assertEquals(Map.of(requestingModule, policyManager.defaultEntitlements("plugin1")), policyManager.moduleEntitlementsMap);
}

public void testGetEntitlementsFailureIsCached() {

@ -109,21 +115,22 @@ public class PolicyManagerTests extends ESTestCase {
Map.of(),
c -> "plugin1",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);

// Any class from the current module (unnamed) will do
var callerClass = this.getClass();
var requestingModule = callerClass.getModule();

assertEquals(ModuleEntitlements.none("plugin1"), policyManager.getEntitlements(callerClass));
assertEquals(Map.of(requestingModule, ModuleEntitlements.none("plugin1")), policyManager.moduleEntitlementsMap);
assertEquals(policyManager.defaultEntitlements("plugin1"), policyManager.getEntitlements(callerClass));
assertEquals(Map.of(requestingModule, policyManager.defaultEntitlements("plugin1")), policyManager.moduleEntitlementsMap);

// A second time
assertEquals(ModuleEntitlements.none("plugin1"), policyManager.getEntitlements(callerClass));
assertEquals(policyManager.defaultEntitlements("plugin1"), policyManager.getEntitlements(callerClass));

// Nothing new in the map
assertEquals(Map.of(requestingModule, ModuleEntitlements.none("plugin1")), policyManager.moduleEntitlementsMap);
assertEquals(Map.of(requestingModule, policyManager.defaultEntitlements("plugin1")), policyManager.moduleEntitlementsMap);
}

public void testGetEntitlementsReturnsEntitlementsForPluginUnnamedModule() {

@ -133,7 +140,8 @@ public class PolicyManagerTests extends ESTestCase {
Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))),
c -> "plugin2",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);

// Any class from the current module (unnamed) will do

@ -150,7 +158,8 @@ public class PolicyManagerTests extends ESTestCase {
Map.of(),
c -> null,
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);

// Tests do not run modular, so we cannot use a server class.

@ -162,11 +171,14 @@ public class PolicyManagerTests extends ESTestCase {

assertEquals(
"No policy for this module in server",
ModuleEntitlements.none(SERVER_COMPONENT_NAME),
policyManager.defaultEntitlements(SERVER_COMPONENT_NAME),
policyManager.getEntitlements(mockServerClass)
);

assertEquals(Map.of(requestingModule, ModuleEntitlements.none(SERVER_COMPONENT_NAME)), policyManager.moduleEntitlementsMap);
assertEquals(
Map.of(requestingModule, policyManager.defaultEntitlements(SERVER_COMPONENT_NAME)),
policyManager.moduleEntitlementsMap
);
}

public void testGetEntitlementsReturnsEntitlementsForServerModule() throws ClassNotFoundException {

@ -176,7 +188,8 @@ public class PolicyManagerTests extends ESTestCase {
Map.of(),
c -> null,
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);

// Tests do not run modular, so we cannot use a server class.

@ -201,7 +214,8 @@ public class PolicyManagerTests extends ESTestCase {
Map.of("mock-plugin", createPluginPolicy("org.example.plugin")),
c -> "mock-plugin",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);

var layer = createLayerForJar(jar, "org.example.plugin");

@ -220,7 +234,8 @@ public class PolicyManagerTests extends ESTestCase {
Map.ofEntries(entry("plugin2", createPluginPolicy(ALL_UNNAMED))),
c -> "plugin2",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);

// Any class from the current module (unnamed) will do

@ -278,7 +293,8 @@ public class PolicyManagerTests extends ESTestCase {
Map.of(),
c -> c.getPackageName().startsWith(TEST_AGENTS_PACKAGE_NAME) ? null : "test",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);
ModuleEntitlements agentsEntitlements = policyManager.getEntitlements(TestAgent.class);
assertThat(agentsEntitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(true));

@ -305,7 +321,8 @@ public class PolicyManagerTests extends ESTestCase {
Map.of(),
c -> "test",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
)
);
assertEquals(

@ -321,7 +338,8 @@ public class PolicyManagerTests extends ESTestCase {
Map.of(),
c -> "test",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
)
);
assertEquals(

@ -352,7 +370,8 @@ public class PolicyManagerTests extends ESTestCase {
),
c -> "plugin1",
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
)
);
assertEquals(

@ -371,7 +390,8 @@ public class PolicyManagerTests extends ESTestCase {
Map.of(),
c -> "test", // Insist that the class is in a plugin
TEST_AGENTS_PACKAGE_NAME,
NO_ENTITLEMENTS_MODULE
NO_ENTITLEMENTS_MODULE,
createTempDir()
);
ModuleEntitlements notAgentsEntitlements = policyManager.getEntitlements(TestAgent.class);
assertThat(notAgentsEntitlements.hasEntitlement(CreateClassLoaderEntitlement.class), is(false));

@ -385,7 +405,15 @@ public class PolicyManagerTests extends ESTestCase {
}

private static PolicyManager policyManager(String agentsPackageName, Module entitlementsModule) {
return new PolicyManager(createEmptyTestServerPolicy(), List.of(), Map.of(), c -> "test", agentsPackageName, entitlementsModule);
return new PolicyManager(
createEmptyTestServerPolicy(),
List.of(),
Map.of(),
c -> "test",
agentsPackageName,
entitlementsModule,
createTempDir()
);
}

private static Policy createEmptyTestServerPolicy() {
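testGetEntitlementsFailureIsCached asserts that a "no policy" lookup is memoized just like a successful one: a second call returns the same cached value and the map gains no new entries. A minimal sketch of that per-module memoization follows; the names are illustrative stand-ins, not the real PolicyManager internals.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class EntitlementCacheSketch {
    record Entitlements(String componentName) {}

    final Map<String, Entitlements> moduleEntitlementsMap = new ConcurrentHashMap<>();

    Entitlements getEntitlements(String moduleKey) {
        // Successful and default (no-policy) resolutions are cached alike.
        return moduleEntitlementsMap.computeIfAbsent(moduleKey, this::resolve);
    }

    private Entitlements resolve(String moduleKey) {
        // A real resolver would consult plugin policies; a default result stands in
        // for the "no policy found" case and is cached just like a hit.
        return new Entitlements("default[" + moduleKey + "]");
    }

    public static void main(String[] args) {
        var cache = new EntitlementCacheSketch();
        var first = cache.getEntitlements("plugin1");
        var second = cache.getEntitlements("plugin1");       // served from the map
        System.out.println(first == second);                 // true
        System.out.println(cache.moduleEntitlementsMap.size()); // 1 -- nothing new in the map
    }
}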
@ -20,6 +20,7 @@ import org.elasticsearch.client.WarningsHandler;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;

@ -170,7 +171,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
@SuppressWarnings("unchecked")
private void testDatabasesLoaded() throws IOException {
Request getTaskState = new Request("GET", "/_cluster/state");
ObjectPath state = ObjectPath.createFromResponse(client().performRequest(getTaskState));
ObjectPath state = ObjectPath.createFromResponse(assertOK(client().performRequest(getTaskState)));

List<?> tasks = state.evaluate("metadata.persistent_tasks.tasks");
// Short-circuit to avoid using streams if the list is empty

@ -196,7 +197,10 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas

private void testCatIndices(List<String> indexNames, @Nullable List<String> additionalIndexNames) throws IOException {
Request catIndices = new Request("GET", "_cat/indices/*?s=index&h=index&expand_wildcards=all");
String response = EntityUtils.toString(client().performRequest(catIndices).getEntity());
// the cat APIs can sometimes 404, erroneously
// see https://github.com/elastic/elasticsearch/issues/104371
setIgnoredErrorResponseCodes(catIndices, RestStatus.NOT_FOUND);
String response = EntityUtils.toString(assertOK(client().performRequest(catIndices)).getEntity());
List<String> indices = List.of(response.trim().split("\\s+"));

if (additionalIndexNames != null && additionalIndexNames.isEmpty() == false) {

@ -215,7 +219,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
assertOK(client().performRequest(putDoc));

Request getDoc = new Request("GET", "/my-index-00001/_doc/my_id");
ObjectPath doc = ObjectPath.createFromResponse(client().performRequest(getDoc));
ObjectPath doc = ObjectPath.createFromResponse(assertOK(client().performRequest(getDoc)));
assertNull(doc.evaluate("_source.tags"));
assertEquals("Sweden", doc.evaluate("_source.geo.country_name"));
}

@ -225,8 +229,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
getStar.setOptions(
RequestOptions.DEFAULT.toBuilder().setWarningsHandler(WarningsHandler.PERMISSIVE) // we don't care about warnings, just errors
);
Response response = client().performRequest(getStar);
assertOK(response);
Response response = assertOK(client().performRequest(getStar));

if (additionalIndexNames != null && additionalIndexNames.isEmpty() == false) {
indexNames = new ArrayList<>(indexNames); // recopy into a mutable list

@ -244,8 +247,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
.addHeader("X-elastic-product-origin", "kibana")
.setWarningsHandler(WarningsHandler.PERMISSIVE) // we don't care about warnings, just errors
);
Response response = client().performRequest(getStar);
assertOK(response);
Response response = assertOK(client().performRequest(getStar));

if (additionalIndexNames != null && additionalIndexNames.isEmpty() == false) {
indexNames = new ArrayList<>(indexNames); // recopy into a mutable list
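The recurring change in this file wraps every performRequest call in assertOK, which works because assertOK returns the response it just checked. A tiny sketch of that fluent-assertion idiom follows; the Response record is a stand-in for the ES REST client class.

public class AssertOkSketch {
    record Response(int statusCode) {}

    // Returning the argument after asserting enables inline chaining.
    static Response assertOK(Response response) {
        if (response.statusCode() < 200 || response.statusCode() >= 300) {
            throw new AssertionError("expected 2xx but got " + response.statusCode());
        }
        return response;
    }

    public static void main(String[] args) {
        // Before: Response r = perform(); assertOK(r);   After: var r = assertOK(perform());
        Response r = assertOK(new Response(200));
        System.out.println(r.statusCode());
    }
}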
@ -423,7 +423,7 @@ public class MustacheScriptEngineTests extends ESTestCase {
ex.getCause().getCause(),
allOf(
instanceOf(SizeLimitingStringWriter.SizeLimitExceededException.class),
transformedMatch(Throwable::getMessage, endsWith("has exceeded the size limit [1024]"))
transformedMatch(Throwable::getMessage, endsWith("has size [1030] which exceeds the size limit [1024]"))
)
);
}
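The updated assertion above expects the limit-exceeded message to report the attempted size as well as the limit. A behavioral sketch of such a size-limiting writer follows; it uses a plain IllegalStateException as a stand-in for SizeLimitingStringWriter.SizeLimitExceededException, and only the message format is taken from the test.

import java.io.StringWriter;

public class SizeLimitingWriterSketch extends StringWriter {
    private final int limit;

    SizeLimitingWriterSketch(int limit) { this.limit = limit; }

    @Override
    public void write(String str) {
        int newSize = getBuffer().length() + str.length();
        if (newSize > limit) {
            // Message format mirrors the expectation in the test above.
            throw new IllegalStateException("has size [" + newSize + "] which exceeds the size limit [" + limit + "]");
        }
        super.write(str);
    }

    public static void main(String[] args) {
        var w = new SizeLimitingWriterSketch(1024);
        try {
            w.write("x".repeat(1030));
        } catch (IllegalStateException e) {
            System.out.println(e.getMessage()); // has size [1030] which exceeds the size limit [1024]
        }
    }
}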
@ -86,7 +86,8 @@ public class TokenCountFieldMapper extends FieldMapper {
store.getValue(),
hasDocValues.getValue(),
nullValue.getValue(),
meta.getValue()
meta.getValue(),
context.isSourceSynthetic()
);
return new TokenCountFieldMapper(leafName(), ft, builderParams(this, context), this);
}

@ -100,7 +101,8 @@ public class TokenCountFieldMapper extends FieldMapper {
boolean isStored,
boolean hasDocValues,
Number nullValue,
Map<String, String> meta
Map<String, String> meta,
boolean isSyntheticSource
) {
super(
name,

@ -114,7 +116,8 @@ public class TokenCountFieldMapper extends FieldMapper {
null,
false,
null,
null
null,
isSyntheticSource
);
}
@ -277,7 +277,6 @@ public class FeatureMigrationIT extends AbstractFeatureMigrationIntegTest {
});
}

@AwaitsFix(bugUrl = "ES-10666") // This test uncovered an existing issue
public void testIndexBlockIsRemovedWhenAliasRequestFails() throws Exception {
createSystemIndexForDescriptor(INTERNAL_UNMANAGED);
ensureGreen();
@ -1,2 +1,3 @@
ALL-UNNAMED:
  - manage_threads
  - outbound_network
@ -1,2 +1,3 @@
io.netty.common:
  - outbound_network
  - manage_threads
@ -1,2 +1,3 @@
ALL-UNNAMED:
  - manage_threads
  - outbound_network
@ -1,6 +1,8 @@
io.netty.transport:
  - inbound_network
  - outbound_network
  - manage_threads
io.netty.common:
  - inbound_network
  - outbound_network
  - manage_threads
@ -254,9 +254,6 @@ tests:
- class: org.elasticsearch.xpack.security.FileSettingsRoleMappingsRestartIT
  method: testFileSettingsReprocessedOnRestartWithoutVersionChange
  issue: https://github.com/elastic/elasticsearch/issues/120964
- class: org.elasticsearch.xpack.ml.integration.ClassificationIT
  method: testWithOnlyTrainingRowsAndTrainingPercentIsFifty_DependentVariableIsKeyword
  issue: https://github.com/elastic/elasticsearch/issues/120071
- class: org.elasticsearch.xpack.security.profile.ProfileIntegTests
  method: testGetUsersWithProfileUidWhenProfileIndexDoesNotExists
  issue: https://github.com/elastic/elasticsearch/issues/121179

@ -265,9 +262,6 @@ tests:
- class: org.elasticsearch.xpack.security.profile.ProfileIntegTests
  method: testSetEnabled
  issue: https://github.com/elastic/elasticsearch/issues/121183
- class: org.elasticsearch.xpack.ml.integration.ClassificationIT
  method: testWithDatastreams
  issue: https://github.com/elastic/elasticsearch/issues/121236
- class: org.elasticsearch.xpack.test.rest.XPackRestIT
  method: test {p0=transform/*}
  issue: https://github.com/elastic/elasticsearch/issues/120816

@ -297,9 +291,6 @@ tests:
- class: org.elasticsearch.env.NodeEnvironmentTests
  method: testGetBestDowngradeVersion
  issue: https://github.com/elastic/elasticsearch/issues/121316
- class: org.elasticsearch.index.engine.ShuffleForcedMergePolicyTests
  method: testDiagnostics
  issue: https://github.com/elastic/elasticsearch/issues/121336
- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT
  method: test {yaml=reference/rest-api/security/invalidate-tokens/line_194}
  issue: https://github.com/elastic/elasticsearch/issues/121337

@ -317,9 +308,6 @@ tests:
  issue: https://github.com/elastic/elasticsearch/issues/121151
- class: org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT
  issue: https://github.com/elastic/elasticsearch/issues/121407
- class: org.elasticsearch.xpack.ml.integration.ClassificationIT
  method: testDependentVariableIsAliasToNested
  issue: https://github.com/elastic/elasticsearch/issues/121415
- class: org.elasticsearch.xpack.security.authc.jwt.JwtRealmSingleNodeTests
  method: testClientSecretRotation
  issue: https://github.com/elastic/elasticsearch/issues/120985

@ -329,30 +317,12 @@ tests:
- class: org.elasticsearch.xpack.security.profile.ProfileIntegTests
  method: testGetUsersWithProfileUid
  issue: https://github.com/elastic/elasticsearch/issues/121483
- class: org.elasticsearch.xpack.transform.checkpoint.TransformCCSCanMatchIT
  method: testTransformLifecycle_RangeQueryThatMatchesNoShards
  issue: https://github.com/elastic/elasticsearch/issues/121480
- class: org.elasticsearch.xpack.security.profile.ProfileIntegTests
  method: testSuggestProfilesWithHint
  issue: https://github.com/elastic/elasticsearch/issues/121116
- class: org.elasticsearch.xpack.security.profile.ProfileIntegTests
  method: testSuggestProfileWithData
  issue: https://github.com/elastic/elasticsearch/issues/121258
- class: org.elasticsearch.ingest.geoip.FullClusterRestartIT
  method: testGeoIpSystemFeaturesMigration {cluster=UPGRADED}
  issue: https://github.com/elastic/elasticsearch/issues/121115
- class: org.elasticsearch.xpack.core.ilm.SetSingleNodeAllocateStepTests
  method: testPerformActionSomeShardsOnlyOnNewNodesButNewNodesInvalidAttrs
  issue: https://github.com/elastic/elasticsearch/issues/121495
- class: org.elasticsearch.backwards.MixedClusterClientYamlTestSuiteIT
  method: test {p0=search.vectors/42_knn_search_int4_flat/Vector similarity with filter only}
  issue: https://github.com/elastic/elasticsearch/issues/121412
- class: org.elasticsearch.xpack.ml.integration.ClassificationIT
  method: testDependentVariableIsAliasToKeyword
  issue: https://github.com/elastic/elasticsearch/issues/121492
- class: org.elasticsearch.search.CrossClusterSearchUnavailableClusterIT
  method: testSearchSkipUnavailable
  issue: https://github.com/elastic/elasticsearch/issues/121497
- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT
  method: test {yaml=reference/cat/health/cat-health-no-timestamp-example}
  issue: https://github.com/elastic/elasticsearch/issues/121867

@ -361,9 +331,6 @@ tests:
  issue: https://github.com/elastic/elasticsearch/issues/121625
- class: org.elasticsearch.xpack.searchablesnapshots.hdfs.SecureHdfsSearchableSnapshotsIT
  issue: https://github.com/elastic/elasticsearch/issues/121967
- class: org.elasticsearch.action.search.SearchQueryThenFetchAsyncActionTests
  method: testBottomFieldSort
  issue: https://github.com/elastic/elasticsearch/issues/121503
- class: org.elasticsearch.xpack.application.CohereServiceUpgradeIT
  issue: https://github.com/elastic/elasticsearch/issues/121537
- class: org.elasticsearch.xpack.restart.FullClusterRestartIT

@ -372,30 +339,9 @@ tests:
- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT
  method: test {yaml=snapshot.delete/10_basic/Delete a snapshot asynchronously}
  issue: https://github.com/elastic/elasticsearch/issues/122102
- class: org.elasticsearch.test.rest.ClientYamlTestSuiteIT
  method: test {yaml=search/180_locale_dependent_mapping/Test Index and Search locale dependent mappings / dates}
  issue: https://github.com/elastic/elasticsearch/issues/122103
- class: org.elasticsearch.xpack.security.CoreWithSecurityClientYamlTestSuiteIT
  method: test {yaml=snapshot.delete/10_basic/Delete a snapshot asynchronously}
  issue: https://github.com/elastic/elasticsearch/issues/122104
- class: org.elasticsearch.xpack.ml.integration.ClassificationIT
  method: testWithOnlyTrainingRowsAndTrainingPercentIsFifty_DependentVariableIsBoolean
  issue: https://github.com/elastic/elasticsearch/issues/121680
- class: org.elasticsearch.xpack.downsample.DownsampleActionSingleNodeTests
  method: testDuplicateDownsampleRequest
  issue: https://github.com/elastic/elasticsearch/issues/122158
- class: org.elasticsearch.search.SearchCancellationIT
  method: testCancelFailedSearchWhenPartialResultDisallowed
  issue: https://github.com/elastic/elasticsearch/issues/121719
- class: org.elasticsearch.xpack.esql.analysis.VerifierTests
  method: testChangePoint
  issue: https://github.com/elastic/elasticsearch/issues/122179
- class: org.elasticsearch.xpack.esql.analysis.VerifierTests
  method: testChangePoint_keySortable
  issue: https://github.com/elastic/elasticsearch/issues/122180
- class: org.elasticsearch.xpack.esql.analysis.VerifierTests
  method: testChangePoint_valueNumeric
  issue: https://github.com/elastic/elasticsearch/issues/122181
- class: org.elasticsearch.datastreams.TSDBPassthroughIndexingIT
  issue: https://github.com/elastic/elasticsearch/issues/121716
- class: org.elasticsearch.smoketest.SmokeTestMonitoringWithSecurityIT

@ -407,17 +353,41 @@ tests:
- class: org.elasticsearch.xpack.security.authz.IndexAliasesTests
  method: testRemoveIndex
  issue: https://github.com/elastic/elasticsearch/issues/122221
- class: org.elasticsearch.xpack.migrate.action.ReindexDatastreamIndexTransportActionIT
  issue: https://github.com/elastic/elasticsearch/issues/121737
- class: org.elasticsearch.xpack.esql.action.EsqlActionBreakerIT
  method: testGroupingMultiValueByOrdinals
  issue: https://github.com/elastic/elasticsearch/issues/122228
- class: org.elasticsearch.xpack.esql.action.EsqlNodeFailureIT
  method: testFailureLoadingFields
  issue: https://github.com/elastic/elasticsearch/issues/122132
- class: org.elasticsearch.blocks.SimpleBlocksIT
  method: testConcurrentAddBlock
  issue: https://github.com/elastic/elasticsearch/issues/122324
- class: org.elasticsearch.xpack.searchablesnapshots.hdfs.HdfsSearchableSnapshotsIT
  issue: https://github.com/elastic/elasticsearch/issues/122024
- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT
  method: test {yaml=reference/cat/health/cat-health-example}
  issue: https://github.com/elastic/elasticsearch/issues/122335
- class: org.elasticsearch.xpack.esql.action.CrossClusterCancellationIT
  method: testCloseSkipUnavailable
  issue: https://github.com/elastic/elasticsearch/issues/122336
- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT
  method: test {yaml=reference/alias/line_260}
  issue: https://github.com/elastic/elasticsearch/issues/122343
- class: org.elasticsearch.smoketest.DocsClientYamlTestSuiteIT
  method: test {yaml=reference/snapshot-restore/apis/get-snapshot-api/line_488}
  issue: https://github.com/elastic/elasticsearch/issues/121611
- class: org.elasticsearch.repositories.blobstore.testkit.analyze.SecureHdfsRepositoryAnalysisRestIT
  issue: https://github.com/elastic/elasticsearch/issues/122377
- class: org.elasticsearch.repositories.blobstore.testkit.analyze.HdfsRepositoryAnalysisRestIT
  issue: https://github.com/elastic/elasticsearch/issues/122378
- class: org.elasticsearch.telemetry.apm.ApmAgentSettingsIT
  issue: https://github.com/elastic/elasticsearch/issues/122546
- class: org.elasticsearch.search.SearchTimeoutIT
  method: testSuggestTimeoutWithPartialResults
  issue: https://github.com/elastic/elasticsearch/issues/122548
- class: org.elasticsearch.xpack.inference.mapper.SemanticInferenceMetadataFieldsRecoveryTests
  method: testSnapshotRecovery {p0=false p1=false}
  issue: https://github.com/elastic/elasticsearch/issues/122549
- class: org.elasticsearch.xpack.inference.mapper.SemanticInferenceMetadataFieldsRecoveryTests
  method: testSnapshotRecovery {p0=true p1=false}
  issue: https://github.com/elastic/elasticsearch/issues/122550
- class: org.elasticsearch.xpack.inference.mapper.SemanticInferenceMetadataFieldsRecoveryTests
  method: testSnapshotRecovery {p0=false p1=true}
  issue: https://github.com/elastic/elasticsearch/issues/122551

# Examples:
#
@ -1,2 +1,3 @@
ALL-UNNAMED:
  - manage_threads
  - outbound_network
@ -50,7 +50,7 @@ public class SizeFieldMapper extends MetadataFieldMapper {

private static class SizeFieldType extends NumberFieldType {
SizeFieldType() {
super(NAME, NumberType.INTEGER, true, true, true, false, null, Collections.emptyMap(), null, false, null, null);
super(NAME, NumberType.INTEGER, true, true, true, false, null, Collections.emptyMap(), null, false, null, null, false);
}

@Override
@ -1,5 +1,7 @@
ALL-UNNAMED:
  - manage_threads
  - outbound_network
  - load_native_libraries
  - write_system_properties:
      properties:
        - hadoop.home.dir
@ -31,6 +31,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction;
import org.elasticsearch.search.SearchFeatures;
import org.elasticsearch.test.NotEqualMessageBuilder;

@ -628,13 +629,14 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
)
);

// assertBusy to work around https://github.com/elastic/elasticsearch/issues/104371
assertBusy(
() -> assertThat(
EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v&error_trace")).getEntity()),
containsString("testrollover-000002")
)
);
assertBusy(() -> {
Request catIndices = new Request("GET", "/_cat/indices?v&error_trace");
// the cat APIs can sometimes 404, erroneously
// see https://github.com/elastic/elasticsearch/issues/104371
setIgnoredErrorResponseCodes(catIndices, RestStatus.NOT_FOUND);
Response response = assertOK(client().performRequest(catIndices));
assertThat(EntityUtils.toString(response.getEntity()), containsString("testrollover-000002"));
});
}

Request countRequest = new Request("POST", "/" + index + "-*/_search");
@ -82,4 +82,5 @@ tasks.named("yamlRestCompatTestTransform").configure ({ task ->
"cluster.desired_nodes/10_basic/Test update desired nodes with node_version generates a warning",
"node_version warning is removed in 9.0"
)
task.skipTest("tsdb/20_mapping/nested fields", "nested field support in tsdb indices is now supported")
})
@ -2008,3 +2008,143 @@ create index with use_synthetic_source:
        flush: false
  - gt: { test.store_size_in_bytes: 0 }
  - is_false: test.fields._recovery_source
---
"Nested synthetic source with indexed dense vectors":
  - requires:
      test_runner_features: [ capabilities ]
      capabilities:
        - method: PUT
          path: /{index}
          capabilities: [ synthetic_nested_dense_vector_bug_fix ]
      reason: "Requires synthetic source bugfix for dense vectors in nested objects"
  - do:
      indices.create:
        index: nested_dense_vector_synthetic_test
        body:
          mappings:
            properties:
              parent:
                type: nested
                properties:
                  vector:
                    type: dense_vector
                    index: true
                    similarity: l2_norm
                  text:
                    type: text
          settings:
            index:
              mapping:
                source:
                  mode: synthetic
  - do:
      index:
        index: nested_dense_vector_synthetic_test
        id: 0
        refresh: true
        body: { "parent": [ { "vector": [ 1, 2 ], "text": "foo" }, { "vector": [ 2, 2 ], "text": "bar" } ] }

  - do:
      index:
        index: nested_dense_vector_synthetic_test
        id: 1
        refresh: true
        body: { "parent": [ { "text": "foo" }, { "vector": [ 2, 2 ], "text": "bar" } ] }

  - do:
      index:
        index: nested_dense_vector_synthetic_test
        id: 2
        refresh: true
        body: { "parent": [ { "vector": [ 1, 2 ] }, { "vector": [ 2, 2 ], "text": "bar" } ] }

  - do:
      search:
        index: nested_dense_vector_synthetic_test
        body:
          query:
            match_all: {}

  - match: { hits.hits.0._source.parent.0.vector: [ 1.0, 2.0 ] }
  - match: { hits.hits.0._source.parent.0.text: "foo" }
  - match: { hits.hits.0._source.parent.1.vector: [ 2.0, 2.0 ] }
  - match: { hits.hits.0._source.parent.1.text: "bar" }
  - is_false: hits.hits.1._source.parent.0.vector
  - match: { hits.hits.1._source.parent.0.text: "foo" }
  - match: { hits.hits.1._source.parent.1.vector: [ 2.0, 2.0 ] }
  - match: { hits.hits.1._source.parent.1.text: "bar" }
  - match: { hits.hits.2._source.parent.0.vector: [ 1.0, 2.0 ] }
  - is_false: hits.hits.2._source.parent.0.text
  - match: { hits.hits.2._source.parent.1.vector: [ 2.0, 2.0 ] }
  - match: { hits.hits.2._source.parent.1.text: "bar" }
---
"Nested synthetic source with un-indexed dense vectors":
  - requires:
      test_runner_features: [ capabilities ]
      capabilities:
        - method: PUT
          path: /{index}
          capabilities: [ synthetic_nested_dense_vector_bug_fix ]
      reason: "Requires synthetic source bugfix for dense vectors in nested objects"
  - do:
      indices.create:
        index: nested_dense_vector_synthetic_test
        body:
          mappings:
            properties:
              parent:
                type: nested
                properties:
                  vector:
                    type: dense_vector
                    index: false
                  text:
                    type: text
          settings:
            index:
              mapping:
                source:
                  mode: synthetic
  - do:
      index:
        index: nested_dense_vector_synthetic_test
        id: 0
        refresh: true
        body: { "parent": [ { "vector": [ 1, 2 ], "text": "foo" }, { "vector": [ 2, 2 ], "text": "bar" } ] }

  - do:
      index:
        index: nested_dense_vector_synthetic_test
        id: 1
        refresh: true
        body: { "parent": [ { "text": "foo" }, { "vector": [ 2, 2 ], "text": "bar" } ] }

  - do:
      index:
        index: nested_dense_vector_synthetic_test
        id: 2
        refresh: true
        body: { "parent": [ { "vector": [ 1, 2 ] }, { "vector": [ 2, 2 ], "text": "bar" } ] }

  - do:
      search:
        index: nested_dense_vector_synthetic_test
        body:
          query:
            match_all: {}

  - match: { hits.hits.0._source.parent.0.vector: [ 1.0, 2.0 ] }
  - match: { hits.hits.0._source.parent.0.text: "foo" }
  - match: { hits.hits.0._source.parent.1.vector: [ 2.0, 2.0 ] }
  - match: { hits.hits.0._source.parent.1.text: "bar" }
  - is_false: hits.hits.1._source.parent.0.vector
  - match: { hits.hits.1._source.parent.0.text: "foo" }
  - match: { hits.hits.1._source.parent.1.vector: [ 2.0, 2.0 ] }
  - match: { hits.hits.1._source.parent.1.text: "bar" }
  - match: { hits.hits.2._source.parent.0.vector: [ 1.0, 2.0 ] }
  - is_false: hits.hits.2._source.parent.0.text
  - match: { hits.hits.2._source.parent.1.vector: [ 2.0, 2.0 ] }
  - match: { hits.hits.2._source.parent.1.text: "bar" }
@ -0,0 +1,233 @@
setup:
  - requires:
      cluster_features: ["mapper.tsdb_nested_field_support"]
      reason: "tsdb index with nested field support enabled"

---
"Create TSDB index with field of nested type":
  - do:
      indices.create:
        index: test
        body:
          settings:
            index:
              mode: time_series
              number_of_replicas: 1
              number_of_shards: 1
              routing_path: [department]
              time_series:
                start_time: 2021-04-28T00:00:00Z
                end_time: 2021-04-29T00:00:00Z
          mappings:
            properties:
              "@timestamp":
                type: date
              department:
                type: keyword
                time_series_dimension: true
              staff:
                type: integer
              courses:
                type: nested
                properties:
                  name:
                    type: keyword
                  credits:
                    type: integer

  - do:
      index:
        index: test
        body: { "@timestamp": "2021-04-28T01:00:00Z", "department": "compsci", "staff": 12, "courses": [ { "name": "Object Oriented Programming", "credits": 3 }, { "name": "Theory of Computation", "credits": 4 } ] }

  - do:
      index:
        index: test
        body: { "@timestamp": "2021-04-28T02:00:00Z", "department": "math", "staff": 20, "courses": [ { "name": "Precalculus", "credits": 1 }, { "name": "Linear Algebra", "credits": 3 } ] }

  - do:
      indices.refresh:
        index: [ test ]

  - do:
      search:
        index: test
        body:
          size: 0
          query:
            nested:
              path: "courses"
              query:
                bool:
                  must:
                    - term:
                        courses.name: Precalculus
                    - term:
                        courses.credits: 3

  - match: { hits.total.value: 0 }

  - do:
      search:
        index: test
        body:
          query:
            nested:
              path: "courses"
              query:
                bool:
                  must:
                    - term:
                        courses.name: "Object Oriented Programming"
                    - term:
                        courses.credits: 3

  - match: { hits.total.value: 1 }
  - match: { "hits.hits.0._source.@timestamp": "2021-04-28T01:00:00.000Z" }
  - match: { hits.hits.0._source.department: "compsci" }
  - match: { hits.hits.0._source.courses: [ { "name": "Object Oriented Programming", "credits": 3 }, { "name": "Theory of Computation", "credits": 4 } ] }

---
"TSDB index with multi-level nested fields":
  - do:
      indices.create:
        index: test
        body:
          settings:
            index:
              mode: time_series
              number_of_replicas: 1
              number_of_shards: 1
              routing_path: [department]
              time_series:
                start_time: 2021-04-28T00:00:00Z
                end_time: 2021-04-29T00:00:00Z
          mappings:
            properties:
              "@timestamp":
                type: date
              department:
                type: keyword
                time_series_dimension: true
              staff:
                type: integer
              courses:
                type: nested
                properties:
                  name:
                    type: keyword
                  credits:
                    type: integer
                  students:
                    type: nested
                    properties:
                      name:
                        type: text
                      major:
                        type: keyword

  - do:
      index:
        index: test
        body:
          "@timestamp": "2021-04-28T01:00:00Z"
          department: "compsci"
          staff: 12
          courses:
            - name: "Object Oriented Programming"
              credits: 3
              students:
                - name: "Kimora Tanner"
                  major: "Computer Science"
                - name: "Bruno Garrett"
                  major: "Software Engineering"
            - name: "Theory of Computation"
              credits: 4
              students:
                - name: "Elliott Booker"
                  major: "Computer Engineering"
                - name: "Kimora Tanner"
                  major: "Software Engineering"

  - do:
      index:
        index: test
        body:
          "@timestamp": "2021-04-28T02:00:00Z"
          department: "math"
          staff: 20
          courses:
            - name: "Precalculus"
              credits: 4
              students:
                - name: "Elliott Ayers"
                  major: "Software Engineering"
                - name: "Sylvie Howe"
                  major: "Computer Engineering"
            - name: "Linear Algebra"
              credits: 3
              students:
                - name: "Kimora Tanner"
                  major: "Computer Science"
                - name: "Bruno Garett"
                  major: "Software Engineering"
                - name: "Amelia Booker"
                  major: "Psychology"

  - do:
      index:
        index: test
        body:
          "@timestamp": "2021-04-28T03:00:00Z"
          department: "compsci"
          staff: 12
          courses:
            - name: "Object Oriented Programming"
              credits: 3
              students:
                - name: "Kimora Tanner"
                  major: "Computer Science"
                - name: "Bruno Garrett"
                  major: "Software Engineering"
                - name: "Elliott Booker"
                  major: "Computer Engineering"
            - name: "Theory of Computation"
              credits: 4
              students:
                - name: "Kimora Tanner"
                  major: "Software Engineering"
                - name: "Elliott Ayers"
                  major: "Software Engineering"
                - name: "Apollo Pittman"
                  major: "Computer Engineering"

  - do:
      indices.refresh:
        index: [ test ]

  - do:
      search:
        index: test
        body:
          query:
            nested:
              path: "courses"
              query:
                bool:
                  must:
                    - nested:
                        path: "courses.students"
                        query:
                          bool:
                            must:
                              - match:
                                  courses.students.name: "Elliott"
                              - term:
                                  courses.students.major: "Computer Engineering"
                    - term:
                        courses.name: "Theory of Computation"

  - match: { hits.total.value: 1 }
  - match: { hits.hits.0._source.department: "compsci" }
  - match: { "hits.hits.0._source.@timestamp": "2021-04-28T01:00:00.000Z" }
@ -344,37 +344,6 @@ nested dimensions:
            type: keyword
            time_series_dimension: true

---
nested fields:
  - requires:
      cluster_features: ["gte_v8.2.0"]
      reason: message changed in 8.2.0

  - do:
      catch: /cannot have nested fields when index is in \[index.mode=time_series\]/
      indices.create:
        index: test
        body:
          settings:
            index:
              mode: time_series
              routing_path: [dim]
              time_series:
                start_time: 2021-04-28T00:00:00Z
                end_time: 2021-04-29T00:00:00Z
          mappings:
            properties:
              "@timestamp":
                type: date
              dim:
                type: keyword
                time_series_dimension: true
              nested:
                type: nested
                properties:
                  foo:
                    type: keyword

---
"Unable to define a metric type for a runtime field":
  - requires:
@ -77,7 +77,9 @@ public class PersistentTaskCreationFailureIT extends ESIntegTestCase {
.pendingTasks()
.stream()
.filter(
pendingClusterTask -> pendingClusterTask.getSource().string().equals("finish persistent task (failed)")
pendingClusterTask -> pendingClusterTask.getSource()
.string()
.matches("finish persistent task \\[.*] \\(failed\\)")
)
.count();
assertThat(completePersistentTaskPendingTasksCount, lessThanOrEqualTo(1L));
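The filter above switches from equals to a regex match, which accommodates a task id embedded in the source string while still pinning the surrounding text. A tiny sketch of that effect follows; the sample task id "abc123" is an illustrative assumption.

public class TaskSourceMatchSketch {
    public static void main(String[] args) {
        String source = "finish persistent task [abc123] (failed)";
        // Exact comparison no longer matches once the id is part of the source string.
        System.out.println(source.equals("finish persistent task (failed)"));             // false
        // The regex wildcards the bracketed id but anchors the rest of the text.
        System.out.println(source.matches("finish persistent task \\[.*] \\(failed\\)")); // true
    }
}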
@ -14,6 +14,7 @@ import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;

@ -22,8 +23,10 @@ import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.CharsRefBuilder;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.TransportVersion;
import org.elasticsearch.action.search.SearchRequestBuilder;

@ -33,12 +36,23 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.AbstractQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryRewriteContext;
import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.rescore.RescoreContext;
import org.elasticsearch.search.rescore.Rescorer;
import org.elasticsearch.search.rescore.RescorerBuilder;
import org.elasticsearch.search.suggest.SortBy;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.Suggester;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
import org.elasticsearch.search.suggest.term.TermSuggestion;
import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
import org.elasticsearch.xcontent.XContentBuilder;

@ -58,7 +72,7 @@ public class SearchTimeoutIT extends ESIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.singleton(BulkScorerTimeoutQueryPlugin.class);
return Collections.singleton(SearchTimeoutPlugin.class);
}

@Override

@ -72,6 +86,9 @@ public class SearchTimeoutIT extends ESIntegTestCase {
indexRandom(true, "test", randomIntBetween(20, 50));
}

/**
* Test the scenario where the query times out before starting to collect documents, verify that partial hits are not returned
*/
public void testTopHitsTimeoutBeforeCollecting() {
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.SECONDS))

@ -88,6 +105,9 @@ public class SearchTimeoutIT extends ESIntegTestCase {
});
}

/**
* Test the scenario where the query times out while collecting documents, verify that partial hits results are returned
*/
public void testTopHitsTimeoutWhileCollecting() {
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.SECONDS))

@ -103,6 +123,9 @@ public class SearchTimeoutIT extends ESIntegTestCase {
});
}

/**
* Test the scenario where the query times out before starting to collect documents, verify that partial aggs results are not returned
*/
public void testAggsTimeoutBeforeCollecting() {
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setSize(0)
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set

@ -123,6 +146,9 @@ public class SearchTimeoutIT extends ESIntegTestCase {
});
}

/**
* Test the scenario where the query times out while collecting documents, verify that partial aggs results are returned
*/
public void testAggsTimeoutWhileCollecting() {
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setSize(0)
// setting the timeout is necessary only because we check that if a TimeExceededException is thrown, a timeout was set

@ -145,6 +171,56 @@ public class SearchTimeoutIT extends ESIntegTestCase {
});
}

/**
* Test the scenario where the suggest phase (part of the query phase) times out, yet there are results
* available coming from executing the query and aggs on each shard.
*/
public void testSuggestTimeoutWithPartialResults() {
SuggestBuilder suggestBuilder = new SuggestBuilder();
suggestBuilder.setGlobalText("text");
TimeoutSuggestionBuilder timeoutSuggestionBuilder = new TimeoutSuggestionBuilder();
suggestBuilder.addSuggestion("suggest", timeoutSuggestionBuilder);
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").suggest(suggestBuilder)
.addAggregation(new TermsAggregationBuilder("terms").field("field.keyword"));
ElasticsearchAssertions.assertResponse(searchRequestBuilder, searchResponse -> {
assertThat(searchResponse.isTimedOut(), equalTo(true));
assertEquals(0, searchResponse.getShardFailures().length);
assertEquals(0, searchResponse.getFailedShards());
assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
assertThat(searchResponse.getHits().getTotalHits().value(), greaterThan(0L));
assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
StringTerms terms = searchResponse.getAggregations().get("terms");
assertEquals(1, terms.getBuckets().size());
StringTerms.Bucket bucket = terms.getBuckets().get(0);
assertEquals("value", bucket.getKeyAsString());
assertThat(bucket.getDocCount(), greaterThan(0L));
});
}

/**
* Test the scenario where the rescore phase (part of the query phase) times out, yet there are results
* available coming from executing the query and aggs on each shard.
*/
public void testRescoreTimeoutWithPartialResults() {
SearchRequestBuilder searchRequestBuilder = prepareSearch("test").setRescorer(new TimeoutRescorerBuilder())
.addAggregation(new TermsAggregationBuilder("terms").field("field.keyword"));
|
||||
ElasticsearchAssertions.assertResponse(searchRequestBuilder, searchResponse -> {
|
||||
assertThat(searchResponse.isTimedOut(), equalTo(true));
|
||||
assertEquals(0, searchResponse.getShardFailures().length);
|
||||
assertEquals(0, searchResponse.getFailedShards());
|
||||
assertThat(searchResponse.getSuccessfulShards(), greaterThan(0));
|
||||
assertEquals(searchResponse.getSuccessfulShards(), searchResponse.getTotalShards());
|
||||
assertThat(searchResponse.getHits().getTotalHits().value(), greaterThan(0L));
|
||||
assertThat(searchResponse.getHits().getHits().length, greaterThan(0));
|
||||
StringTerms terms = searchResponse.getAggregations().get("terms");
|
||||
assertEquals(1, terms.getBuckets().size());
|
||||
StringTerms.Bucket bucket = terms.getBuckets().get(0);
|
||||
assertEquals("value", bucket.getKeyAsString());
|
||||
assertThat(bucket.getDocCount(), greaterThan(0L));
|
||||
});
|
||||
}
|
||||
|
||||
public void testPartialResultsIntolerantTimeoutBeforeCollecting() {
|
||||
ElasticsearchException ex = expectThrows(
|
||||
ElasticsearchException.class,
|
||||
|
@ -171,13 +247,67 @@ public class SearchTimeoutIT extends ESIntegTestCase {
|
|||
assertEquals(429, ex.status().getStatus());
|
||||
}
|
||||
|
||||
public static final class BulkScorerTimeoutQueryPlugin extends Plugin implements SearchPlugin {
|
||||
public void testPartialResultsIntolerantTimeoutWhileSuggestingOnly() {
|
||||
SuggestBuilder suggestBuilder = new SuggestBuilder();
|
||||
suggestBuilder.setGlobalText("text");
|
||||
TimeoutSuggestionBuilder timeoutSuggestionBuilder = new TimeoutSuggestionBuilder();
|
||||
suggestBuilder.addSuggestion("suggest", timeoutSuggestionBuilder);
|
||||
ElasticsearchException ex = expectThrows(
|
||||
ElasticsearchException.class,
|
||||
prepareSearch("test").suggest(suggestBuilder).setAllowPartialSearchResults(false) // this line causes timeouts to report
|
||||
// failures
|
||||
);
|
||||
assertTrue(ex.toString().contains("Time exceeded"));
|
||||
assertEquals(429, ex.status().getStatus());
|
||||
}
|
||||
|
||||
public void testPartialResultsIntolerantTimeoutWhileSuggesting() {
|
||||
SuggestBuilder suggestBuilder = new SuggestBuilder();
|
||||
suggestBuilder.setGlobalText("text");
|
||||
TimeoutSuggestionBuilder timeoutSuggestionBuilder = new TimeoutSuggestionBuilder();
|
||||
suggestBuilder.addSuggestion("suggest", timeoutSuggestionBuilder);
|
||||
ElasticsearchException ex = expectThrows(
|
||||
ElasticsearchException.class,
|
||||
prepareSearch("test").setQuery(new TermQueryBuilder("field", "value"))
|
||||
.suggest(suggestBuilder)
|
||||
.setAllowPartialSearchResults(false) // this line causes timeouts to report failures
|
||||
);
|
||||
assertTrue(ex.toString().contains("Time exceeded"));
|
||||
assertEquals(429, ex.status().getStatus());
|
||||
}
|
||||
|
||||
public void testPartialResultsIntolerantTimeoutWhileRescoring() {
|
||||
ElasticsearchException ex = expectThrows(
|
||||
ElasticsearchException.class,
|
||||
prepareSearch("test").setQuery(new TermQueryBuilder("field", "value"))
|
||||
.setRescorer(new TimeoutRescorerBuilder())
|
||||
.setAllowPartialSearchResults(false) // this line causes timeouts to report failures
|
||||
);
|
||||
assertTrue(ex.toString().contains("Time exceeded"));
|
||||
assertEquals(429, ex.status().getStatus());
|
||||
}
|
||||
|
||||
public static final class SearchTimeoutPlugin extends Plugin implements SearchPlugin {
|
||||
@Override
|
||||
public List<QuerySpec<?>> getQueries() {
|
||||
return Collections.singletonList(new QuerySpec<QueryBuilder>("timeout", BulkScorerTimeoutQuery::new, parser -> {
|
||||
throw new UnsupportedOperationException();
|
||||
}));
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<SuggesterSpec<?>> getSuggesters() {
|
||||
return Collections.singletonList(new SuggesterSpec<>("timeout", TimeoutSuggestionBuilder::new, parser -> {
|
||||
throw new UnsupportedOperationException();
|
||||
}, TermSuggestion::new));
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<RescorerSpec<?>> getRescorers() {
|
||||
return Collections.singletonList(new RescorerSpec<>("timeout", TimeoutRescorerBuilder::new, parser -> {
|
||||
throw new UnsupportedOperationException();
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -315,4 +445,111 @@ public class SearchTimeoutIT extends ESIntegTestCase {
|
|||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Suggestion builder that triggers a timeout as part of its execution
|
||||
*/
|
||||
private static final class TimeoutSuggestionBuilder extends TermSuggestionBuilder {
|
||||
TimeoutSuggestionBuilder() {
|
||||
super("field");
|
||||
}
|
||||
|
||||
TimeoutSuggestionBuilder(StreamInput in) throws IOException {
|
||||
super(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return "timeout";
|
||||
}
|
||||
|
||||
@Override
|
||||
public SuggestionSearchContext.SuggestionContext build(SearchExecutionContext context) {
|
||||
return new TimeoutSuggestionContext(new TimeoutSuggester((ContextIndexSearcher) context.searcher()), context);
|
||||
}
|
||||
}
|
||||
|
||||
private static final class TimeoutSuggester extends Suggester<TimeoutSuggestionContext> {
|
||||
private final ContextIndexSearcher contextIndexSearcher;
|
||||
|
||||
TimeoutSuggester(ContextIndexSearcher contextIndexSearcher) {
|
||||
this.contextIndexSearcher = contextIndexSearcher;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TermSuggestion innerExecute(
|
||||
String name,
|
||||
TimeoutSuggestionContext suggestion,
|
||||
IndexSearcher searcher,
|
||||
CharsRefBuilder spare
|
||||
) {
|
||||
contextIndexSearcher.throwTimeExceededException();
|
||||
assert false;
|
||||
return new TermSuggestion(name, suggestion.getSize(), SortBy.SCORE);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TermSuggestion emptySuggestion(String name, TimeoutSuggestionContext suggestion, CharsRefBuilder spare) {
|
||||
return new TermSuggestion(name, suggestion.getSize(), SortBy.SCORE);
|
||||
}
|
||||
}
|
||||
|
||||
private static final class TimeoutSuggestionContext extends SuggestionSearchContext.SuggestionContext {
|
||||
TimeoutSuggestionContext(Suggester<?> suggester, SearchExecutionContext searchExecutionContext) {
|
||||
super(suggester, searchExecutionContext);
|
||||
}
|
||||
}
|
||||
|
||||
private static final class TimeoutRescorerBuilder extends RescorerBuilder<TimeoutRescorerBuilder> {
|
||||
TimeoutRescorerBuilder() {
|
||||
super();
|
||||
}
|
||||
|
||||
TimeoutRescorerBuilder(StreamInput in) throws IOException {
|
||||
super(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doWriteTo(StreamOutput out) {}
|
||||
|
||||
@Override
|
||||
protected void doXContent(XContentBuilder builder, Params params) {}
|
||||
|
||||
@Override
|
||||
protected RescoreContext innerBuildContext(int windowSize, SearchExecutionContext context) throws IOException {
|
||||
return new RescoreContext(10, new Rescorer() {
|
||||
@Override
|
||||
public TopDocs rescore(TopDocs topDocs, IndexSearcher searcher, RescoreContext rescoreContext) {
|
||||
((ContextIndexSearcher) context.searcher()).throwTimeExceededException();
|
||||
assert false;
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Explanation explain(
|
||||
int topLevelDocId,
|
||||
IndexSearcher searcher,
|
||||
RescoreContext rescoreContext,
|
||||
Explanation sourceExplanation
|
||||
) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return "timeout";
|
||||
}
|
||||
|
||||
@Override
|
||||
public TransportVersion getMinimalSupportedVersion() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public RescorerBuilder<TimeoutRescorerBuilder> rewrite(QueryRewriteContext ctx) {
|
||||
return this;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
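Taken together, the tests above pin down one client-facing contract. A condensed sketch, not an excerpt from this commit, assuming a query, suggester, or rescorer that trips the plugin's forced timeout as in the tests above:

    // With partial results allowed (the default), a timeout is reported on the response:
    ElasticsearchAssertions.assertResponse(
        prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.SECONDS)),
        response -> assertTrue(response.isTimedOut())
    );
    // With setAllowPartialSearchResults(false), the same timeout surfaces as a 429 failure:
    ElasticsearchException ex = expectThrows(
        ElasticsearchException.class,
        prepareSearch("test").setAllowPartialSearchResults(false)
    );
    assertEquals(429, ex.status().getStatus());
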
@@ -24,7 +24,6 @@ import org.elasticsearch.common.lucene.search.function.LeafScoreFunction;
import org.elasticsearch.common.lucene.search.function.ScoreFunction;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Settings.Builder;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.index.query.Operator;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
@@ -994,22 +993,6 @@
});
}

public void testRescoreWithTimeout() throws Exception {
// no dummy docs since merges can change scores while we run queries.
int numDocs = indexRandomNumbers("whitespace", -1, false);

String intToEnglish = English.intToEnglish(between(0, numDocs - 1));
String query = intToEnglish.split(" ")[0];
assertResponse(
prepareSearch().setSearchType(SearchType.QUERY_THEN_FETCH)
.setQuery(QueryBuilders.matchQuery("field1", query).operator(Operator.OR))
.setSize(10)
.addRescorer(new QueryRescorerBuilder(functionScoreQuery(new TestTimedScoreFunctionBuilder())).windowSize(100))
.setTimeout(TimeValue.timeValueMillis(10)),
r -> assertTrue(r.isTimedOut())
);
}

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(TestTimedQueryPlugin.class);

@@ -174,11 +174,13 @@ public class TransportVersions {
public static final TransportVersion INFERENCE_REQUEST_ADAPTIVE_RATE_LIMITING = def(8_839_0_00);
public static final TransportVersion ML_INFERENCE_IBM_WATSONX_RERANK_ADDED = def(8_840_0_00);
public static final TransportVersion COHERE_BIT_EMBEDDING_TYPE_SUPPORT_ADDED_BACKPORT_8_X = def(8_840_0_01);
public static final TransportVersion REMOVE_ALL_APPLICABLE_SELECTOR_BACKPORT_8_X = def(8_840_0_02);
public static final TransportVersion ELASTICSEARCH_9_0 = def(9_000_0_00);
public static final TransportVersion REMOVE_SNAPSHOT_FAILURES_90 = def(9_000_0_01);
public static final TransportVersion TRANSPORT_STATS_HANDLING_TIME_REQUIRED_90 = def(9_000_0_02);
public static final TransportVersion REMOVE_DESIRED_NODE_VERSION_90 = def(9_000_0_03);
public static final TransportVersion ESQL_DRIVER_TASK_DESCRIPTION_90 = def(9_000_0_04);
public static final TransportVersion REMOVE_ALL_APPLICABLE_SELECTOR_9_0 = def(9_000_0_05);
public static final TransportVersion COHERE_BIT_EMBEDDING_TYPE_SUPPORT_ADDED = def(9_001_0_00);
public static final TransportVersion REMOVE_SNAPSHOT_FAILURES = def(9_002_0_00);
public static final TransportVersion TRANSPORT_STATS_HANDLING_TIME_REQUIRED = def(9_003_0_00);
@@ -186,6 +188,9 @@
public static final TransportVersion ESQL_DRIVER_TASK_DESCRIPTION = def(9_005_0_00);
public static final TransportVersion ESQL_RETRY_ON_SHARD_LEVEL_FAILURE = def(9_006_0_00);
public static final TransportVersion ESQL_PROFILE_ASYNC_NANOS = def(9_007_0_00);
public static final TransportVersion ESQL_LOOKUP_JOIN_SOURCE_TEXT = def(9_008_0_00);
public static final TransportVersion REMOVE_ALL_APPLICABLE_SELECTOR = def(9_009_0_00);
public static final TransportVersion SLM_UNHEALTHY_IF_NO_SNAPSHOT_WITHIN = def(9_010_0_00);

/*
* WARNING: DO NOT MERGE INTO MAIN!
@@ -208,6 +213,8 @@
* A new transport version should be added EVERY TIME a change is made to the serialization protocol of one or more classes. Each
* transport version should only be used in a single merged commit (apart from the BwC versions copied from o.e.Version, ≤V_8_8_1).
*
* More information about versions and backporting at docs/internal/Versioning.md
*
* ADDING A TRANSPORT VERSION
* To add a new transport version, add a new constant at the bottom of the list, above this comment. Don't add other lines,
* comments, etc. The version id has the following layout:

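For orientation, the constants above appear to follow an M_NNN_S_PP id pattern (major version, server part, subsidiary part, patch part), which is what the truncated comment goes on to describe. A hypothetical next addition, with an invented name and the id chosen as the next free slot after 9_010_0_00, would read:

    // Hypothetical example only: a new constant goes at the bottom of the list, above the comment.
    public static final TransportVersion MY_NEW_WIRE_FORMAT = def(9_011_0_00);
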
@@ -12,6 +12,7 @@ package org.elasticsearch.action.admin.indices.forcemerge;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.SubscribableListener;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterState;
@@ -96,12 +97,16 @@ public class TransportForceMergeAction extends TransportBroadcastByNodeAction<
ActionListener<TransportBroadcastByNodeAction.EmptyResult> listener
) {
assert (task instanceof CancellableTask) == false; // TODO: add cancellation handling here once the task supports it
threadPool.executor(ThreadPool.Names.FORCE_MERGE).execute(ActionRunnable.supply(listener, () -> {
SubscribableListener.<IndexShard>newForked(l -> {
IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex())
.getShard(shardRouting.shardId().id());
indexShard.forceMerge(request);
return EmptyResult.INSTANCE;
}));
indexShard.ensureMutable(l.map(unused -> indexShard));
}).<EmptyResult>andThen((l, indexShard) -> {
threadPool.executor(ThreadPool.Names.FORCE_MERGE).execute(ActionRunnable.supply(l, () -> {
indexShard.forceMerge(request);
return EmptyResult.INSTANCE;
}));
}).addListener(listener);
}

/**

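Because the hunk interleaves old and new lines, the replacement body is easier to read reassembled as one chain (new side only, simplified from the hunk above):

    // Acquire a mutable shard asynchronously, then fork the merge to the force_merge pool,
    // and complete the caller's listener when the chain finishes.
    SubscribableListener.<IndexShard>newForked(l -> {
        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex())
            .getShard(shardRouting.shardId().id());
        indexShard.ensureMutable(l.map(unused -> indexShard));
    }).<EmptyResult>andThen((l, indexShard) -> {
        threadPool.executor(ThreadPool.Names.FORCE_MERGE).execute(ActionRunnable.supply(l, () -> {
            indexShard.forceMerge(request); // runs on the force_merge pool
            return EmptyResult.INSTANCE;
        }));
    }).addListener(listener);
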
@@ -656,10 +656,6 @@ public class ResolveIndexAction extends ActionType<ResolveIndexAction.Response>
: switch (resolvedExpression.selector()) {
case DATA -> dataStream.getDataComponent().getIndices().stream();
case FAILURES -> dataStream.getFailureIndices().stream();
case ALL_APPLICABLE -> Stream.concat(
dataStream.getIndices().stream(),
dataStream.getFailureIndices().stream()
);
};
String[] backingIndices = dataStreamIndices.map(Index::getName).toArray(String[]::new);
dataStreams.add(new ResolvedDataStream(dataStream.getName(), backingIndices, DataStream.TIMESTAMP_FIELD_NAME));
@@ -684,13 +680,6 @@ public class ResolveIndexAction extends ActionType<ResolveIndexAction.Response>
assert ia.isDataStreamRelated() : "Illegal selector [failures] used on non data stream alias";
yield ia.getFailureIndices(metadata).stream();
}
case ALL_APPLICABLE -> {
if (ia.isDataStreamRelated()) {
yield Stream.concat(ia.getIndices().stream(), ia.getFailureIndices(metadata).stream());
} else {
yield ia.getIndices().stream();
}
}
};
}
return aliasIndices;

@@ -13,14 +13,13 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndexComponentSelector;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.ResolvedExpression;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.SelectorResolver;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskId;
@@ -126,14 +125,12 @@ public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implem
);
}

// Ensure we have a valid selector in the request
if (rolloverTarget != null) {
ResolvedExpression resolvedExpression = SelectorResolver.parseExpression(rolloverTarget, indicesOptions);
IndexComponentSelector selector = resolvedExpression.selector();
if (IndexComponentSelector.ALL_APPLICABLE.equals(selector)) {
validationException = addValidationError(
"rollover cannot be applied to both regular and failure indices at the same time",
validationException
);
try {
SelectorResolver.parseExpression(rolloverTarget, indicesOptions);
} catch (InvalidIndexNameException exception) {
validationException = addValidationError(exception.getMessage(), validationException);
}
}

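Under the new validation, any selector (including the removed ::*) is parsed up front and parse failures fold into the request's validation errors. A minimal sketch of that idiom, with hypothetical inputs:

    // Sketch: the InvalidIndexNameException message becomes a request validation error.
    ActionRequestValidationException validationException = null;
    try {
        SelectorResolver.parseExpression("my-alias::*", IndicesOptions.strictExpandOpen()); // hypothetical target
    } catch (InvalidIndexNameException e) {
        validationException = addValidationError(e.getMessage(), validationException);
    }
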
@@ -12,7 +12,6 @@ package org.elasticsearch.action.search;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.util.Countable;
import org.elasticsearch.common.util.PlainIterator;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
@@ -29,7 +28,7 @@ import java.util.Objects;
* the cluster alias.
* @see OriginalIndices
*/
public final class SearchShardIterator implements Comparable<SearchShardIterator>, Countable {
public final class SearchShardIterator implements Comparable<SearchShardIterator> {

private final OriginalIndices originalIndices;
private final String clusterAlias;
@@ -171,7 +170,6 @@ public final class SearchShardIterator implements Comparable<SearchShardIterator
*
* @return number of shard routing instances in this iterator
*/
@Override
public int size() {
return targetNodesIterator.size();
}

@@ -9,6 +9,7 @@

package org.elasticsearch.action.support;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -23,14 +24,11 @@ import java.util.Map;
* We define as index components the two different sets of indices a data stream could consist of:
* - DATA: represents the backing indices
* - FAILURES: represent the failing indices
* - ALL: represents all available in this expression components, meaning if it's a data stream both backing and failure indices and if it's
* an index only the index itself.
* Note: An index is its own DATA component, but it cannot have a FAILURE component.
*/
public enum IndexComponentSelector implements Writeable {
DATA("data", (byte) 0),
FAILURES("failures", (byte) 1),
ALL_APPLICABLE("*", (byte) 2);
FAILURES("failures", (byte) 1);

private final String key;
private final byte id;
@@ -75,7 +73,15 @@ public enum IndexComponentSelector implements Writeable {
}

public static IndexComponentSelector read(StreamInput in) throws IOException {
return getById(in.readByte());
byte id = in.readByte();
if (in.getTransportVersion().onOrAfter(TransportVersions.REMOVE_ALL_APPLICABLE_SELECTOR)
|| in.getTransportVersion().isPatchFrom(TransportVersions.REMOVE_ALL_APPLICABLE_SELECTOR_9_0)
|| in.getTransportVersion().isPatchFrom(TransportVersions.REMOVE_ALL_APPLICABLE_SELECTOR_BACKPORT_8_X)) {
return getById(id);
} else {
// Legacy value ::*, converted to ::data
return id == 2 ? DATA : getById(id);
}
}

// Visible for testing
@@ -95,10 +101,10 @@
}

public boolean shouldIncludeData() {
return this == ALL_APPLICABLE || this == DATA;
return this == DATA;
}

public boolean shouldIncludeFailures() {
return this == ALL_APPLICABLE || this == FAILURES;
return this == FAILURES;
}
}

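The version gate in read() preserves wire compatibility with nodes that may still send the removed ALL_APPLICABLE id. The mapping it applies can be summarized as a small pure function (a sketch, assuming only the getById lookup shown above):

    // Sketch: streams new enough to know the selector was removed resolve ids normally;
    // older streams may still send the legacy ALL_APPLICABLE id (2), which collapses to DATA.
    static IndexComponentSelector fromWireId(byte id, boolean streamKnowsSelectorWasRemoved) {
        if (streamKnowsSelectorWasRemoved == false && id == 2) {
            return IndexComponentSelector.DATA; // legacy ::* converted to ::data
        }
        return IndexComponentSelector.getById(id);
    }
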
@@ -23,6 +23,7 @@ import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.SubscribableListener;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.single.instance.TransportInstanceSingleOperationAction;
import org.elasticsearch.client.internal.node.NodeClient;
@@ -107,7 +108,10 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio

@Override
protected Executor executor(ShardId shardId) {
final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
return executor(indicesService.indexServiceSafe(shardId.getIndex()));
}

private Executor executor(IndexService indexService) {
return threadPool.executor(indexService.getIndexSettings().getIndexMetadata().isSystem() ? Names.SYSTEM_WRITE : Names.WRITE);
}

@@ -201,136 +205,148 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
final IndexShard indexShard = indexService.getShard(shardId.getId());
final MappingLookup mappingLookup = indexShard.mapperService().mappingLookup();
final UpdateHelper.Result result = deleteInferenceResults(
request,
updateHelper.prepare(request, indexShard, threadPool::absoluteTimeInMillis),
indexService.getMetadata(),
mappingLookup
);

switch (result.getResponseResult()) {
case CREATED -> {
IndexRequest upsertRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, it's already done in the index request
final BytesReference upsertSourceBytes = upsertRequest.source();
client.bulk(
toSingleItemBulkRequest(upsertRequest),
unwrappingSingleItemBulkResponse(ActionListener.<DocWriteResponse>wrap(response -> {
UpdateResponse update = new UpdateResponse(
response.getShardInfo(),
response.getShardId(),
response.getId(),
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
response.getResult()
);
if (request.fetchSource() != null && request.fetchSource().fetchSource()) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(
upsertSourceBytes,
true,
upsertRequest.getContentType()
);
update.setGetResult(
UpdateHelper.extractGetResult(
request,
request.concreteIndex(),
mappingLookup,
var executor = executor(indexService);
assert ThreadPool.assertCurrentThreadPool(Names.SYSTEM_WRITE, Names.WRITE);

SubscribableListener.newForked(indexShard::ensureMutable)
// Make sure to fork back to a `write` thread pool if necessary
.<UpdateHelper.Result>andThen(executor, threadPool.getThreadContext(), (l, unused) -> ActionListener.completeWith(l, () -> {
assert ThreadPool.assertCurrentThreadPool(Names.SYSTEM_WRITE, Names.WRITE);
return deleteInferenceResults(
request,
updateHelper.prepare(request, indexShard, threadPool::absoluteTimeInMillis), // Gets the doc using the engine
indexService.getMetadata(),
mappingLookup
);
}))
// Proceed with a single item bulk request
.<UpdateResponse>andThen((l, result) -> {
switch (result.getResponseResult()) {
case CREATED -> {
IndexRequest upsertRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, it's already done in the index request
final BytesReference upsertSourceBytes = upsertRequest.source();
client.bulk(
toSingleItemBulkRequest(upsertRequest),
unwrappingSingleItemBulkResponse(ActionListener.<DocWriteResponse>wrap(response -> {
UpdateResponse update = new UpdateResponse(
response.getShardInfo(),
response.getShardId(),
response.getId(),
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
sourceAndContent.v2(),
sourceAndContent.v1(),
upsertSourceBytes
)
);
} else {
update.setGetResult(null);
}
update.setForcedRefresh(response.forcedRefresh());
listener.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount)))
);
}
case UPDATED -> {
IndexRequest indexRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, it's already done in the index request
final BytesReference indexSourceBytes = indexRequest.source();
client.bulk(
toSingleItemBulkRequest(indexRequest),
unwrappingSingleItemBulkResponse(ActionListener.<DocWriteResponse>wrap(response -> {
UpdateResponse update = new UpdateResponse(
response.getShardInfo(),
response.getShardId(),
response.getId(),
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
response.getResult()
response.getResult()
);
if (request.fetchSource() != null && request.fetchSource().fetchSource()) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(
upsertSourceBytes,
true,
upsertRequest.getContentType()
);
update.setGetResult(
UpdateHelper.extractGetResult(
request,
request.concreteIndex(),
mappingLookup,
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
sourceAndContent.v2(),
sourceAndContent.v1(),
upsertSourceBytes
)
);
} else {
update.setGetResult(null);
}
update.setForcedRefresh(response.forcedRefresh());
l.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(l, request, exception, retryCount)))
);
update.setGetResult(
UpdateHelper.extractGetResult(
request,
request.concreteIndex(),
mappingLookup,
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
result.updatedSourceAsMap(),
result.updateSourceContentType(),
indexSourceBytes
)
);
update.setForcedRefresh(response.forcedRefresh());
listener.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount)))
);
}
case DELETED -> {
DeleteRequest deleteRequest = result.action();
client.bulk(
toSingleItemBulkRequest(deleteRequest),
unwrappingSingleItemBulkResponse(ActionListener.<DeleteResponse>wrap(response -> {
UpdateResponse update = new UpdateResponse(
response.getShardInfo(),
response.getShardId(),
response.getId(),
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
response.getResult()
);
update.setGetResult(
UpdateHelper.extractGetResult(
request,
request.concreteIndex(),
mappingLookup,
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
result.updatedSourceAsMap(),
result.updateSourceContentType(),
null
)
);
update.setForcedRefresh(response.forcedRefresh());
listener.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount)))
);
}
case NOOP -> {
UpdateResponse update = result.action();
IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex());
if (indexServiceOrNull != null) {
IndexShard shard = indexService.getShardOrNull(shardId.getId());
if (shard != null) {
shard.noopUpdate();
}
case UPDATED -> {
IndexRequest indexRequest = result.action();
// we fetch it from the index request so we don't generate the bytes twice, it's already done in the index request
final BytesReference indexSourceBytes = indexRequest.source();
client.bulk(
toSingleItemBulkRequest(indexRequest),
unwrappingSingleItemBulkResponse(ActionListener.<DocWriteResponse>wrap(response -> {
UpdateResponse update = new UpdateResponse(
response.getShardInfo(),
response.getShardId(),
response.getId(),
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
response.getResult()
);
update.setGetResult(
UpdateHelper.extractGetResult(
request,
request.concreteIndex(),
mappingLookup,
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
result.updatedSourceAsMap(),
result.updateSourceContentType(),
indexSourceBytes
)
);
update.setForcedRefresh(response.forcedRefresh());
l.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(l, request, exception, retryCount)))
);
}
case DELETED -> {
DeleteRequest deleteRequest = result.action();
client.bulk(
toSingleItemBulkRequest(deleteRequest),
unwrappingSingleItemBulkResponse(ActionListener.<DeleteResponse>wrap(response -> {
UpdateResponse update = new UpdateResponse(
response.getShardInfo(),
response.getShardId(),
response.getId(),
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
response.getResult()
);
update.setGetResult(
UpdateHelper.extractGetResult(
request,
request.concreteIndex(),
mappingLookup,
response.getSeqNo(),
response.getPrimaryTerm(),
response.getVersion(),
result.updatedSourceAsMap(),
result.updateSourceContentType(),
null
)
);
update.setForcedRefresh(response.forcedRefresh());
l.onResponse(update);
}, exception -> handleUpdateFailureWithRetry(l, request, exception, retryCount)))
);
}
case NOOP -> {
UpdateResponse update = result.action();
IndexService indexServiceOrNull = indicesService.indexService(shardId.getIndex());
if (indexServiceOrNull != null) {
IndexShard shard = indexService.getShardOrNull(shardId.getId());
if (shard != null) {
shard.noopUpdate();
}
}
l.onResponse(update);
}
default -> throw new IllegalStateException("Illegal result " + result.getResponseResult());
}
listener.onResponse(update);
}
default -> throw new IllegalStateException("Illegal result " + result.getResponseResult());
}
})
.addListener(listener);
}

private void handleUpdateFailureWithRetry(

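Stripped of the per-case response handling, the new control flow reads as a three-step chain. A condensed sketch of the new side of the hunk (response handling elided):

    // 1) wait for the shard to become mutable, 2) fork back to a write executor to prepare
    // the update, 3) issue the single-item bulk request and relay the response.
    SubscribableListener.newForked(indexShard::ensureMutable)
        .<UpdateHelper.Result>andThen(executor, threadPool.getThreadContext(), (l, unused) ->
            ActionListener.completeWith(l, () -> deleteInferenceResults(
                request,
                updateHelper.prepare(request, indexShard, threadPool::absoluteTimeInMillis),
                indexService.getMetadata(),
                mappingLookup
            )))
        .<UpdateResponse>andThen((l, result) -> { /* CREATED / UPDATED / DELETED / NOOP */ })
        .addListener(listener);
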
@@ -249,7 +249,8 @@ class Elasticsearch {
nodeEnv.configDir(),
nodeEnv.tmpDir()
);
} else if (RuntimeVersionFeature.isSecurityManagerAvailable()) {
} else {
assert RuntimeVersionFeature.isSecurityManagerAvailable();
// no need to explicitly enable native access for legacy code
pluginsLoader = PluginsLoader.createPluginsLoader(modulesBundles, pluginsBundles, Map.of());
// install SM after natives, shutdown hooks, etc.
@@ -259,10 +260,6 @@
SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(args.nodeSettings()),
args.pidFile()
);
} else {
// TODO: should we throw/interrupt startup in this case?
pluginsLoader = PluginsLoader.createPluginsLoader(modulesBundles, pluginsBundles, Map.of());
LogManager.getLogger(Elasticsearch.class).warn("Bootstrapping without any protection");
}

bootstrap.setPluginsLoader(pluginsLoader);

@@ -156,9 +156,8 @@ public record AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabl
)) {
if (indexMetadata.getNumberOfReplicas() == 0) {
nrReplicasChanged.computeIfAbsent(1, ArrayList::new).add(indexMetadata.getIndex().getName());
} else {
continue;
}
continue;
}
if (allocation == null) {
allocation = allocationSupplier.get();

@@ -81,8 +81,7 @@ public class IndexAbstractionResolver {
indexNameExpressionResolver,
includeDataStreams
)) {
// Resolve any ::* suffixes on the expression. We need to resolve them all to their final valid selectors
resolveSelectorsAndCombine(authorizedIndex, selectorString, indicesOptions, resolvedIndices, projectMetadata);
resolveSelectorsAndCollect(authorizedIndex, selectorString, indicesOptions, resolvedIndices, projectMetadata);
}
}
if (resolvedIndices.isEmpty()) {
@@ -98,9 +97,8 @@
}
}
} else {
// Resolve any ::* suffixes on the expression. We need to resolve them all to their final valid selectors
Set<String> resolvedIndices = new HashSet<>();
resolveSelectorsAndCombine(indexAbstraction, selectorString, indicesOptions, resolvedIndices, projectMetadata);
resolveSelectorsAndCollect(indexAbstraction, selectorString, indicesOptions, resolvedIndices, projectMetadata);
if (minus) {
finalIndices.removeAll(resolvedIndices);
} else if (indicesOptions.ignoreUnavailable() == false || isAuthorized.test(indexAbstraction)) {
@@ -114,7 +112,7 @@
return finalIndices;
}

private static void resolveSelectorsAndCombine(
private static void resolveSelectorsAndCollect(
String indexAbstraction,
String selectorString,
IndicesOptions indicesOptions,
@@ -132,19 +130,8 @@
selectorString = IndexComponentSelector.DATA.getKey();
}

if (Regex.isMatchAllPattern(selectorString)) {
// Always accept data
collect.add(IndexNameExpressionResolver.combineSelectorExpression(indexAbstraction, IndexComponentSelector.DATA.getKey()));
// Only put failures on the expression if the abstraction supports it.
if (acceptsAllSelectors) {
collect.add(
IndexNameExpressionResolver.combineSelectorExpression(indexAbstraction, IndexComponentSelector.FAILURES.getKey())
);
}
} else {
// A non-wildcard selector is always passed along as-is, its validity for this kind of abstraction is tested later
collect.add(IndexNameExpressionResolver.combineSelectorExpression(indexAbstraction, selectorString));
}
// A selector is always passed along as-is, its validity for this kind of abstraction is tested later
collect.add(IndexNameExpressionResolver.combineSelectorExpression(indexAbstraction, selectorString));
} else {
assert selectorString == null
: "A selector string [" + selectorString + "] is present but selectors are disabled in this context";

@@ -2072,6 +2072,12 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
return this;
}

public Builder putRolloverInfos(Map<String, RolloverInfo> rolloverInfos) {
this.rolloverInfos.clear();
this.rolloverInfos.putAllFromMap(rolloverInfos);
return this;
}

public long version() {
return this.version;
}

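The new bulk setter replaces the map wholesale rather than merging into it. Hypothetical usage against an existing builder ('existing' and 'info' are assumed names, not from this commit):

    IndexMetadata updated = IndexMetadata.builder(existing)
        .putRolloverInfos(Map.of("logs-alias", info)) // clears previous entries, then copies these
        .build();
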
@@ -433,21 +433,9 @@
}
} else {
if (isExclusion) {
if (IndexComponentSelector.ALL_APPLICABLE.equals(selector)) {
resources.remove(new ResolvedExpression(baseExpression, IndexComponentSelector.DATA));
resources.remove(new ResolvedExpression(baseExpression, IndexComponentSelector.FAILURES));
} else {
resources.remove(new ResolvedExpression(baseExpression, selector));
}
resources.remove(new ResolvedExpression(baseExpression, selector));
} else if (ensureAliasOrIndexExists(context, baseExpression, selector)) {
if (IndexComponentSelector.ALL_APPLICABLE.equals(selector)) {
resources.add(new ResolvedExpression(baseExpression, IndexComponentSelector.DATA));
if (context.getProject().getIndicesLookup().get(baseExpression).isDataStreamRelated()) {
resources.add(new ResolvedExpression(baseExpression, IndexComponentSelector.FAILURES));
}
} else {
resources.add(new ResolvedExpression(baseExpression, selector));
}
resources.add(new ResolvedExpression(baseExpression, selector));
}
}
}
@@ -1279,8 +1267,7 @@

private static boolean resolvedExpressionsContainsAbstraction(Set<ResolvedExpression> resolvedExpressions, String abstractionName) {
return resolvedExpressions.contains(new ResolvedExpression(abstractionName))
|| resolvedExpressions.contains(new ResolvedExpression(abstractionName, IndexComponentSelector.DATA))
|| resolvedExpressions.contains(new ResolvedExpression(abstractionName, IndexComponentSelector.ALL_APPLICABLE));
|| resolvedExpressions.contains(new ResolvedExpression(abstractionName, IndexComponentSelector.DATA));
}

/**
@@ -1585,8 +1572,7 @@
if (context.options.allowSelectors()) {
// Ensure that the selectors are present and that they are compatible with the abstractions they are used with
assert selector != null : "Earlier logic should have parsed selectors or added the default selectors already";
// Check if ::failures has been explicitly requested, since requesting ::* for non-data-stream abstractions would just
// return their data components.
// Check if ::failures has been explicitly requested
if (IndexComponentSelector.FAILURES.equals(selector) && indexAbstraction.isDataStreamRelated() == false) {
// If requested abstraction is not data stream related, then you cannot use ::failures
if (ignoreUnavailable) {
@@ -1942,9 +1928,9 @@
final IndexMetadata.State excludeState = excludeState(context.getOptions());
Set<ResolvedExpression> resources = new HashSet<>();
if (context.isPreserveAliases() && indexAbstraction.getType() == Type.ALIAS) {
expandToApplicableSelectors(indexAbstraction, selector, resources);
resources.add(new ResolvedExpression(indexAbstraction.getName(), selector));
} else if (context.isPreserveDataStreams() && indexAbstraction.getType() == Type.DATA_STREAM) {
expandToApplicableSelectors(indexAbstraction, selector, resources);
resources.add(new ResolvedExpression(indexAbstraction.getName(), selector));
} else {
if (shouldIncludeRegularIndices(context.getOptions(), selector)) {
for (int i = 0, n = indexAbstraction.getIndices().size(); i < n; i++) {
@@ -1971,31 +1957,6 @@
return resources;
}

/**
* Adds the abstraction and selector to the results when preserving data streams and aliases at wildcard resolution. If a selector
* is provided, the result is only added if the selector is applicable to the abstraction provided. If
* {@link IndexComponentSelector#ALL_APPLICABLE} is given, the selectors are expanded only to those which are applicable to the
* provided abstraction.
* @param indexAbstraction abstraction to add
* @param selector The selector to add
* @param resources Result collector which is updated with all applicable resolved expressions for a given abstraction and selector
* pair.
*/
private static void expandToApplicableSelectors(
IndexAbstraction indexAbstraction,
IndexComponentSelector selector,
Set<ResolvedExpression> resources
) {
if (IndexComponentSelector.ALL_APPLICABLE.equals(selector)) {
resources.add(new ResolvedExpression(indexAbstraction.getName(), IndexComponentSelector.DATA));
if (indexAbstraction.isDataStreamRelated()) {
resources.add(new ResolvedExpression(indexAbstraction.getName(), IndexComponentSelector.FAILURES));
}
} else if (selector == null || indexAbstraction.isDataStreamRelated() || selector.shouldIncludeFailures() == false) {
resources.add(new ResolvedExpression(indexAbstraction.getName(), selector));
}
}

private static List<ResolvedExpression> resolveEmptyOrTrivialWildcard(Context context, IndexComponentSelector selector) {
final String[] allIndices = resolveEmptyOrTrivialWildcardToAllIndices(context.getOptions(), context.getProject(), selector);
List<String> indices;
@@ -2388,20 +2349,10 @@
String suffix = expression.substring(lastDoubleColon + SELECTOR_SEPARATOR.length());
IndexComponentSelector selector = IndexComponentSelector.getByKey(suffix);
if (selector == null) {
// Do some work to surface a helpful error message for likely errors
if (Regex.isSimpleMatchPattern(suffix)) {
throw new InvalidIndexNameException(
expression,
"Invalid usage of :: separator, ["
+ suffix
+ "] contains a wildcard, but only the match all wildcard [*] is supported in a selector"
);
} else {
throw new InvalidIndexNameException(
expression,
"Invalid usage of :: separator, [" + suffix + "] is not a recognized selector"
);
}
throw new InvalidIndexNameException(
expression,
"invalid usage of :: separator, [" + suffix + "] is not a recognized selector"
);
}
String expressionBase = expression.substring(0, lastDoubleColon);
ensureNoMoreSelectorSeparators(expressionBase, expression);

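With the wildcard special case gone, every suffix after the :: separator that is not a recognized selector takes the same rejection path. A sketch with a hypothetical expression (so, presumably, "logs::all" and now also "logs::*" both fail the getByKey lookup):

    // Expected to throw: "invalid usage of :: separator, [all] is not a recognized selector"
    expectThrows(
        InvalidIndexNameException.class,
        () -> SelectorResolver.parseExpression("logs::all", indicesOptions) // hypothetical inputs
    );
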
@@ -8,26 +8,12 @@
*/
package org.elasticsearch.cluster.routing;

import org.elasticsearch.common.util.Countable;

import java.util.List;

/**
* Allows to iterate over unrelated shards.
*/
public interface ShardsIterator extends Iterable<ShardRouting>, Countable {

/**
* Resets the iterator to its initial state.
*/
void reset();

/**
* The number of shard routing instances.
*
* @return number of shard routing instances in this iterator
*/
int size();
public interface ShardsIterator extends Iterable<ShardRouting> {

/**
* The number of active shard routing instances
@@ -41,13 +27,6 @@ public interface ShardsIterator extends Iterable<ShardRouting>, Countable {
*/
ShardRouting nextOrNull();

/**
* Return the number of shards remaining in this {@link ShardsIterator}
*
* @return number of shard remaining
*/
int remaining();

@Override
int hashCode();

@@ -11,6 +11,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator.NodeAllocationStatsAndWeight;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.telemetry.metric.DoubleWithAttributes;
import org.elasticsearch.telemetry.metric.LongWithAttributes;
import org.elasticsearch.telemetry.metric.MeterRegistry;
@@ -28,17 +29,28 @@ import java.util.concurrent.atomic.AtomicReference;
*/
public class DesiredBalanceMetrics {

/**
* @param unassignedShards Shards that are not assigned to any node.
* @param totalAllocations Shards that are assigned to a node.
* @param undesiredAllocationsExcludingShuttingDownNodes Shards that are assigned to a node but must move to alleviate a resource
* constraint per the {@link AllocationDeciders}. Excludes shards that must move
* because of a node shutting down.
*/
public record AllocationStats(long unassignedShards, long totalAllocations, long undesiredAllocationsExcludingShuttingDownNodes) {}

public record NodeWeightStats(long shardCount, double diskUsageInBytes, double writeLoad, double nodeWeight) {}

public static final DesiredBalanceMetrics NOOP = new DesiredBalanceMetrics(MeterRegistry.NOOP);

// Reconciliation metrics.
/** See {@link #unassignedShards} */
public static final String UNASSIGNED_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.unassigned.current";
/** See {@link #totalAllocations} */
public static final String TOTAL_SHARDS_METRIC_NAME = "es.allocator.desired_balance.shards.current";
/** See {@link #undesiredAllocationsExcludingShuttingDownNodes} */
public static final String UNDESIRED_ALLOCATION_COUNT_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.current";
/** {@link #UNDESIRED_ALLOCATION_COUNT_METRIC_NAME} / {@link #TOTAL_SHARDS_METRIC_NAME} */
public static final String UNDESIRED_ALLOCATION_RATIO_METRIC_NAME = "es.allocator.desired_balance.allocations.undesired.ratio";

// Desired balance node metrics.
public static final String DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME = "es.allocator.desired_balance.allocations.node_weight.current";
public static final String DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME =
"es.allocator.desired_balance.allocations.node_shard_count.current";
@@ -47,6 +59,7 @@ public class DesiredBalanceMetrics {
public static final String DESIRED_BALANCE_NODE_DISK_USAGE_METRIC_NAME =
"es.allocator.desired_balance.allocations.node_disk_usage_bytes.current";

// Node weight metrics.
public static final String CURRENT_NODE_WEIGHT_METRIC_NAME = "es.allocator.allocations.node.weight.current";
public static final String CURRENT_NODE_SHARD_COUNT_METRIC_NAME = "es.allocator.allocations.node.shard_count.current";
public static final String CURRENT_NODE_WRITE_LOAD_METRIC_NAME = "es.allocator.allocations.node.write_load.current";
@@ -59,6 +72,7 @@ public class DesiredBalanceMetrics {
public static final AllocationStats EMPTY_ALLOCATION_STATS = new AllocationStats(-1, -1, -1);

private volatile boolean nodeIsMaster = false;

/**
* Number of unassigned shards during last reconciliation
*/
@@ -70,9 +84,10 @@ public class DesiredBalanceMetrics {
private volatile long totalAllocations;

/**
* Number of assigned shards during last reconciliation that are not allocated on desired node and need to be moved
* Number of assigned shards during last reconciliation that are not allocated on a desired node and need to be moved.
* This excludes shards that must be reassigned due to a shutting down node.
*/
private volatile long undesiredAllocations;
private volatile long undesiredAllocationsExcludingShuttingDownNodes;

private final AtomicReference<Map<DiscoveryNode, NodeWeightStats>> weightStatsPerNodeRef = new AtomicReference<>(Map.of());
private final AtomicReference<Map<DiscoveryNode, NodeAllocationStatsAndWeight>> allocationStatsPerNodeRef = new AtomicReference<>(
@@ -89,7 +104,7 @@ public class DesiredBalanceMetrics {
if (allocationStats != EMPTY_ALLOCATION_STATS) {
this.unassignedShards = allocationStats.unassignedShards;
this.totalAllocations = allocationStats.totalAllocations;
this.undesiredAllocations = allocationStats.undesiredAllocationsExcludingShuttingDownNodes;
this.undesiredAllocationsExcludingShuttingDownNodes = allocationStats.undesiredAllocationsExcludingShuttingDownNodes;
}
weightStatsPerNodeRef.set(weightStatsPerNode);
allocationStatsPerNodeRef.set(nodeAllocationStats);
@@ -107,7 +122,7 @@ public class DesiredBalanceMetrics {
UNDESIRED_ALLOCATION_COUNT_METRIC_NAME,
"Total number of shards allocated on undesired nodes excluding shutting down nodes",
"{shard}",
this::getUndesiredAllocationsMetrics
this::getUndesiredAllocationsExcludingShuttingDownNodesMetrics
);
meterRegistry.registerDoublesGauge(
UNDESIRED_ALLOCATION_RATIO_METRIC_NAME,
@@ -115,6 +130,7 @@
"1",
this::getUndesiredAllocationsRatioMetrics
);

meterRegistry.registerDoublesGauge(
DESIRED_BALANCE_NODE_WEIGHT_METRIC_NAME,
"Weight of nodes in the computed desired balance",
@@ -133,18 +149,19 @@ public class DesiredBalanceMetrics {
"bytes",
this::getDesiredBalanceNodeDiskUsageMetrics
);
meterRegistry.registerDoublesGauge(
CURRENT_NODE_WEIGHT_METRIC_NAME,
"The weight of nodes based on the current allocation state",
"unit",
this::getCurrentNodeWeightMetrics
);
meterRegistry.registerLongsGauge(
DESIRED_BALANCE_NODE_SHARD_COUNT_METRIC_NAME,
"Shard count of nodes in the computed desired balance",
"unit",
this::getDesiredBalanceNodeShardCountMetrics
);

meterRegistry.registerDoublesGauge(
CURRENT_NODE_WEIGHT_METRIC_NAME,
"The weight of nodes based on the current allocation state",
"unit",
this::getCurrentNodeWeightMetrics
);
meterRegistry.registerDoublesGauge(
CURRENT_NODE_WRITE_LOAD_METRIC_NAME,
"The current write load of nodes",
@@ -194,7 +211,7 @@
}

public long undesiredAllocations() {
return undesiredAllocations;
return undesiredAllocationsExcludingShuttingDownNodes;
}

private List<LongWithAttributes> getUnassignedShardsMetrics() {
@@ -330,8 +347,8 @@ public class DesiredBalanceMetrics {
return getIfPublishing(totalAllocations);
}

private List<LongWithAttributes> getUndesiredAllocationsMetrics() {
return getIfPublishing(undesiredAllocations);
private List<LongWithAttributes> getUndesiredAllocationsExcludingShuttingDownNodesMetrics() {
return getIfPublishing(undesiredAllocationsExcludingShuttingDownNodes);
}

private List<LongWithAttributes> getIfPublishing(long value) {
@@ -344,7 +361,7 @@
private List<DoubleWithAttributes> getUndesiredAllocationsRatioMetrics() {
if (nodeIsMaster) {
var total = totalAllocations;
var undesired = undesiredAllocations;
var undesired = undesiredAllocationsExcludingShuttingDownNodes;
return List.of(new DoubleWithAttributes(total != 0 ? (double) undesired / total : 0.0));
}
return List.of();
@@ -357,7 +374,7 @@
public void zeroAllMetrics() {
unassignedShards = 0;
totalAllocations = 0;
undesiredAllocations = 0;
undesiredAllocationsExcludingShuttingDownNodes = 0;
weightStatsPerNodeRef.set(Map.of());
allocationStatsPerNodeRef.set(Map.of());
}

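EMPTY_ALLOCATION_STATS works as a sentinel: updateMetrics only publishes stats when the caller supplied a real sample, compared by reference. A small sketch of the intended calling convention, with hypothetical values:

    // Real stats from a reconciliation round; passing EMPTY_ALLOCATION_STATS means "nothing to publish".
    DesiredBalanceMetrics.AllocationStats stats = new DesiredBalanceMetrics.AllocationStats(3, 120, 7);
    boolean publishable = stats != DesiredBalanceMetrics.EMPTY_ALLOCATION_STATS; // reference comparison, as in updateMetrics
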
@@ -21,10 +21,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationStatsAndWeightsCalculator.NodeAllocationStatsAndWeight;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.DesiredBalanceMetrics.AllocationStats;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.ClusterSettings;
@@ -37,9 +34,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
@@ -83,16 +78,8 @@ public class DesiredBalanceReconciler {
private double undesiredAllocationsLogThreshold;
private final NodeAllocationOrdering allocationOrdering = new NodeAllocationOrdering();
private final NodeAllocationOrdering moveOrdering = new NodeAllocationOrdering();
private final DesiredBalanceMetrics desiredBalanceMetrics;
private final NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator;

public DesiredBalanceReconciler(
ClusterSettings clusterSettings,
ThreadPool threadPool,
DesiredBalanceMetrics desiredBalanceMetrics,
NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
) {
this.desiredBalanceMetrics = desiredBalanceMetrics;
public DesiredBalanceReconciler(ClusterSettings clusterSettings, ThreadPool threadPool) {
this.undesiredAllocationLogInterval = new FrequencyCappedAction(
threadPool.relativeTimeInMillisSupplier(),
TimeValue.timeValueMinutes(5)
@@ -102,7 +89,6 @@
UNDESIRED_ALLOCATIONS_LOG_THRESHOLD_SETTING,
value -> this.undesiredAllocationsLogThreshold = value
);
this.nodeAllocationStatsAndWeightsCalculator = nodeAllocationStatsAndWeightsCalculator;
}

/**
@@ -111,12 +97,13 @@
* @param desiredBalance The new desired cluster shard allocation
* @param allocation Cluster state information with which to make decisions, contains routing table metadata that will be modified to
* reach the given desired balance.
* @return {@link DesiredBalanceMetrics.AllocationStats} for this round of reconciliation changes.
*/
public void reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) {
public DesiredBalanceMetrics.AllocationStats reconcile(DesiredBalance desiredBalance, RoutingAllocation allocation) {
var nodeIds = allocation.routingNodes().getAllNodeIds();
allocationOrdering.retainNodes(nodeIds);
moveOrdering.retainNodes(nodeIds);
new Reconciliation(desiredBalance, allocation).run();
return new Reconciliation(desiredBalance, allocation).run();
}

public void clear() {
@@ -124,6 +111,11 @@
moveOrdering.clear();
}

/**
* Handles updating the {@code RoutingNodes} to reflect the next steps towards the new {@code DesiredBalance}. Updates are limited by
* throttling (there are limits on the number of concurrent shard moves) or resource constraints (some shard moves might not be
* immediately possible until other shards move first).
*/
private class Reconciliation {

private final DesiredBalance desiredBalance;
@@ -136,7 +128,7 @@
this.routingNodes = allocation.routingNodes();
}
||||
void run() {
|
||||
DesiredBalanceMetrics.AllocationStats run() {
|
||||
try (var ignored = allocation.withReconcilingFlag()) {
|
||||
|
||||
logger.debug("Reconciling desired balance for [{}]", desiredBalance.lastConvergedIndex());
|
||||
|
@ -145,13 +137,13 @@ public class DesiredBalanceReconciler {
|
|||
// no data nodes, so fail allocation to report red health
|
||||
failAllocationOfNewPrimaries(allocation);
|
||||
logger.trace("no nodes available, nothing to reconcile");
|
||||
return;
|
||||
return DesiredBalanceMetrics.EMPTY_ALLOCATION_STATS;
|
||||
}
|
||||
|
||||
if (desiredBalance.assignments().isEmpty()) {
|
||||
// no desired state yet but it is on its way and we'll reroute again when it is ready
|
||||
logger.trace("desired balance is empty, nothing to reconcile");
|
||||
return;
|
||||
return DesiredBalanceMetrics.EMPTY_ALLOCATION_STATS;
|
||||
}
|
||||
|
||||
// compute next moves towards current desired balance:
|
||||
|
@ -164,38 +156,22 @@ public class DesiredBalanceReconciler {
|
|||
// 2. move any shards that cannot remain where they are
|
||||
logger.trace("Reconciler#moveShards");
|
||||
moveShards();
|
||||
|
||||
// 3. move any other shards that are desired elsewhere
|
||||
// This is the rebalancing work. The previous calls were necessary, to assign unassigned shard copies, and move shards that
|
||||
// violate resource thresholds. Now we run moves to improve the relative node resource loads.
|
||||
logger.trace("Reconciler#balance");
|
||||
var allocationStats = balance();
|
||||
DesiredBalanceMetrics.AllocationStats allocationStats = balance();
|
||||
|
||||
logger.debug("Reconciliation is complete");
|
||||
|
||||
updateDesireBalanceMetrics(allocationStats);
|
||||
return allocationStats;
|
||||
}
|
||||
}
|
||||
|
||||
private void updateDesireBalanceMetrics(AllocationStats allocationStats) {
|
||||
var nodesStatsAndWeights = nodeAllocationStatsAndWeightsCalculator.nodesAllocationStatsAndWeights(
|
||||
allocation.metadata(),
|
||||
allocation.routingNodes(),
|
||||
allocation.clusterInfo(),
|
||||
desiredBalance
|
||||
);
|
||||
Map<DiscoveryNode, NodeAllocationStatsAndWeight> filteredNodeAllocationStatsAndWeights = new HashMap<>(
|
||||
nodesStatsAndWeights.size()
|
||||
);
|
||||
for (var nodeStatsAndWeight : nodesStatsAndWeights.entrySet()) {
|
||||
var node = allocation.nodes().get(nodeStatsAndWeight.getKey());
|
||||
if (node != null) {
|
||||
filteredNodeAllocationStatsAndWeights.put(node, nodeStatsAndWeight.getValue());
|
||||
}
|
||||
}
|
||||
desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode(), filteredNodeAllocationStatsAndWeights);
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks whether every shard is either assigned or ignored. Expected to be called after {@link #allocateUnassigned()}.
|
||||
*/
|
||||
private boolean allocateUnassignedInvariant() {
|
||||
// after allocateUnassigned, every shard must be either assigned or ignored
|
||||
|
||||
assert routingNodes.unassigned().isEmpty();
|
||||
|
||||
final var shardCounts = allocation.metadata()
|
||||
|
@ -269,45 +245,55 @@ public class DesiredBalanceReconciler {
|
|||
}
|
||||
|
||||
/*
|
||||
* Create some comparators to sort the unassigned shard copies in priority to allocate order.
|
||||
* TODO: We could be smarter here and group the shards by index and then
|
||||
* use the sorter to save some iterations.
|
||||
*/
|
||||
final PriorityComparator secondaryComparator = PriorityComparator.getAllocationComparator(allocation);
|
||||
final Comparator<ShardRouting> comparator = (o1, o2) -> {
|
||||
final PriorityComparator indexPriorityComparator = PriorityComparator.getAllocationComparator(allocation);
|
||||
final Comparator<ShardRouting> shardAllocationPriorityComparator = (o1, o2) -> {
|
||||
// Prioritize assigning a primary shard copy, if one is a primary and the other is not.
|
||||
if (o1.primary() ^ o2.primary()) {
|
||||
return o1.primary() ? -1 : 1;
|
||||
}
|
||||
|
||||
// Then order shards in the same index arbitrarily by shard ID.
|
||||
if (o1.getIndexName().compareTo(o2.getIndexName()) == 0) {
|
||||
return o1.getId() - o2.getId();
|
||||
}
|
||||
|
||||
// Lastly, prioritize system indices, then use index priority of non-system indices, then by age, etc.
|
||||
//
|
||||
// this comparator is more expensive than all the others up there
|
||||
// that's why it's added last even though it could be easier to read
|
||||
// if we'd apply it earlier. this comparator will only differentiate across
|
||||
// indices all shards of the same index is treated equally.
|
||||
final int secondary = secondaryComparator.compare(o1, o2);
|
||||
assert secondary != 0 : "Index names are equal, should be returned early.";
|
||||
return secondary;
|
||||
final int secondaryComparison = indexPriorityComparator.compare(o1, o2);
|
||||
assert secondaryComparison != 0 : "Index names are equal, should be returned early.";
|
||||
return secondaryComparison;
|
||||
};
|
||||
|
||||
/*
|
||||
* we use 2 arrays and move replicas to the second array once we allocated an identical
|
||||
* replica in the current iteration to make sure all indices get allocated in the same manner.
|
||||
* The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with
|
||||
* The arrays are sorted by primaries first and then by index and shard ID so 2 indices with
|
||||
* 2 replica and 1 shard would look like:
|
||||
* [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)]
|
||||
* if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with
|
||||
* the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned.
|
||||
*/
|
||||
ShardRouting[] primary = unassigned.drain();
|
||||
ShardRouting[] secondary = new ShardRouting[primary.length];
|
||||
int secondaryLength = 0;
|
||||
int primaryLength = primary.length;
|
||||
ArrayUtil.timSort(primary, comparator);
|
||||
ShardRouting[] orderedShardAllocationList = unassigned.drain();
|
||||
ShardRouting[] deferredShardAllocationList = new ShardRouting[orderedShardAllocationList.length];
|
||||
int deferredShardAllocationListLength = 0;
|
||||
int orderedShardAllocationListLength = orderedShardAllocationList.length;
|
||||
ArrayUtil.timSort(orderedShardAllocationList, shardAllocationPriorityComparator);
|
||||
|
||||
do {
|
||||
nextShard: for (int i = 0; i < primaryLength; i++) {
|
||||
final var shard = primary[i];
|
||||
nextShard: for (int i = 0; i < orderedShardAllocationListLength; i++) {
|
||||
final var shard = orderedShardAllocationList[i];
|
||||
final var assignment = desiredBalance.getAssignment(shard.shardId());
|
||||
// An ignored shard copy is one that has no desired balance assignment.
|
||||
final boolean ignored = assignment == null || isIgnored(routingNodes, shard, assignment);
|
||||
|
||||
AllocationStatus unallocatedStatus;
|
||||
if (ignored) {
|
||||
unallocatedStatus = AllocationStatus.NO_ATTEMPT;
|
||||
|
@ -337,8 +323,13 @@ public class DesiredBalanceReconciler {
|
|||
if (shard.primary() == false) {
|
||||
// copy over the same replica shards to the secondary array so they will get allocated
|
||||
// in a subsequent iteration, allowing replicas of other shards to be allocated first
|
||||
while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) {
|
||||
secondary[secondaryLength++] = primary[++i];
|
||||
while (i < orderedShardAllocationListLength - 1
|
||||
&& shardAllocationPriorityComparator.compare(
|
||||
orderedShardAllocationList[i],
|
||||
orderedShardAllocationList[i + 1]
|
||||
) == 0) {
|
||||
deferredShardAllocationList[deferredShardAllocationListLength++] =
|
||||
orderedShardAllocationList[++i];
|
||||
}
|
||||
}
|
||||
continue nextShard;
|
||||
|
@ -358,18 +349,23 @@ public class DesiredBalanceReconciler {
|
|||
logger.debug("No eligible node found to assign shard [{}]", shard);
|
||||
unassigned.ignoreShard(shard, unallocatedStatus, allocation.changes());
|
||||
if (shard.primary() == false) {
|
||||
// we could not allocate it and we are a replica - check if we can ignore the other replicas
|
||||
while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) {
|
||||
unassigned.ignoreShard(primary[++i], unallocatedStatus, allocation.changes());
|
||||
// We could not allocate the shard copy and the copy is a replica: check if we can ignore the other unassigned
|
||||
// replicas.
|
||||
while (i < orderedShardAllocationListLength - 1
|
||||
&& shardAllocationPriorityComparator.compare(
|
||||
orderedShardAllocationList[i],
|
||||
orderedShardAllocationList[i + 1]
|
||||
) == 0) {
|
||||
unassigned.ignoreShard(orderedShardAllocationList[++i], unallocatedStatus, allocation.changes());
|
||||
}
|
||||
}
|
||||
}
|
||||
primaryLength = secondaryLength;
|
||||
ShardRouting[] tmp = primary;
|
||||
primary = secondary;
|
||||
secondary = tmp;
|
||||
secondaryLength = 0;
|
||||
} while (primaryLength > 0);
|
||||
ShardRouting[] tmp = orderedShardAllocationList;
|
||||
orderedShardAllocationList = deferredShardAllocationList;
|
||||
deferredShardAllocationList = tmp;
|
||||
orderedShardAllocationListLength = deferredShardAllocationListLength;
|
||||
deferredShardAllocationListLength = 0;
|
||||
} while (orderedShardAllocationListLength > 0);
|
||||
}
|
||||
|
||||
private final class NodeIdsIterator implements Iterator<String> {
|
||||
|
@ -377,11 +373,7 @@ public class DesiredBalanceReconciler {
|
|||
private final ShardRouting shard;
|
||||
private final RoutingNodes routingNodes;
|
||||
/**
|
||||
* Contains the source of the nodeIds used for shard assignment. It could be:
|
||||
* * desired - when using desired nodes
|
||||
* * forced initial allocation - when initial allocation is forced to certain nodes by shrink/split/clone index operation
|
||||
* * fallback - when assigning the primary shard is temporarily not possible on desired nodes,
|
||||
* and it is assigned elsewhere in the cluster
|
||||
* Contains the source of the nodeIds used for shard assignment.
|
||||
*/
|
||||
private NodeIdSource source;
|
||||
private Iterator<String> nodeIds;
|
||||
|
@ -437,11 +429,21 @@ public class DesiredBalanceReconciler {
|
|||
}
|
||||
|
||||
private enum NodeIdSource {
|
||||
// Using desired nodes.
|
||||
DESIRED,
|
||||
// Initial allocation is forced to certain nodes by shrink/split/clone index operation.
|
||||
FORCED_INITIAL_ALLOCATION,
|
||||
// Assigning the primary shard is temporarily not possible on desired nodes, and it is assigned elsewhere in the cluster.
|
||||
FALLBACK;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks whether the {@code shard} copy has been assigned to a node or not in {@code assignment}.
|
||||
* @param routingNodes The current routing information
|
||||
* @param shard A particular shard copy
|
||||
* @param assignment The assignments for shard primary and replica copies
|
||||
* @return Whether the shard has a node assignment.
|
||||
*/
|
||||
private boolean isIgnored(RoutingNodes routingNodes, ShardRouting shard, ShardAssignment assignment) {
|
||||
if (assignment.ignored() == 0) {
|
||||
// no shards are ignored
|
||||
|
@ -518,7 +520,8 @@ public class DesiredBalanceReconciler {
|
|||
}
|
||||
}
|
||||
|
||||
private AllocationStats balance() {
|
||||
private DesiredBalanceMetrics.AllocationStats balance() {
|
||||
// Check if rebalancing is disabled.
|
||||
if (allocation.deciders().canRebalance(allocation).type() != Decision.Type.YES) {
|
||||
return DesiredBalanceMetrics.EMPTY_ALLOCATION_STATS;
|
||||
}
|
||||
|
@ -587,8 +590,11 @@ public class DesiredBalanceReconciler {
|
|||
}
|
||||
|
||||
maybeLogUndesiredAllocationsWarning(totalAllocations, undesiredAllocationsExcludingShuttingDownNodes, routingNodes.size());
|
||||
|
||||
return new AllocationStats(unassignedShards, totalAllocations, undesiredAllocationsExcludingShuttingDownNodes);
|
||||
return new DesiredBalanceMetrics.AllocationStats(
|
||||
unassignedShards,
|
||||
totalAllocations,
|
||||
undesiredAllocationsExcludingShuttingDownNodes
|
||||
);
|
||||
}
|
||||
|
||||
private void maybeLogUndesiredAllocationsWarning(int totalAllocations, int undesiredAllocations, int nodeCount) {
|
||||
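The comment block in the hunk above describes a two-array pass: items that compare equal to the one just handled are deferred to a second array, the arrays are swapped, and the loop repeats until nothing is deferred. A minimal standalone sketch of that discipline over plain strings (all names here are illustrative, not from the allocator):

import java.util.Comparator;

// Minimal sketch of the deferred-rounds technique used above: each round
// handles at most one item per equivalence class, deferring equal neighbours
// to a second array; the arrays are then swapped and the loop repeats until
// nothing remains deferred.
class DeferredRounds {
    public static void main(String[] args) {
        String[] current = { "a", "a", "a", "b", "b" };
        String[] deferred = new String[current.length];
        int currentLength = current.length;
        int deferredLength = 0;
        Comparator<String> comparator = Comparator.naturalOrder();

        int round = 0;
        do {
            for (int i = 0; i < currentLength; i++) {
                System.out.println("round " + round + ": handling " + current[i]);
                // defer the equal neighbours to the next round
                while (i < currentLength - 1 && comparator.compare(current[i], current[i + 1]) == 0) {
                    deferred[deferredLength++] = current[++i];
                }
            }
            String[] tmp = current;
            current = deferred;
            deferred = tmp;
            currentLength = deferredLength;
            deferredLength = 0;
            round++;
        } while (currentLength > 0);
    }
}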

@@ -16,6 +16,7 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateTaskExecutor;
 import org.elasticsearch.cluster.ClusterStateTaskListener;
 import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;
+import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.AllocationService.RerouteStrategy;
@@ -39,6 +40,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 
 import java.util.ArrayList;
 import java.util.Comparator;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -87,6 +89,7 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
     private final AtomicReference<DesiredBalance> currentDesiredBalanceRef = new AtomicReference<>(DesiredBalance.NOT_MASTER);
     private volatile boolean resetCurrentDesiredBalance = false;
     private final Set<String> processedNodeShutdowns = new HashSet<>();
+    private final NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator;
     private final DesiredBalanceMetrics desiredBalanceMetrics;
     /**
      * Manages balancer round results in order to report on the balancer activity in a configurable manner.
@@ -136,17 +139,13 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
         NodeAllocationStatsAndWeightsCalculator nodeAllocationStatsAndWeightsCalculator
     ) {
         this.desiredBalanceMetrics = new DesiredBalanceMetrics(telemetryProvider.getMeterRegistry());
+        this.nodeAllocationStatsAndWeightsCalculator = nodeAllocationStatsAndWeightsCalculator;
         this.balancerRoundSummaryService = new AllocationBalancingRoundSummaryService(threadPool, clusterService.getClusterSettings());
         this.delegateAllocator = delegateAllocator;
         this.threadPool = threadPool;
         this.reconciler = reconciler;
         this.desiredBalanceComputer = desiredBalanceComputer;
-        this.desiredBalanceReconciler = new DesiredBalanceReconciler(
-            clusterService.getClusterSettings(),
-            threadPool,
-            desiredBalanceMetrics,
-            nodeAllocationStatsAndWeightsCalculator
-        );
+        this.desiredBalanceReconciler = new DesiredBalanceReconciler(clusterService.getClusterSettings(), threadPool);
         this.desiredBalanceComputation = new ContinuousComputation<>(threadPool.generic()) {
 
             @Override
@@ -347,6 +346,10 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
         return new BalancingRoundSummary(DesiredBalance.shardMovements(oldDesiredBalance, newDesiredBalance));
     }
 
+    /**
+     * Submits the desired balance to be reconciled (applies the desired changes to the routing table) and creates and publishes a new
+     * cluster state. The data nodes will receive and apply the new cluster state to start/move/remove shards.
+     */
     protected void submitReconcileTask(DesiredBalance desiredBalance) {
         masterServiceTaskQueue.submitTask("reconcile-desired-balance", new ReconcileDesiredBalanceTask(desiredBalance), null);
     }
@@ -357,7 +360,11 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
         } else {
             logger.debug("Reconciling desired balance for [{}]", desiredBalance.lastConvergedIndex());
         }
-        recordTime(cumulativeReconciliationTime, () -> desiredBalanceReconciler.reconcile(desiredBalance, allocation));
+        recordTime(cumulativeReconciliationTime, () -> {
+            DesiredBalanceMetrics.AllocationStats allocationStats = desiredBalanceReconciler.reconcile(desiredBalance, allocation);
+            updateDesireBalanceMetrics(desiredBalance, allocation, allocationStats);
+        });
 
         if (logger.isTraceEnabled()) {
             logger.trace("Reconciled desired balance: {}", desiredBalance);
         } else {
@@ -391,6 +398,28 @@ public class DesiredBalanceShardsAllocator implements ShardsAllocator {
         resetCurrentDesiredBalance = true;
     }
 
+    private void updateDesireBalanceMetrics(
+        DesiredBalance desiredBalance,
+        RoutingAllocation routingAllocation,
+        DesiredBalanceMetrics.AllocationStats allocationStats
+    ) {
+        var nodesStatsAndWeights = nodeAllocationStatsAndWeightsCalculator.nodesAllocationStatsAndWeights(
+            routingAllocation.metadata(),
+            routingAllocation.routingNodes(),
+            routingAllocation.clusterInfo(),
+            desiredBalance
+        );
+        Map<DiscoveryNode, NodeAllocationStatsAndWeightsCalculator.NodeAllocationStatsAndWeight> filteredNodeAllocationStatsAndWeights =
+            new HashMap<>(nodesStatsAndWeights.size());
+        for (var nodeStatsAndWeight : nodesStatsAndWeights.entrySet()) {
+            var node = routingAllocation.nodes().get(nodeStatsAndWeight.getKey());
+            if (node != null) {
+                filteredNodeAllocationStatsAndWeights.put(node, nodeStatsAndWeight.getValue());
+            }
+        }
+        desiredBalanceMetrics.updateMetrics(allocationStats, desiredBalance.weightsPerNode(), filteredNodeAllocationStatsAndWeights);
+    }
+
     public DesiredBalanceStats getStats() {
         return new DesiredBalanceStats(
             Math.max(currentDesiredBalanceRef.get().lastConvergedIndex(), 0L),

@@ -33,10 +33,28 @@ public class OrderedShardsIterator implements Iterator<ShardRouting> {
 
     private final ArrayDeque<NodeAndShardIterator> queue;
 
+    /**
+     * This iterator will progress through the shards node by node, each node's shards ordered from most write active to least.
+     *
+     * @param allocation
+     * @param ordering
+     * @return An iterator over all shards in the {@link RoutingNodes} held by {@code allocation} (all shards assigned to a node). The
+     * iterator will progress node by node, where each node's shards are ordered from data stream write indices, to regular indices and
+     * lastly to data stream read indices.
+     */
     public static OrderedShardsIterator createForNecessaryMoves(RoutingAllocation allocation, NodeAllocationOrdering ordering) {
         return create(allocation.routingNodes(), createShardsComparator(allocation), ordering);
     }
 
+    /**
+     * This iterator will progress through the shards node by node, each node's shards ordered from least write active to most.
+     *
+     * @param allocation
+     * @param ordering
+     * @return An iterator over all shards in the {@link RoutingNodes} held by {@code allocation} (all shards assigned to a node). The
+     * iterator will progress node by node, where each node's shards are ordered from data stream read indices, to regular indices and
+     * lastly to data stream write indices.
+     */
     public static OrderedShardsIterator createForBalancing(RoutingAllocation allocation, NodeAllocationOrdering ordering) {
         return create(allocation.routingNodes(), createShardsComparator(allocation).reversed(), ordering);
     }
@@ -61,6 +79,9 @@ public class OrderedShardsIterator implements Iterator<ShardRouting> {
         return Iterators.forArray(shards);
     }
 
+    /**
+     * Prioritizes write indices of data streams, and deprioritizes data stream read indices, relative to regular indices.
+     */
     private static Comparator<ShardRouting> createShardsComparator(RoutingAllocation allocation) {
         return Comparator.comparing(shard -> {
             final ProjectMetadata project = allocation.metadata().projectFor(shard.index());
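The javadoc added above describes a three-bucket ordering: data stream write indices first, then regular indices, then data stream read indices, with the reversed comparator reused for balancing. A hedged sketch of that pattern with Comparator.comparing over an enum rank (IndexRole and Shard are hypothetical stand-ins, not the Elasticsearch types):

import java.util.Comparator;
import java.util.List;

// Illustrative sketch of a three-bucket priority comparator like the one the
// javadoc above describes. Enum ordinal order supplies the ranking.
class ShardOrdering {
    enum IndexRole { DATA_STREAM_WRITE_INDEX, REGULAR_INDEX, DATA_STREAM_READ_INDEX }

    record Shard(String name, IndexRole role) {}

    public static void main(String[] args) {
        Comparator<Shard> forNecessaryMoves = Comparator.comparing(Shard::role);
        // For rebalancing the same ordering is simply reversed, as in createForBalancing.
        Comparator<Shard> forBalancing = forNecessaryMoves.reversed();

        var shards = List.of(
            new Shard("logs-read", IndexRole.DATA_STREAM_READ_INDEX),
            new Shard("users", IndexRole.REGULAR_INDEX),
            new Shard("logs-write", IndexRole.DATA_STREAM_WRITE_INDEX)
        );
        System.out.println(shards.stream().sorted(forNecessaryMoves).toList());
        System.out.println(shards.stream().sorted(forBalancing).toList());
    }
}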

@@ -20,6 +20,8 @@ import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.FieldInfos;
 import org.apache.lucene.index.FilterCodecReader;
 import org.apache.lucene.index.FilterDirectoryReader;
 import org.apache.lucene.index.FilterLeafReader;
@@ -190,7 +192,18 @@ public class Lucene {
                 throw new IllegalStateException("no commit found in the directory");
             }
         }
+        // Need to figure out what the parent field is, so that validation in IndexWriter doesn't fail
+        // if no parent field is configured, but FieldInfo says there is a parent field.
+        String parentField = null;
         final IndexCommit cp = getIndexCommit(si, directory);
+        try (var reader = DirectoryReader.open(cp)) {
+            var topLevelFieldInfos = FieldInfos.getMergedFieldInfos(reader);
+            for (FieldInfo fieldInfo : topLevelFieldInfos) {
+                if (fieldInfo.isParentField()) {
+                    parentField = fieldInfo.getName();
+                }
+            }
+        }
         try (
             IndexWriter writer = new IndexWriter(
                 directory,
@@ -198,6 +211,7 @@ public class Lucene {
                     .setIndexCommit(cp)
                     .setCommitOnClose(false)
                     .setOpenMode(IndexWriterConfig.OpenMode.APPEND)
+                    .setParentField(parentField)
             )
         ) {
             // do nothing and close this will kick off IndexFileDeleter which will remove all pending files

@@ -30,18 +30,29 @@ public class SizeLimitingStringWriter extends StringWriter {
         this.sizeLimit = sizeLimit;
     }
 
-    private void checkSizeLimit(int additionalChars) {
-        int bufLen = getBuffer().length();
-        if (bufLen + additionalChars > sizeLimit) {
-            throw new SizeLimitExceededException(
-                Strings.format("String [%s...] has exceeded the size limit [%s]", getBuffer().substring(0, Math.min(bufLen, 20)), sizeLimit)
-            );
+    private int limitSize(int additionalChars) {
+        int neededSize = getBuffer().length() + additionalChars;
+        if (neededSize > sizeLimit) {
+            return additionalChars - (neededSize - sizeLimit);
         }
+        return additionalChars;
+    }
+
+    private void throwSizeLimitExceeded(int limitedChars, int requestedChars) {
+        assert limitedChars < requestedChars;
+        int bufLen = getBuffer().length();
+        int foundSize = bufLen - limitedChars + requestedChars; // reconstitute original
+        String selection = getBuffer().substring(0, Math.min(bufLen, 20));
+        throw new SizeLimitExceededException(
+            Strings.format("String [%s...] has size [%d] which exceeds the size limit [%d]", selection, foundSize, sizeLimit)
+        );
     }
 
     @Override
     public void write(int c) {
-        checkSizeLimit(1);
+        if (limitSize(1) != 1) {
+            throwSizeLimitExceeded(0, 1);
+        }
         super.write(c);
     }
 
@@ -49,20 +60,29 @@ public class SizeLimitingStringWriter extends StringWriter {
 
     @Override
     public void write(char[] cbuf, int off, int len) {
-        checkSizeLimit(len);
-        super.write(cbuf, off, len);
+        int limitedLen = limitSize(len);
+        if (limitedLen > 0) {
+            super.write(cbuf, off, limitedLen);
+        }
+        if (limitedLen != len) {
+            throwSizeLimitExceeded(limitedLen, len);
+        }
     }
 
     @Override
     public void write(String str) {
-        checkSizeLimit(str.length());
-        super.write(str);
+        this.write(str, 0, str.length());
     }
 
     @Override
     public void write(String str, int off, int len) {
-        checkSizeLimit(len);
-        super.write(str, off, len);
+        int limitedLen = limitSize(len);
+        if (limitedLen > 0) {
+            super.write(str, off, limitedLen);
+        }
+        if (limitedLen != len) {
+            throwSizeLimitExceeded(limitedLen, len);
+        }
    }
 
    // append(...) delegates to write(...) methods
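The rewritten write methods above fill the buffer up to the limit before throwing, instead of rejecting the whole write. A hedged behavior sketch using a simplified stand-in rather than the real SizeLimitingStringWriter (its package and exception type are omitted here):

import java.io.StringWriter;

// Sketch of the truncate-then-throw behavior introduced above. With limit 8
// and buffer "Hello", writing " world!" appends " wo" (the three characters
// that still fit) and then reports the overflow.
class SizeLimitDemo {
    static class LimitedWriter extends StringWriter {
        private final int sizeLimit;

        LimitedWriter(int sizeLimit) {
            this.sizeLimit = sizeLimit;
        }

        private int limitSize(int additionalChars) {
            int neededSize = getBuffer().length() + additionalChars;
            return neededSize > sizeLimit ? additionalChars - (neededSize - sizeLimit) : additionalChars;
        }

        @Override
        public void write(String str, int off, int len) {
            int limitedLen = limitSize(len);
            if (limitedLen > 0) {
                super.write(str, off, limitedLen); // keep what still fits
            }
            if (limitedLen != len) {
                throw new IllegalStateException("size limit [" + sizeLimit + "] exceeded");
            }
        }
    }

    public static void main(String[] args) {
        var writer = new LimitedWriter(8);
        writer.write("Hello", 0, 5);
        try {
            writer.write(" world!", 0, 7);
        } catch (IllegalStateException e) {
            System.out.println(writer.getBuffer()); // prints "Hello wo"
        }
    }
}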

@@ -13,7 +13,7 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 
-public class PlainIterator<T> implements Iterable<T>, Countable {
+public class PlainIterator<T> implements Iterable<T> {
     private final List<T> elements;
 
     // Calls to nextOrNull might be performed on different threads in the transport actions so we need the volatile
@@ -43,7 +43,6 @@ public class PlainIterator<T> implements Iterable<T>, Countable {
         }
     }
 
-    @Override
     public int size() {
         return elements.size();
     }

@@ -29,7 +29,6 @@ import org.elasticsearch.index.mapper.KeywordFieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.MappingLookup;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
-import org.elasticsearch.index.mapper.NestedLookup;
 import org.elasticsearch.index.mapper.ProvidedIdFieldMapper;
 import org.elasticsearch.index.mapper.RoutingFieldMapper;
 import org.elasticsearch.index.mapper.RoutingFields;
@@ -156,9 +155,6 @@
 
         @Override
         public void validateMapping(MappingLookup lookup) {
-            if (lookup.nestedLookup() != NestedLookup.EMPTY) {
-                throw new IllegalArgumentException("cannot have nested fields when index is in " + tsdbMode());
-            }
             if (((RoutingFieldMapper) lookup.getMapper(RoutingFieldMapper.NAME)).required()) {
                 throw new IllegalArgumentException(routingRequiredBad());
             }

@@ -232,7 +232,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
             mapperMetrics
         );
         this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService);
-        if (indexSettings.getIndexSortConfig().hasIndexSort()) {
+        boolean sourceOnly = Boolean.parseBoolean(indexSettings.getSettings().get("index.source_only"));
+        if (indexSettings.getIndexSortConfig().hasIndexSort() && sourceOnly == false) {
             // we delay the actual creation of the sort order for this index because the mapping has not been merged yet.
             // The sort order is validated right after the merge of the mapping later in the process.
             this.indexSortSupplier = () -> indexSettings.getIndexSortConfig()

@@ -22,6 +22,7 @@ import org.elasticsearch.search.fetch.StoredFieldsSpec;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 
 /**
  * Loads values from {@code _source}. This whole process is very slow and cast-tastic,
@@ -230,7 +231,7 @@ public abstract class BlockSourceReader implements BlockLoader.RowStrideReader {
 
         @Override
         protected void append(BlockLoader.Builder builder, Object v) {
-            ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(toBytesRef(scratch, (String) v));
+            ((BlockLoader.BytesRefBuilder) builder).appendBytesRef(toBytesRef(scratch, Objects.toString(v)));
         }
 
         @Override
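The one-line change above swaps an unchecked cast for Objects.toString, which tolerates values from _source that are not strings. A tiny sketch of the difference in behavior:

import java.util.Objects;

// Why Objects.toString(v) instead of (String) v: _source values are not
// guaranteed to be strings, and the cast throws ClassCastException for, say,
// a number, while Objects.toString renders any value (and "null" for null).
class ToStringVsCast {
    public static void main(String[] args) {
        Object v = 42; // a numeric value where a string was expected
        System.out.println(Objects.toString(v)); // "42"
        try {
            String s = (String) v;
        } catch (ClassCastException e) {
            System.out.println("cast failed: " + e.getMessage());
        }
    }
}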

@@ -665,8 +665,14 @@ public abstract class DocumentParserContext {
         if (idField != null) {
             // We just need to store the id as indexed field, so that IndexWriter#deleteDocuments(term) can then
             // delete it when the root document is deleted too.
-            // NOTE: we don't support nested fields in tsdb so it's safe to assume the standard id mapper.
             doc.add(new StringField(IdFieldMapper.NAME, idField.binaryValue(), Field.Store.NO));
+        } else if (indexSettings().getMode() == IndexMode.TIME_SERIES) {
+            // For time series indices, the _id is generated from the _tsid, which in turn is generated from the values of the configured
+            // routing fields. At this point in document parsing, we can't guarantee that we've parsed all the routing fields yet, so the
+            // parent document's _id is not yet available.
+            // So we just add the child document without the parent _id, then in TimeSeriesIdFieldMapper#postParse we set the _id on all
+            // child documents once we've calculated it.
+            assert getRoutingFields().equals(RoutingFields.Noop.INSTANCE) == false;
         } else {
             throw new IllegalStateException("The root document of a nested document should have an _id field");
         }

@@ -23,6 +23,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.Stack;
 
 /**
  * Block loader for fields that use fallback synthetic source implementation.
@@ -191,18 +192,45 @@
             .createParser(filterParserConfig, nameValue.value().bytes, nameValue.value().offset + 1, nameValue.value().length - 1)
         ) {
             parser.nextToken();
-            var fieldNameInParser = new StringBuilder(nameValue.name());
-            while (true) {
-                if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
-                    fieldNameInParser.append('.').append(parser.currentName());
-                    if (fieldNameInParser.toString().equals(fieldName)) {
-                        parser.nextToken();
-                        break;
-                    }
+            var fieldNames = new Stack<String>() {
+                {
+                    push(nameValue.name());
+                }
+            };
+
+            while (parser.currentToken() != null) {
+                // We are descending into an object/array hierarchy of arbitrary depth
+                // until we find the field that we need.
+                while (true) {
+                    if (parser.currentToken() == XContentParser.Token.FIELD_NAME) {
+                        fieldNames.push(parser.currentName());
+                        var nameInParser = String.join(".", fieldNames);
+                        if (nameInParser.equals(fieldName)) {
+                            parser.nextToken();
+                            break;
+                        }
                     } else {
                         assert parser.currentToken() == XContentParser.Token.START_OBJECT
                             || parser.currentToken() == XContentParser.Token.START_ARRAY;
                     }
 
                     parser.nextToken();
                 }
+                parseWithReader(parser, blockValues);
+                parser.nextToken();
+
+                // We are coming back up in object/array hierarchy.
+                // If arrays are present we will explore all array items by going back down again.
+                while (parser.currentToken() == XContentParser.Token.END_OBJECT
+                    || parser.currentToken() == XContentParser.Token.END_ARRAY) {
+                    // When exiting an object arrays we'll see END_OBJECT followed by END_ARRAY, but we only need to pop the object name
+                    // once.
+                    if (parser.currentToken() == XContentParser.Token.END_OBJECT) {
+                        fieldNames.pop();
+                    }
+                    parser.nextToken();
+                }
+            }
-            parseWithReader(parser, blockValues);
         }
     }
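The hunk above keeps a dotted field path in sync with parser depth by pushing on FIELD_NAME and popping on END_OBJECT. A self-contained sketch of that bookkeeping over a simulated token stream (the token model here is a deliberate simplification, not the XContentParser API, and leaf names are popped as soon as their value is consumed):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

// Simplified sketch of stack-based path tracking: the dotted path grows when
// a field name is read and shrinks when its value or enclosing object ends.
class DottedPathTracking {
    enum Type { FIELD_NAME, START_OBJECT, END_OBJECT, VALUE }

    record Token(Type type, String name) {}

    public static void main(String[] args) {
        // Simulates parsing {"a": {"b": 1}, "c": 2}
        List<Token> tokens = List.of(
            new Token(Type.START_OBJECT, null),
            new Token(Type.FIELD_NAME, "a"),
            new Token(Type.START_OBJECT, null),
            new Token(Type.FIELD_NAME, "b"),
            new Token(Type.VALUE, null),
            new Token(Type.END_OBJECT, null),
            new Token(Type.FIELD_NAME, "c"),
            new Token(Type.VALUE, null),
            new Token(Type.END_OBJECT, null)
        );

        Deque<String> path = new ArrayDeque<>();
        for (Token token : tokens) {
            switch (token.type()) {
                case FIELD_NAME -> path.addLast(token.name());
                case VALUE -> {
                    System.out.println("value at " + String.join(".", path)); // a.b, then c
                    path.removeLast(); // leaf consumed, its name goes out of scope
                }
                case END_OBJECT -> {
                    if (!path.isEmpty()) {
                        path.removeLast(); // leaving the object named by the top of the stack
                    }
                }
                case START_OBJECT -> { /* depth is rebalanced by the matching END_OBJECT */ }
            }
        }
    }
}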

@@ -27,6 +27,7 @@ public class MapperFeatures implements FeatureSpecification {
         "mapper.counted_keyword.synthetic_source_native_support"
     );
 
+    public static final NodeFeature TSDB_NESTED_FIELD_SUPPORT = new NodeFeature("mapper.tsdb_nested_field_support");
     public static final NodeFeature META_FETCH_FIELDS_ERROR_CODE_CHANGED = new NodeFeature("meta_fetch_fields_error_code_changed");
     public static final NodeFeature SPARSE_VECTOR_STORE_SUPPORT = new NodeFeature("mapper.sparse_vector.store_support");
     public static final NodeFeature SORT_FIELDS_CHECK_FOR_NESTED_OBJECT_FIX = new NodeFeature("mapper.nested.sorting_fields_check_fix");
@@ -49,6 +50,7 @@
             COUNTED_KEYWORD_SYNTHETIC_SOURCE_NATIVE_SUPPORT,
             SORT_FIELDS_CHECK_FOR_NESTED_OBJECT_FIX,
             DYNAMIC_HANDLING_IN_COPY_TO,
+            TSDB_NESTED_FIELD_SUPPORT,
             SourceFieldMapper.SYNTHETIC_RECOVERY_SOURCE,
             ObjectMapper.SUBOBJECTS_FALSE_MAPPING_UPDATE_FIX
         );

@@ -269,7 +269,7 @@ public class NumberFieldMapper extends FieldMapper {
                 dimension.setValue(true);
             }
 
-            MappedFieldType ft = new NumberFieldType(context.buildFullName(leafName()), this);
+            MappedFieldType ft = new NumberFieldType(context.buildFullName(leafName()), this, context.isSourceSynthetic());
             hasScript = script.get() != null;
             onScriptError = onScriptErrorParam.getValue();
             return new NumberFieldMapper(leafName(), ft, builderParams(this, context), context.isSourceSynthetic(), this);
@@ -463,6 +463,11 @@ public class NumberFieldMapper extends FieldMapper {
             BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) {
                 return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup);
             }
+
+            @Override
+            BlockLoader blockLoaderFromFallbackSyntheticSource(String fieldName, Number nullValue, boolean coerce) {
+                return floatingPointBlockLoaderFromFallbackSyntheticSource(this, fieldName, nullValue, coerce);
+            }
         },
         FLOAT("float", NumericType.FLOAT) {
             @Override
@@ -647,6 +652,11 @@ public class NumberFieldMapper extends FieldMapper {
             BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) {
                 return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup);
             }
+
+            @Override
+            BlockLoader blockLoaderFromFallbackSyntheticSource(String fieldName, Number nullValue, boolean coerce) {
+                return floatingPointBlockLoaderFromFallbackSyntheticSource(this, fieldName, nullValue, coerce);
+            }
         },
         DOUBLE("double", NumericType.DOUBLE) {
             @Override
@@ -797,6 +807,11 @@ public class NumberFieldMapper extends FieldMapper {
             BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) {
                 return new BlockSourceReader.DoublesBlockLoader(sourceValueFetcher, lookup);
             }
+
+            @Override
+            BlockLoader blockLoaderFromFallbackSyntheticSource(String fieldName, Number nullValue, boolean coerce) {
+                return floatingPointBlockLoaderFromFallbackSyntheticSource(this, fieldName, nullValue, coerce);
+            }
         },
         BYTE("byte", NumericType.BYTE) {
             @Override
@@ -911,6 +926,11 @@ public class NumberFieldMapper extends FieldMapper {
                 return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup);
             }
 
+            @Override
+            BlockLoader blockLoaderFromFallbackSyntheticSource(String fieldName, Number nullValue, boolean coerce) {
+                return integerBlockLoaderFromFallbackSyntheticSource(this, fieldName, nullValue, coerce);
+            }
+
             private boolean isOutOfRange(Object value) {
                 double doubleValue = objectToDouble(value);
                 return doubleValue < Byte.MIN_VALUE || doubleValue > Byte.MAX_VALUE;
@@ -1024,6 +1044,11 @@ public class NumberFieldMapper extends FieldMapper {
                 return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup);
             }
 
+            @Override
+            BlockLoader blockLoaderFromFallbackSyntheticSource(String fieldName, Number nullValue, boolean coerce) {
+                return integerBlockLoaderFromFallbackSyntheticSource(this, fieldName, nullValue, coerce);
+            }
+
             private boolean isOutOfRange(Object value) {
                 double doubleValue = objectToDouble(value);
                 return doubleValue < Short.MIN_VALUE || doubleValue > Short.MAX_VALUE;
@@ -1210,6 +1235,11 @@ public class NumberFieldMapper extends FieldMapper {
             BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup) {
                 return new BlockSourceReader.IntsBlockLoader(sourceValueFetcher, lookup);
             }
+
+            @Override
+            BlockLoader blockLoaderFromFallbackSyntheticSource(String fieldName, Number nullValue, boolean coerce) {
+                return integerBlockLoaderFromFallbackSyntheticSource(this, fieldName, nullValue, coerce);
+            }
         },
         LONG("long", NumericType.LONG) {
             @Override
@@ -1358,6 +1388,26 @@ public class NumberFieldMapper extends FieldMapper {
                 return new BlockSourceReader.LongsBlockLoader(sourceValueFetcher, lookup);
             }
 
+            @Override
+            BlockLoader blockLoaderFromFallbackSyntheticSource(String fieldName, Number nullValue, boolean coerce) {
+                var reader = new NumberFallbackSyntheticSourceReader(this, nullValue, coerce) {
+                    @Override
+                    public void writeToBlock(List<Number> values, BlockLoader.Builder blockBuilder) {
+                        var builder = (BlockLoader.LongBuilder) blockBuilder;
+                        for (var value : values) {
+                            builder.appendLong(value.longValue());
+                        }
+                    }
+                };
+
+                return new FallbackSyntheticSourceBlockLoader(reader, fieldName) {
+                    @Override
+                    public Builder builder(BlockFactory factory, int expectedCount) {
+                        return factory.longs(expectedCount);
+                    }
+                };
+            }
+
             private boolean isOutOfRange(Object value) {
                 if (value instanceof Long) {
                     return false;
@@ -1626,6 +1676,106 @@ public class NumberFieldMapper extends FieldMapper {
         abstract BlockLoader blockLoaderFromDocValues(String fieldName);
 
         abstract BlockLoader blockLoaderFromSource(SourceValueFetcher sourceValueFetcher, BlockSourceReader.LeafIteratorLookup lookup);
 
+        abstract BlockLoader blockLoaderFromFallbackSyntheticSource(String fieldName, Number nullValue, boolean coerce);
+
+        // All values that fit into integer are returned as integers
+        private static BlockLoader integerBlockLoaderFromFallbackSyntheticSource(
+            NumberType type,
+            String fieldName,
+            Number nullValue,
+            boolean coerce
+        ) {
+            var reader = new NumberFallbackSyntheticSourceReader(type, nullValue, coerce) {
+                @Override
+                public void writeToBlock(List<Number> values, BlockLoader.Builder blockBuilder) {
+                    var builder = (BlockLoader.IntBuilder) blockBuilder;
+                    for (var value : values) {
+                        builder.appendInt(value.intValue());
+                    }
+                }
+            };
+
+            return new FallbackSyntheticSourceBlockLoader(reader, fieldName) {
+                @Override
+                public Builder builder(BlockFactory factory, int expectedCount) {
+                    return factory.ints(expectedCount);
+                }
+            };
+        }
+
+        // All floating point values are returned as doubles
+        private static BlockLoader floatingPointBlockLoaderFromFallbackSyntheticSource(
+            NumberType type,
+            String fieldName,
+            Number nullValue,
+            boolean coerce
+        ) {
+            var reader = new NumberFallbackSyntheticSourceReader(type, nullValue, coerce) {
+                @Override
+                public void writeToBlock(List<Number> values, BlockLoader.Builder blockBuilder) {
+                    var builder = (BlockLoader.DoubleBuilder) blockBuilder;
+                    for (var value : values) {
+                        builder.appendDouble(value.doubleValue());
+                    }
+                }
+            };
+
+            return new FallbackSyntheticSourceBlockLoader(reader, fieldName) {
+                @Override
+                public Builder builder(BlockFactory factory, int expectedCount) {
+                    return factory.doubles(expectedCount);
+                }
+            };
+        }
+
+        abstract static class NumberFallbackSyntheticSourceReader extends FallbackSyntheticSourceBlockLoader.ReaderWithNullValueSupport<
+            Number> {
+            private final NumberType type;
+            private final Number nullValue;
+            private final boolean coerce;
+
+            NumberFallbackSyntheticSourceReader(NumberType type, Number nullValue, boolean coerce) {
+                super(nullValue);
+                this.type = type;
+                this.nullValue = nullValue;
+                this.coerce = coerce;
+            }
+
+            @Override
+            public void convertValue(Object value, List<Number> accumulator) {
+                if (coerce && value.equals("")) {
+                    if (nullValue != null) {
+                        accumulator.add(nullValue);
+                    }
+                }
+
+                try {
+                    var converted = type.parse(value, coerce);
+                    accumulator.add(converted);
+                } catch (Exception e) {
+                    // Malformed value, skip it
+                }
+            }
+
+            @Override
+            public void parseNonNullValue(XContentParser parser, List<Number> accumulator) throws IOException {
+                // Aligned with implementation of `value(XContentParser)`
+                if (coerce && parser.currentToken() == Token.VALUE_STRING && parser.textLength() == 0) {
+                    if (nullValue != null) {
+                        accumulator.add(nullValue);
+                    }
+                }
+
+                try {
+                    Number rawValue = type.parse(parser, coerce);
+                    // Transform number to correct type (e.g. reduce precision)
+                    accumulator.add(type.parse(rawValue, coerce));
+                } catch (Exception e) {
+                    // Malformed value, skip it
+                }
+            }
+        }
+    }
+
     public static class NumberFieldType extends SimpleMappedFieldType {
@@ -1637,6 +1787,7 @@ public class NumberFieldMapper extends FieldMapper {
         private final boolean isDimension;
         private final MetricType metricType;
         private final IndexMode indexMode;
+        private final boolean isSyntheticSource;
 
         public NumberFieldType(
             String name,
@@ -1650,7 +1801,8 @@ public class NumberFieldMapper extends FieldMapper {
             FieldValues<Number> script,
             boolean isDimension,
             MetricType metricType,
-            IndexMode indexMode
+            IndexMode indexMode,
+            boolean isSyntheticSource
         ) {
             super(name, isIndexed, isStored, hasDocValues, TextSearchInfo.SIMPLE_MATCH_WITHOUT_TERMS, meta);
             this.type = Objects.requireNonNull(type);
@@ -1660,9 +1812,10 @@ public class NumberFieldMapper extends FieldMapper {
             this.isDimension = isDimension;
             this.metricType = metricType;
             this.indexMode = indexMode;
+            this.isSyntheticSource = isSyntheticSource;
         }
 
-        NumberFieldType(String name, Builder builder) {
+        NumberFieldType(String name, Builder builder, boolean isSyntheticSource) {
             this(
                 name,
                 builder.type,
@@ -1675,7 +1828,8 @@ public class NumberFieldMapper extends FieldMapper {
                 builder.scriptValues(),
                 builder.dimension.getValue(),
                 builder.metric.getValue(),
-                builder.indexMode
+                builder.indexMode,
+                isSyntheticSource
             );
         }
 
@@ -1684,7 +1838,7 @@ public class NumberFieldMapper extends FieldMapper {
         }
 
         public NumberFieldType(String name, NumberType type, boolean isIndexed) {
-            this(name, type, isIndexed, false, true, true, null, Collections.emptyMap(), null, false, null, null);
+            this(name, type, isIndexed, false, true, true, null, Collections.emptyMap(), null, false, null, null, false);
         }
 
         @Override
@@ -1761,6 +1915,11 @@ public class NumberFieldMapper extends FieldMapper {
             if (hasDocValues()) {
                 return type.blockLoaderFromDocValues(name());
             }
+
+            if (isSyntheticSource) {
+                return type.blockLoaderFromFallbackSyntheticSource(name(), nullValue, coerce);
+            }
 
             BlockSourceReader.LeafIteratorLookup lookup = isStored() || isIndexed()
                 ? BlockSourceReader.lookupFromFieldNames(blContext.fieldNames(), name())
                 : BlockSourceReader.lookupMatchingAll();
@@ -1876,7 +2035,7 @@ public class NumberFieldMapper extends FieldMapper {
     private final MetricType metricType;
     private boolean allowMultipleValues;
     private final IndexVersion indexCreatedVersion;
-    private final boolean storeMalformedFields;
+    private final boolean isSyntheticSource;
 
     private final IndexMode indexMode;
 
@@ -1884,7 +2043,7 @@ public class NumberFieldMapper extends FieldMapper {
         String simpleName,
         MappedFieldType mappedFieldType,
         BuilderParams builderParams,
-        boolean storeMalformedFields,
+        boolean isSyntheticSource,
         Builder builder
     ) {
         super(simpleName, mappedFieldType, builderParams);
@@ -1904,7 +2063,7 @@ public class NumberFieldMapper extends FieldMapper {
         this.metricType = builder.metric.getValue();
         this.allowMultipleValues = builder.allowMultipleValues;
         this.indexCreatedVersion = builder.indexCreatedVersion;
-        this.storeMalformedFields = storeMalformedFields;
+        this.isSyntheticSource = isSyntheticSource;
         this.indexMode = builder.indexMode;
     }
 
@@ -1939,7 +2098,7 @@ public class NumberFieldMapper extends FieldMapper {
         } catch (IllegalArgumentException e) {
             if (ignoreMalformed.value() && context.parser().currentToken().isValue()) {
                 context.addIgnoredField(mappedFieldType.name());
-                if (storeMalformedFields) {
+                if (isSyntheticSource) {
                     // Save a copy of the field so synthetic source can load it
                     context.doc().add(IgnoreMalformedStoredValues.storedField(fullPath(), context.parser()));
                 }
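The helper comments above state the convention that whole-number types are emitted into int blocks while floating-point types become doubles. A hedged sketch of that widening convention with plain arrays standing in for the real BlockLoader.Builder interfaces:

import java.util.List;

// Sketch of the convention stated above: byte/short/integer values go into an
// int block, half_float/float/double values into a double block. The "block"
// here is just an array; the real builder interfaces are elided.
class NumericBlockWidths {
    static int[] toIntBlock(List<Number> values) {
        int[] block = new int[values.size()];
        for (int i = 0; i < values.size(); i++) {
            block[i] = values.get(i).intValue(); // byte/short widen losslessly
        }
        return block;
    }

    static double[] toDoubleBlock(List<Number> values) {
        double[] block = new double[values.size()];
        for (int i = 0; i < values.size(); i++) {
            block[i] = values.get(i).doubleValue(); // float widens losslessly
        }
        return block;
    }

    public static void main(String[] args) {
        System.out.println(java.util.Arrays.toString(toIntBlock(List.of((byte) 3, (short) 7, 42))));
        System.out.println(java.util.Arrays.toString(toDoubleBlock(List.of(0.5f, 2.25d))));
    }
}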

@@ -9,7 +9,9 @@
 
 package org.elasticsearch.index.mapper;
 
+import org.apache.lucene.document.Field;
 import org.apache.lucene.document.SortedDocValuesField;
+import org.apache.lucene.document.StringField;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Strings;
@@ -135,13 +137,21 @@ public class TimeSeriesIdFieldMapper extends MetadataFieldMapper {
         }
         context.doc().add(new SortedDocValuesField(fieldType().name(), timeSeriesId));
 
-        TsidExtractingIdFieldMapper.createField(
+        BytesRef uidEncoded = TsidExtractingIdFieldMapper.createField(
             context,
             getIndexVersionCreated(context).before(IndexVersions.TIME_SERIES_ROUTING_HASH_IN_ID)
                 ? routingPathFields.routingBuilder()
                 : null,
             timeSeriesId
         );
+
+        // We need to add the uid or id to nested Lucene documents so that when a document gets deleted, the nested documents are
+        // also deleted. Usually this happens when the nested document is created (in DocumentParserContext#createNestedContext), but
+        // for time-series indices the _id isn't available at that point.
+        for (LuceneDocument doc : context.nonRootDocuments()) {
+            assert doc.getField(IdFieldMapper.NAME) == null;
+            doc.add(new StringField(IdFieldMapper.NAME, uidEncoded, Field.Store.NO));
+        }
     }
 
     private IndexVersion getIndexVersionCreated(final DocumentParserContext context) {

@@ -46,7 +46,11 @@ public class TsidExtractingIdFieldMapper extends IdFieldMapper {
 
     private static final long SEED = 0;
 
-    public static void createField(DocumentParserContext context, IndexRouting.ExtractFromSource.Builder routingBuilder, BytesRef tsid) {
+    public static BytesRef createField(
+        DocumentParserContext context,
+        IndexRouting.ExtractFromSource.Builder routingBuilder,
+        BytesRef tsid
+    ) {
         final long timestamp = DataStreamTimestampFieldMapper.extractTimestampValue(context.doc());
         String id;
         if (routingBuilder != null) {
@@ -94,6 +98,7 @@ public class TsidExtractingIdFieldMapper extends IdFieldMapper {
 
         BytesRef uidEncoded = Uid.encodeId(context.id());
         context.doc().add(new StringField(NAME, uidEncoded, Field.Store.YES));
+        return uidEncoded;
     }
 
     public static String createId(int routingHash, BytesRef tsid, long timestamp) {

@@ -2404,6 +2404,12 @@ public class DenseVectorFieldMapper extends FieldMapper {
             }
             KnnVectorValues.DocIndexIterator iterator = values.iterator();
             return docId -> {
+                if (iterator.docID() > docId) {
+                    return hasValue = false;
+                }
+                if (iterator.docID() == docId) {
+                    return hasValue = true;
+                }
                 hasValue = docId == iterator.advance(docId);
                 hasMagnitude = hasValue && magnitudeReader != null && magnitudeReader.advanceExact(docId);
                 ord = iterator.index();
@@ -2414,6 +2420,12 @@ public class DenseVectorFieldMapper extends FieldMapper {
         if (byteVectorValues != null) {
             KnnVectorValues.DocIndexIterator iterator = byteVectorValues.iterator();
             return docId -> {
+                if (iterator.docID() > docId) {
+                    return hasValue = false;
+                }
+                if (iterator.docID() == docId) {
+                    return hasValue = true;
+                }
                 hasValue = docId == iterator.advance(docId);
                 ord = iterator.index();
                 return hasValue;
@@ -2476,6 +2488,12 @@ public class DenseVectorFieldMapper extends FieldMapper {
             return null;
         }
         return docId -> {
+            if (values.docID() > docId) {
+                return hasValue = false;
+            }
+            if (values.docID() == docId) {
+                return hasValue = true;
+            }
             hasValue = docId == values.advance(docId);
             return hasValue;
         };
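The guards added above avoid calling advance() when the iterator already sits at or past the target document, since Lucene doc-ID iterators only move forward. A sketch with a minimal forward-only iterator (ForwardIterator is a stand-in, not the Lucene API):

// Sketch of the forward-only iterator guard added above: before advancing to
// a target, check whether the iterator is already past it (miss) or exactly
// on it (hit, and Lucene's advance contract forbids re-advancing to a
// target <= the current docID).
class AdvanceGuard {
    static class ForwardIterator {
        private final int[] docs;
        private int pos = -1;

        ForwardIterator(int... docs) {
            this.docs = docs;
        }

        int docID() {
            return pos < 0 ? -1 : (pos < docs.length ? docs[pos] : Integer.MAX_VALUE);
        }

        int advance(int target) { // moves to the first doc >= target
            while (docID() < target) {
                pos++;
            }
            return docID();
        }
    }

    static boolean hasValue(ForwardIterator it, int docId) {
        if (it.docID() > docId) {
            return false; // already past: advancing would skip real matches
        }
        if (it.docID() == docId) {
            return true; // already positioned, no advance needed
        }
        return it.advance(docId) == docId;
    }

    public static void main(String[] args) {
        var it = new ForwardIterator(2, 5, 9);
        System.out.println(hasValue(it, 5)); // true (advances to 5)
        System.out.println(hasValue(it, 5)); // true (already at 5)
        System.out.println(hasValue(it, 3)); // false (iterator already past 3)
        System.out.println(hasValue(it, 9)); // true
    }
}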

@@ -4313,17 +4313,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         assert waitForEngineOrClosedShardListeners.isDone();
         try {
             synchronized (engineMutex) {
-                final var currentEngine = getEngine();
-                currentEngine.prepareForEngineReset();
-                var engineConfig = newEngineConfig(replicationTracker);
-                verifyNotClosed();
-                IOUtils.close(currentEngine);
-                var newEngine = createEngine(engineConfig);
-                currentEngineReference.set(newEngine);
+                getEngine().prepareForEngineReset();
+                var newEngine = createEngine(newEngineConfig(replicationTracker));
+                IOUtils.close(currentEngineReference.getAndSet(newEngine));
                 onNewEngine(newEngine);
             }
             onSettingsChanged();
         } catch (Exception e) {
             // we want to fail the shard in the case prepareForEngineReset throws
             failShard("unable to reset engine", e);
         }
     }
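The rewritten block above installs the new engine and closes the previous one in a single getAndSet expression, so there is no window where the reference holds an already-closed engine. A generic sketch of that swap-then-close idiom (Resource is hypothetical):

import java.util.concurrent.atomic.AtomicReference;

// Generic sketch of the swap-then-close idiom used above:
// AtomicReference.getAndSet installs the replacement and hands back the old
// value in one atomic step, so the previous resource can be closed without
// readers ever observing a closed instance through the reference.
class SwapAndClose {
    static class Resource implements AutoCloseable {
        final String name;

        Resource(String name) {
            this.name = name;
        }

        @Override
        public void close() {
            System.out.println("closed " + name);
        }
    }

    private static final AtomicReference<Resource> CURRENT = new AtomicReference<>(new Resource("old-engine"));

    static void reset() throws Exception {
        Resource replacement = new Resource("new-engine");
        try (Resource previous = CURRENT.getAndSet(replacement)) {
            // try-with-resources closes the previous instance on exit
        }
        System.out.println("current is now " + CURRENT.get().name);
    }

    public static void main(String[] args) throws Exception {
        reset();
    }
}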

@@ -57,14 +57,17 @@ public record IngestStats(Stats totalStats, List<PipelineStat> pipelineStats, Ma
      * Read from a stream.
      */
     public static IngestStats read(StreamInput in) throws IOException {
-        var stats = new Stats(in);
+        var stats = readStats(in);
         var size = in.readVInt();
+        if (stats == Stats.IDENTITY && size == 0) {
+            return IDENTITY;
+        }
         var pipelineStats = new ArrayList<PipelineStat>(size);
         var processorStats = Maps.<String, List<ProcessorStat>>newMapWithExpectedSize(size);
 
         for (var i = 0; i < size; i++) {
             var pipelineId = in.readString();
-            var pipelineStat = new Stats(in);
+            var pipelineStat = readStats(in);
             var byteStat = in.getTransportVersion().onOrAfter(TransportVersions.V_8_15_0) ? new ByteStats(in) : new ByteStats(0, 0);
             pipelineStats.add(new PipelineStat(pipelineId, pipelineStat, byteStat));
             int processorsSize = in.readVInt();
@@ -72,7 +75,7 @@ public record IngestStats(Stats totalStats, List<PipelineStat> pipelineStats, Ma
             for (var j = 0; j < processorsSize; j++) {
                 var processorName = in.readString();
                 var processorType = in.readString();
-                var processorStat = new Stats(in);
+                var processorStat = readStats(in);
                 processorStatsPerPipeline.add(new ProcessorStat(processorName, processorType, processorStat));
             }
             processorStats.put(pipelineId, Collections.unmodifiableList(processorStatsPerPipeline));
@@ -167,6 +170,21 @@ public record IngestStats(Stats totalStats, List<PipelineStat> pipelineStats, Ma
         return totalsPerPipelineProcessor;
     }
 
+    /**
+     * Read {@link Stats} from a stream.
+     */
+    private static Stats readStats(StreamInput in) throws IOException {
+        long ingestCount = in.readVLong();
+        long ingestTimeInMillis = in.readVLong();
+        long ingestCurrent = in.readVLong();
+        long ingestFailedCount = in.readVLong();
+        if (ingestCount == 0 && ingestTimeInMillis == 0 && ingestCurrent == 0 && ingestFailedCount == 0) {
+            return Stats.IDENTITY;
+        } else {
+            return new Stats(ingestCount, ingestTimeInMillis, ingestCurrent, ingestFailedCount);
+        }
+    }
+
     public record Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount)
         implements
             Writeable,
@@ -174,13 +192,6 @@ public record IngestStats(Stats totalStats, List<PipelineStat> pipelineStats, Ma
 
         public static final Stats IDENTITY = new Stats(0, 0, 0, 0);
 
-        /**
-         * Read from a stream.
-         */
-        public Stats(StreamInput in) throws IOException {
-            this(in.readVLong(), in.readVLong(), in.readVLong(), in.readVLong());
-        }
-
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeVLong(ingestCount);
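The readStats helper above decodes all-zero stats to one shared IDENTITY constant, which is what makes the cheap reference check (stats == Stats.IDENTITY) in read() possible and avoids allocating per-pipeline zero objects. A self-contained sketch of that canonical-instance trick, reading from a plain long[] instead of a StreamInput:

// Sketch of the canonical-instance trick used by readStats above: all-zero
// stats decode to one shared IDENTITY object, so "empty" can be detected with
// a reference comparison. The record mirrors the shape of the real Stats.
class IdentityStatsDemo {
    record Stats(long count, long timeInMillis, long current, long failedCount) {
        static final Stats IDENTITY = new Stats(0, 0, 0, 0);
    }

    static Stats readStats(long[] wire, int offset) {
        long count = wire[offset];
        long time = wire[offset + 1];
        long current = wire[offset + 2];
        long failed = wire[offset + 3];
        if (count == 0 && time == 0 && current == 0 && failed == 0) {
            return Stats.IDENTITY; // shared instance instead of a fresh all-zero record
        }
        return new Stats(count, time, current, failed);
    }

    public static void main(String[] args) {
        Stats empty = readStats(new long[] { 0, 0, 0, 0 }, 0);
        Stats busy = readStats(new long[] { 5, 120, 1, 0 }, 0);
        System.out.println(empty == Stats.IDENTITY); // true: reference check works
        System.out.println(busy == Stats.IDENTITY);  // false
    }
}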
Some files were not shown because too many files have changed in this diff.