Mirror of https://github.com/elastic/elasticsearch.git (synced 2025-06-28 01:22:26 -04:00)
Add ability to redirect ingestion failures on data streams to a failure store (#126973)
Removes the feature flags and guards that prevent the new failure store functionality from operating in production runtimes.
parent 72b4ed255b
commit 7b89f4d4a6
90 changed files with 2020 additions and 2261 deletions
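
Nearly every hunk below applies the same mechanical change: code that was gated behind `DataStream.isFailureStoreFeatureFlagEnabled()` (driven by the `es.failure_store_feature_flag_enabled` system property and `FeatureFlag.FAILURE_STORE_ENABLED` in test clusters) now runs unconditionally. The following is a minimal, self-contained sketch of that before/after pattern in plain Java; the class and handler names are placeholders for illustration, not code taken from this commit.

[source,java]
----
import java.util.ArrayList;
import java.util.List;

// Illustrative only: shows the shape of the guard removal repeated across the diff.
class FeatureFlagGuardSketch {

    // Before this commit: failure-store handlers were registered only when the
    // feature flag was enabled (snapshot builds set the system property in Gradle).
    static List<String> handlersBefore(boolean failureStoreFlagEnabled) {
        List<String> handlers = new ArrayList<>();
        handlers.add("RestGetDataStreamLifecycleAction");
        if (failureStoreFlagEnabled) {
            handlers.add("RestGetDataStreamOptionsAction");
        }
        return handlers;
    }

    // After this commit: the guard is gone and the failure-store handlers are
    // always registered, so the feature operates in production builds.
    static List<String> handlersAfter() {
        List<String> handlers = new ArrayList<>();
        handlers.add("RestGetDataStreamLifecycleAction");
        handlers.add("RestGetDataStreamOptionsAction");
        return handlers;
    }
}
----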
@@ -85,9 +85,6 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {
   setting 'xpack.license.self_generated.type', 'trial'
   setting 'indices.lifecycle.history_index_enabled', 'false'
   keystorePassword 'keystore-password'
-  if (buildParams.snapshotBuild == false) {
-    requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
-  }
 }
 
 // debug ccr test failures:
@@ -126,7 +123,6 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {
 
 
   requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0")
-  requiresFeature 'es.failure_store_feature_flag_enabled', Version.fromString("8.12.0")
 
   // TODO Rene: clean up this kind of cross project file references
   extraConfigFile 'op-jwks.json', project(':x-pack:test:idp-fixture').file("src/main/resources/oidc/op-jwks.json")
docs/changelog/126973.yaml (new file, 89 additions)
@@ -0,0 +1,89 @@
+pr: 126973
+summary: Add ability to redirect ingestion failures on data streams to a failure store
+area: Data streams
+type: feature
+issues: []
+highlight:
+  title: Add ability to redirect ingestion failures on data streams to a failure store
+  body: |-
+    Documents that encountered ingest pipeline failures or mapping conflicts
+    would previously be returned to the client as errors in the bulk and
+    index operations. Many client applications are not equipped to respond
+    to these failures. This leads to the failed documents often being
+    dropped by the client which cannot hold the broken documents
+    indefinitely. In many end user workloads, these failed documents
+    represent events that could be critical signals for observability or
+    security use cases.
+
+    To help mitigate this problem, data streams can now maintain a "failure
+    store" which is used to accept and hold documents that fail to be
+    ingested due to preventable configuration errors. The data stream's
+    failure store operates like a separate set of backing indices with their
+    own mappings and access patterns that allow Elasticsearch to accept
+    documents that would otherwise be rejected due to unhandled ingest
+    pipeline exceptions or mapping conflicts.
+
+    Users can enable redirection of ingest failures to the failure store on
+    new data streams by specifying it in the new `data_stream_options` field
+    inside of a component or index template:
+
+    [source,yaml]
+    ----
+    PUT _index_template/my-template
+    {
+      "index_patterns": ["logs-test-*"],
+      "data_stream": {},
+      "template": {
+        "data_stream_options": {
+          "failure_store": {
+            "enabled": true
+          }
+        }
+      }
+    }
+    ----
+
+    Existing data streams can be configured with the new data stream
+    `_options` endpoint:
+
+    [source,yaml]
+    ----
+    PUT _data_stream/logs-test-apache/_options
+    {
+      "failure_store": {
+        "enabled": "true"
+      }
+    }
+    ----
+
+    When redirection is enabled, any ingestion related failures will be
+    captured in the failure store if the cluster is able to, along with the
+    timestamp that the failure occurred, details about the error
+    encountered, and the document that could not be ingested. Since failure
+    stores are a kind of Elasticsearch index, we can search the data stream
+    for the failures that it has collected. The failures are not shown by
+    default as they are stored in different indices than the normal data
+    stream data. In order to retrieve the failures, we use the `_search` API
+    along with a new bit of index pattern syntax, the `::` selector.
+
+    [source,yaml]
+    ----
+    POST logs-test-apache::failures/_search
+    ----
+
+    This index syntax informs the search operation to target the indices in
+    its failure store instead of its backing indices. It can be mixed in a
+    number of ways with other index patterns to include their failure store
+    indices in the search operation:
+
+    [source,yaml]
+    ----
+    POST logs-*::failures/_search
+    POST logs-*,logs-*::failures/_search
+    POST *::failures/_search
+    POST _query
+    {
+      "query": "FROM my_data_stream*::failures"
+    }
+    ----
+  notable: true
@@ -36,12 +36,6 @@ if (buildParams.inFipsJvm){
   tasks.named("yamlRestTest").configure{enabled = false }
 }
 
-if (buildParams.snapshotBuild == false) {
-  tasks.withType(Test).configureEach {
-    systemProperty 'es.failure_store_feature_flag_enabled', 'true'
-  }
-}
-
 tasks.named("yamlRestCompatTestTransform").configure({ task ->
   task.skipTest("data_stream/10_basic/Create hidden data stream", "warning does not exist for compatibility")
 
@@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
-import org.elasticsearch.test.cluster.FeatureFlag;
 import org.elasticsearch.test.cluster.local.distribution.DistributionType;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.After;
@@ -38,7 +37,6 @@ public abstract class AbstractDataStreamIT extends ESRestTestCase {
     @ClassRule
     public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
         .distribution(DistributionType.DEFAULT)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .setting("xpack.security.enabled", "false")
         .setting("xpack.watcher.enabled", "false")
         // Disable apm-data so the index templates it installs do not impact
@@ -15,7 +15,6 @@ import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
-import org.elasticsearch.test.cluster.FeatureFlag;
 import org.elasticsearch.test.cluster.local.distribution.DistributionType;
 import org.elasticsearch.test.cluster.util.resource.Resource;
 import org.elasticsearch.test.rest.ESRestTestCase;
@@ -29,7 +28,6 @@ public class DataStreamWithSecurityIT extends ESRestTestCase {
     @ClassRule
     public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
         .distribution(DistributionType.DEFAULT)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .setting("xpack.watcher.enabled", "false")
         .setting("xpack.ml.enabled", "false")
         .setting("xpack.security.enabled", "true")
@@ -13,7 +13,6 @@ import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
-import org.elasticsearch.test.cluster.FeatureFlag;
 import org.elasticsearch.test.cluster.local.distribution.DistributionType;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.junit.ClassRule;
@@ -27,7 +26,6 @@ public abstract class DisabledSecurityDataStreamTestCase extends ESRestTestCase
     @ClassRule
     public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
         .distribution(DistributionType.DEFAULT)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .setting("xpack.security.enabled", "false")
         .setting("xpack.watcher.enabled", "false")
         .build();
@@ -17,7 +17,6 @@ import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
-import org.elasticsearch.test.cluster.FeatureFlag;
 import org.elasticsearch.test.cluster.local.distribution.DistributionType;
 import org.elasticsearch.test.cluster.util.resource.Resource;
 import org.elasticsearch.test.rest.ESRestTestCase;
@@ -41,7 +40,6 @@ public class LazyRolloverDataStreamIT extends ESRestTestCase {
     @ClassRule
     public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
         .distribution(DistributionType.DEFAULT)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .setting("xpack.watcher.enabled", "false")
         .setting("xpack.ml.enabled", "false")
         .setting("xpack.security.enabled", "true")
@@ -19,7 +19,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
-import org.elasticsearch.test.cluster.FeatureFlag;
 import org.elasticsearch.test.cluster.local.distribution.DistributionType;
 import org.elasticsearch.test.cluster.util.resource.Resource;
 import org.elasticsearch.test.rest.ESRestTestCase;
@@ -46,7 +45,6 @@ public abstract class DataStreamLifecyclePermissionsTestCase extends ESRestTestC
     @ClassRule
     public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
         .distribution(DistributionType.DEFAULT)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .setting("xpack.watcher.enabled", "false")
         .setting("xpack.ml.enabled", "false")
         .setting("xpack.security.enabled", "true")
@@ -28,7 +28,7 @@ public class DataStreamFeatures implements FeatureSpecification {
 
     @Override
     public Set<NodeFeature> getFeatures() {
-        return DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(DataStream.DATA_STREAM_FAILURE_STORE_FEATURE) : Set.of();
+        return Set.of(DataStream.DATA_STREAM_FAILURE_STORE_FEATURE);
     }
 
     @Override
@@ -21,7 +21,6 @@ import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycle
 import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction;
 import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction;
 import org.elasticsearch.client.internal.OriginSettingClient;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -237,11 +236,9 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlu
         actions.add(new ActionHandler(DeleteDataStreamLifecycleAction.INSTANCE, TransportDeleteDataStreamLifecycleAction.class));
         actions.add(new ActionHandler(ExplainDataStreamLifecycleAction.INSTANCE, TransportExplainDataStreamLifecycleAction.class));
         actions.add(new ActionHandler(GetDataStreamLifecycleStatsAction.INSTANCE, TransportGetDataStreamLifecycleStatsAction.class));
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
         actions.add(new ActionHandler(GetDataStreamOptionsAction.INSTANCE, TransportGetDataStreamOptionsAction.class));
         actions.add(new ActionHandler(PutDataStreamOptionsAction.INSTANCE, TransportPutDataStreamOptionsAction.class));
         actions.add(new ActionHandler(DeleteDataStreamOptionsAction.INSTANCE, TransportDeleteDataStreamOptionsAction.class));
-        }
         return actions;
     }
 
@@ -274,11 +271,9 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlu
         handlers.add(new RestDeleteDataStreamLifecycleAction());
         handlers.add(new RestExplainDataStreamLifecycleAction());
         handlers.add(new RestDataStreamLifecycleStatsAction());
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
         handlers.add(new RestGetDataStreamOptionsAction());
         handlers.add(new RestPutDataStreamOptionsAction());
         handlers.add(new RestDeleteDataStreamOptionsAction());
-        }
         return handlers;
     }
 
@@ -230,8 +230,7 @@ public class TransportGetDataStreamsAction extends TransportLocalProjectMetadata
         for (DataStream dataStream : dataStreams) {
             // For this action, we are returning whether the failure store is effectively enabled, either in metadata or by cluster setting.
             // Users can use the get data stream options API to find out whether it is explicitly enabled in metadata.
-            boolean failureStoreEffectivelyEnabled = DataStream.isFailureStoreFeatureFlagEnabled()
-                && dataStream.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings);
+            boolean failureStoreEffectivelyEnabled = dataStream.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings);
             final String indexTemplate;
             boolean indexTemplatePreferIlmValue = true;
             String ilmPolicyName = null;
@@ -289,7 +288,7 @@ public class TransportGetDataStreamsAction extends TransportLocalProjectMetadata
         Map<Index, IndexProperties> backingIndicesSettingsValues = new HashMap<>();
         ProjectMetadata metadata = state.metadata();
         collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getIndices());
-        if (DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.getFailureIndices().isEmpty() == false) {
+        if (dataStream.getFailureIndices().isEmpty() == false) {
             collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getFailureIndices());
         }
 
@@ -375,12 +375,10 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab
             // These are the pre-rollover write indices. They may or may not be the write index after maybeExecuteRollover has executed,
             // depending on rollover criteria, for this reason we exclude them for the remaining run.
             indicesToExcludeForRemainingRun.add(maybeExecuteRollover(project, dataStream, false));
-            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            Index failureStoreWriteIndex = maybeExecuteRollover(project, dataStream, true);
            if (failureStoreWriteIndex != null) {
                indicesToExcludeForRemainingRun.add(failureStoreWriteIndex);
            }
-            }
 
             // tsds indices that are still within their time bounds (i.e. now < time_series.end_time) - we don't want these indices to be
             // deleted, forcemerged, or downsampled as they're still expected to receive large amounts of writes
@@ -802,7 +800,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab
                 targetIndices.add(index);
             }
         }
-        if (withFailureStore && DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.getFailureIndices().isEmpty() == false) {
+        if (withFailureStore && dataStream.getFailureIndices().isEmpty() == false) {
             for (Index index : dataStream.getFailureIndices()) {
                 if (dataStream.isIndexManagedByDataStreamLifecycle(index, indexMetadataSupplier)
                     && indicesToExcludeForRemainingRun.contains(index) == false) {
@@ -27,7 +27,6 @@ import org.elasticsearch.snapshots.Snapshot;
 import org.elasticsearch.snapshots.SnapshotId;
 import org.elasticsearch.snapshots.SnapshotInProgressException;
 import org.elasticsearch.test.ESTestCase;
-import org.junit.Assume;
 
 import java.util.Collections;
 import java.util.List;
@@ -69,8 +68,6 @@ public class DeleteDataStreamTransportActionTests extends ESTestCase {
     }
 
     public void testDeleteDataStreamWithFailureStore() {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         final String dataStreamName = "my-data-stream";
         final List<String> otherIndices = randomSubsetOf(List.of("foo", "bar", "baz"));
 
@@ -134,23 +134,18 @@ public class GetDataStreamsResponseTests extends ESTestCase {
                 is(ManagedBy.LIFECYCLE.displayValue)
             );
 
-            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
             var failureStore = (Map<String, Object>) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName());
             List<Object> failureStoresRepresentation = (List<Object>) failureStore.get(DataStream.INDICES_FIELD.getPreferredName());
             Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0);
             assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName()));
             assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
-            assertThat(
-                failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()),
-                is(nullValue())
-            );
+            assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
             assertThat(
                 failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()),
                 is(ManagedBy.LIFECYCLE.displayValue)
             );
             }
         }
-        }
 
         {
             // data stream has a lifecycle that's not enabled
@@ -228,16 +223,12 @@ public class GetDataStreamsResponseTests extends ESTestCase {
                 is(ManagedBy.UNMANAGED.displayValue)
             );
 
-            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
             var failureStore = (Map<String, Object>) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName());
             List<Object> failureStoresRepresentation = (List<Object>) failureStore.get(DataStream.INDICES_FIELD.getPreferredName());
             Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0);
             assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName()));
             assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
-            assertThat(
-                failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()),
-                is(nullValue())
-            );
+            assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
             assertThat(
                 failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()),
                 is(ManagedBy.UNMANAGED.displayValue)
@@ -245,7 +236,6 @@ public class GetDataStreamsResponseTests extends ESTestCase {
             }
         }
     }
-    }
 
     public void testManagedByDisplayValuesDontAccidentalyChange() {
         // UI might derive logic based on the display values so any changes should be coordinated with the UI team
@@ -20,8 +20,6 @@ import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 import org.junit.ClassRule;
 
-import static org.elasticsearch.test.cluster.FeatureFlag.FAILURE_STORE_ENABLED;
-
 public class DataStreamsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
     public DataStreamsClientYamlTestSuiteIT(final ClientYamlTestCandidate testCandidate) {
@@ -46,7 +44,6 @@ public class DataStreamsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase
     private static ElasticsearchCluster createCluster() {
         LocalClusterSpecBuilder<ElasticsearchCluster> clusterBuilder = ElasticsearchCluster.local()
             .distribution(DistributionType.DEFAULT)
-            .feature(FAILURE_STORE_ENABLED)
             .setting("xpack.security.enabled", "true")
            .keystore("bootstrap.password", "x-pack-test-password")
            .user("x_pack_rest_user", "x-pack-test-password")
@@ -23,8 +23,6 @@ import org.junit.ClassRule;
 
 import java.util.Objects;
 
-import static org.elasticsearch.test.cluster.FeatureFlag.FAILURE_STORE_ENABLED;
-
 public class DotPrefixClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
 
     public DotPrefixClientYamlTestSuiteIT(final ClientYamlTestCandidate testCandidate) {
@@ -49,7 +47,6 @@ public class DotPrefixClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
     private static ElasticsearchCluster createCluster() {
         LocalClusterSpecBuilder<ElasticsearchCluster> clusterBuilder = ElasticsearchCluster.local()
             .distribution(DistributionType.DEFAULT)
-            .feature(FAILURE_STORE_ENABLED)
             .setting("xpack.security.enabled", "true")
            .keystore("bootstrap.password", "x-pack-test-password")
            .user("x_pack_rest_user", "x-pack-test-password");
@@ -29,7 +29,6 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
     versions = [bwcVersion.toString(), project.version]
     setting 'cluster.remote.node.attr', 'gateway'
     setting 'xpack.security.enabled', 'false'
-    requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
   }
   def remoteCluster = testClusters.register("${baseName}-remote") {
     numberOfNodes = 3
@@ -37,7 +36,6 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
     firstNode.setting 'node.attr.gateway', 'true'
     lastNode.setting 'node.attr.gateway', 'true'
     setting 'xpack.security.enabled', 'false'
-    requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
   }
 
 
@@ -53,7 +53,6 @@ public class FullClusterRestartArchivedSettingsIT extends ParameterizedFullClust
         .setting("indices.memory.shard_inactive_time", "60m")
         .apply(() -> clusterConfig)
         .feature(FeatureFlag.TIME_SERIES_MODE)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .build();
 
     @ClassRule
@@ -55,8 +55,7 @@ public class FullClusterRestartDownsampleIT extends ParameterizedFullClusterRest
             .setting("xpack.security.enabled", "false")
             .setting("indices.lifecycle.poll_interval", "5s")
             .apply(() -> clusterConfig)
-            .feature(FeatureFlag.TIME_SERIES_MODE)
-            .feature(FeatureFlag.FAILURE_STORE_ENABLED);
+            .feature(FeatureFlag.TIME_SERIES_MODE);
 
         if (oldVersion.before(Version.fromString("8.18.0"))) {
             cluster.jvmArg("-da:org.elasticsearch.index.mapper.DocumentMapper");
@@ -116,8 +116,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
             // some tests rely on the translog not being flushed
             .setting("indices.memory.shard_inactive_time", "60m")
             .apply(() -> clusterConfig)
-            .feature(FeatureFlag.TIME_SERIES_MODE)
-            .feature(FeatureFlag.FAILURE_STORE_ENABLED);
+            .feature(FeatureFlag.TIME_SERIES_MODE);
 
         if (oldVersion.before(Version.fromString("8.18.0"))) {
             cluster.jvmArg("-da:org.elasticsearch.index.mapper.DocumentMapper");
@@ -40,7 +40,6 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
 import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder;
 import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.test.cluster.ElasticsearchCluster;
-import org.elasticsearch.test.cluster.FeatureFlag;
 import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider;
 import org.elasticsearch.test.cluster.local.distribution.DistributionType;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -78,7 +77,6 @@ public class QueryBuilderBWCIT extends ParameterizedFullClusterRestartTestCase {
         .version(org.elasticsearch.test.cluster.util.Version.fromString(OLD_CLUSTER_VERSION))
         .nodes(2)
         .setting("xpack.security.enabled", "false")
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .apply(() -> clusterConfig)
         .build();
 
@@ -4,9 +4,8 @@
       "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html",
       "description":"Deletes the data stream options of the selected data streams."
     },
-    "stability": "experimental",
-    "visibility": "feature_flag",
-    "feature_flag": "es.failure_store_feature_flag_enabled",
+    "stability": "stable",
+    "visibility": "public",
     "headers":{
       "accept": [ "application/json"]
     },
@@ -4,9 +4,8 @@
       "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html",
       "description":"Returns the data stream options of the selected data streams."
     },
-    "stability": "experimental",
-    "visibility": "feature_flag",
-    "feature_flag": "es.failure_store_feature_flag_enabled",
+    "stability": "stable",
+    "visibility": "public",
     "headers":{
       "accept": [ "application/json"]
     },
@@ -4,9 +4,8 @@
       "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html",
       "description":"Updates the data stream options of the selected data streams."
     },
-    "stability": "experimental",
-    "visibility": "feature_flag",
-    "feature_flag": "es.failure_store_feature_flag_enabled",
+    "stability": "stable",
+    "visibility": "public",
     "headers":{
       "accept": [ "application/json"],
       "content_type": ["application/json"]
@@ -135,11 +135,9 @@ sourceSets.main.compiledBy(generateModulesList, generatePluginsList)
 if (buildParams.snapshotBuild == false) {
   tasks.named("test").configure {
     systemProperty 'es.index_mode_feature_flag_registered', 'true'
-    systemProperty 'es.failure_store_feature_flag_enabled', 'true'
   }
   tasks.named("internalClusterTest").configure {
     systemProperty 'es.index_mode_feature_flag_registered', 'true'
-    systemProperty 'es.failure_store_feature_flag_enabled', 'true'
   }
 }
 
@@ -11,7 +11,6 @@ package org.elasticsearch.action;
 
 import org.elasticsearch.action.search.SearchContextId;
 import org.elasticsearch.action.support.IndicesOptions;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.ProjectMetadata;
@@ -179,7 +178,6 @@ public class ResolvedIndices {
             : indexNameExpressionResolver.concreteIndices(projectMetadata, localIndices, startTimeInMillis);
 
         // prevent using selectors with remote cluster patterns
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
         for (final var indicesPerRemoteClusterAlias : remoteClusterIndices.entrySet()) {
             final String[] indices = indicesPerRemoteClusterAlias.getValue().indices();
             if (indices != null) {
@@ -190,7 +188,6 @@ public class ResolvedIndices {
                 }
             }
         }
-        }
 
         return new ResolvedIndices(
             remoteClusterIndices,
@@ -222,7 +222,7 @@ final class BulkOperation extends ActionRunnable<BulkResponse> {
      */
     private void rollOverFailureStores(Runnable runnable) {
         // Skip allocation of some objects if we don't need to roll over anything.
-        if (failureStoresToBeRolledOver.isEmpty() || DataStream.isFailureStoreFeatureFlagEnabled() == false) {
+        if (failureStoresToBeRolledOver.isEmpty()) {
             runnable.run();
             return;
         }
@@ -423,7 +423,7 @@ final class BulkOperation extends ActionRunnable<BulkResponse> {
     }
 
     private void redirectFailuresOrCompleteBulkOperation() {
-        if (DataStream.isFailureStoreFeatureFlagEnabled() && failureStoreRedirects.isEmpty() == false) {
+        if (failureStoreRedirects.isEmpty() == false) {
             doRedirectFailures();
         } else {
             completeBulkOperation();
@@ -615,10 +615,7 @@ final class BulkOperation extends ActionRunnable<BulkResponse> {
      * @return a data stream if the write request points to a data stream, or {@code null} if it does not
      */
     private static DataStream getRedirectTargetCandidate(DocWriteRequest<?> docWriteRequest, ProjectMetadata project) {
-        // Feature flag guard
-        if (DataStream.isFailureStoreFeatureFlagEnabled() == false) {
-            return null;
-        }
+        // PRTODO: We could check for cluster feature here instead
         // If there is no index abstraction, then the request is using a pattern of some sort, which data streams do not support
         IndexAbstraction ia = project.getIndicesLookup().get(docWriteRequest.index());
         return DataStream.resolveDataStream(ia, project);
@@ -17,7 +17,6 @@ import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.update.UpdateResponse;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.core.Assertions;
@@ -218,13 +217,6 @@ final class BulkRequestModifier implements Iterator<DocWriteRequest<?>> {
      * @param e the failure encountered.
      */
     public void markItemForFailureStore(int slot, String targetIndexName, Exception e) {
-        if (DataStream.isFailureStoreFeatureFlagEnabled() == false) {
-            // Assert false for development, but if we somehow find ourselves here, default to failure logic.
-            assert false
-                : "Attempting to route a failed write request type to a failure store but the failure store is not enabled! "
-                    + "This should be guarded against in TransportBulkAction#shouldStoreFailure()";
-            markItemAsFailed(slot, e, IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN);
-        } else {
         // We get the index write request to find the source of the failed document
         IndexRequest indexRequest = TransportBulkAction.getIndexWriteRequest(bulkRequest.requests().get(slot));
         if (indexRequest == null) {
@@ -271,5 +263,4 @@ final class BulkRequestModifier implements Iterator<DocWriteRequest<?>> {
             }
         }
     }
-    }
 }
@@ -12,7 +12,6 @@ package org.elasticsearch.action.bulk;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.TransportVersions;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -102,7 +101,7 @@ public enum IndexDocFailureStoreStatus implements ToXContentFragment, Writeable
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         // We avoid adding the not_applicable status in the response to not increase the size of bulk responses.
-        if (DataStream.isFailureStoreFeatureFlagEnabled() && this.equals(NOT_APPLICABLE_OR_UNKNOWN) == false) {
+        if (this.equals(NOT_APPLICABLE_OR_UNKNOWN) == false) {
             builder.field("failure_store", label);
         }
         return builder;
@@ -308,7 +308,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
             index,
             projectState.metadata()
         );
-        boolean lazyRolloverFailureStoreFeature = DataStream.isFailureStoreFeatureFlagEnabled();
         Set<String> indicesThatRequireAlias = new HashSet<>();
 
         for (DocWriteRequest<?> request : bulkRequest.requests) {
@@ -356,7 +355,7 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
             if (dataStream != null) {
                 if (writeToFailureStore == false && dataStream.getDataComponent().isRolloverOnWrite()) {
                     dataStreamsToBeRolledOver.add(request.index());
-                } else if (lazyRolloverFailureStoreFeature && writeToFailureStore && dataStream.getFailureComponent().isRolloverOnWrite()) {
+                } else if (writeToFailureStore && dataStream.getFailureComponent().isRolloverOnWrite()) {
                     failureStoresToBeRolledOver.add(request.index());
                 }
             }
@@ -627,9 +626,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
      */
     // Visibility for testing
     Boolean resolveFailureInternal(String indexName, ProjectMetadata projectMetadata, long epochMillis) {
-        if (DataStream.isFailureStoreFeatureFlagEnabled() == false) {
-            return null;
-        }
         var resolution = resolveFailureStoreFromMetadata(indexName, projectMetadata, epochMillis);
         if (resolution != null) {
             return resolution;
@@ -416,17 +416,12 @@ public class GetDataStreamAction extends ActionType<GetDataStreamAction.Response
                 builder.endArray();
                 builder.endObject();
             }
-            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
             builder.startObject(DataStream.FAILURE_STORE_FIELD.getPreferredName());
             builder.field(FAILURE_STORE_ENABLED.getPreferredName(), failureStoreEffectivelyEnabled);
-            builder.field(
-                DataStream.ROLLOVER_ON_WRITE_FIELD.getPreferredName(),
-                dataStream.getFailureComponent().isRolloverOnWrite()
-            );
+            builder.field(DataStream.ROLLOVER_ON_WRITE_FIELD.getPreferredName(), dataStream.getFailureComponent().isRolloverOnWrite());
             indicesToXContent(builder, dataStream.getFailureIndices());
             addAutoShardingEvent(builder, params, dataStream.getFailureComponent().getAutoShardingEvent());
             builder.endObject();
-            }
             builder.endObject();
             return builder;
         }
@@ -894,7 +894,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
 
     @Override
     public Index getConcreteWriteIndex(IndexAbstraction ia, ProjectMetadata project) {
-        if (DataStream.isFailureStoreFeatureFlagEnabled() && writeToFailureStore) {
+        if (writeToFailureStore) {
             if (ia.isDataStreamRelated() == false) {
                 throw new ElasticsearchException(
                     "Attempting to write a document to a failure store but the targeted index is not a data stream"
@@ -10,7 +10,6 @@ package org.elasticsearch.action.support;
 
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.TransportVersions;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.logging.DeprecationCategory;
@@ -827,14 +826,14 @@ public record IndicesOptions(
      * @return Whether selectors (::) are allowed in the index expression.
      */
     public boolean allowSelectors() {
-        return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.allowSelectors();
+        return gatekeeperOptions.allowSelectors();
     }
 
     /**
      * @return true when selectors should be included in the resolution, false otherwise.
      */
     public boolean includeFailureIndices() {
-        return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.includeFailureIndices();
+        return gatekeeperOptions.includeFailureIndices();
     }
 
     /**
@@ -1135,19 +1134,6 @@ public record IndicesOptions(
     }
 
     public static IndicesOptions fromMap(Map<String, Object> map, IndicesOptions defaultSettings) {
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
-            return fromParameters(
-                map.containsKey(WildcardOptions.EXPAND_WILDCARDS) ? map.get(WildcardOptions.EXPAND_WILDCARDS) : map.get("expandWildcards"),
-                map.containsKey(ConcreteTargetOptions.IGNORE_UNAVAILABLE)
-                    ? map.get(ConcreteTargetOptions.IGNORE_UNAVAILABLE)
-                    : map.get("ignoreUnavailable"),
-                map.containsKey(WildcardOptions.ALLOW_NO_INDICES) ? map.get(WildcardOptions.ALLOW_NO_INDICES) : map.get("allowNoIndices"),
-                map.containsKey(GatekeeperOptions.IGNORE_THROTTLED)
-                    ? map.get(GatekeeperOptions.IGNORE_THROTTLED)
-                    : map.get("ignoreThrottled"),
-                defaultSettings
-            );
-        }
         return fromParameters(
             map.containsKey(WildcardOptions.EXPAND_WILDCARDS) ? map.get(WildcardOptions.EXPAND_WILDCARDS) : map.get("expandWildcards"),
             map.containsKey(ConcreteTargetOptions.IGNORE_UNAVAILABLE)
@@ -1469,11 +1455,10 @@ public record IndicesOptions(
             + ignoreAliases()
             + ", ignore_throttled="
             + ignoreThrottled()
-            // Until the feature flag is removed we access the field directly from the gatekeeper options.
-            + (DataStream.isFailureStoreFeatureFlagEnabled() ? ", allow_selectors=" + gatekeeperOptions().allowSelectors() : "")
-            + (DataStream.isFailureStoreFeatureFlagEnabled()
-                ? ", include_failure_indices=" + gatekeeperOptions().includeFailureIndices()
-                : "")
+            + ", allow_selectors="
+            + allowSelectors()
+            + ", include_failure_indices="
+            + includeFailureIndices()
             + ']';
     }
 }
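For context on the hunks above: with the feature-flag guard gone, allowSelectors() and includeFailureIndices() are plain delegations to the gatekeeper options, so selector suffixes in index expressions are always parsed rather than only in snapshot builds. As a rough, hedged illustration (the ::data/::failures selector syntax is the one documented for data streams; the variable names are invented):

    // Sketch only: selector suffixes an index expression may carry once selectors are allowed.
    String backingIndicesExpression = "my-datastream";            // default: the data component
    String dataSelectorExpression = "my-datastream::data";        // explicit data selector
    String failureStoreExpression = "my-datastream::failures";    // targets the failure store indices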
@@ -383,10 +383,8 @@ public class ComposableIndexTemplate implements SimpleDiffable<ComposableIndexTe
     static {
         PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), HIDDEN);
         PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING);
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
-            PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE);
-        }
+        PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE);
     }
 
     private final boolean hidden;
     private final boolean allowCustomRouting;
@@ -33,7 +33,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.time.DateFormatter;
 import org.elasticsearch.common.time.DateFormatters;
-import org.elasticsearch.common.util.FeatureFlag;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
@@ -76,16 +75,11 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
 
     private static final Logger LOGGER = LogManager.getLogger(DataStream.class);
 
-    public static final boolean FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store").isEnabled();
     public static final NodeFeature DATA_STREAM_FAILURE_STORE_FEATURE = new NodeFeature("data_stream.failure_store");
     public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.V_8_12_0;
     public static final TransportVersion ADDED_AUTO_SHARDING_EVENT_VERSION = TransportVersions.V_8_14_0;
     public static final TransportVersion ADD_DATA_STREAM_OPTIONS_VERSION = TransportVersions.V_8_16_0;
 
-    public static boolean isFailureStoreFeatureFlagEnabled() {
-        return FAILURE_STORE_FEATURE_FLAG;
-    }
-
     public static final String BACKING_INDEX_PREFIX = ".ds-";
     public static final String FAILURE_STORE_PREFIX = ".fs-";
     public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd");
@@ -214,7 +208,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
         var lifecycle = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)
             ? in.readOptionalWriteable(DataStreamLifecycle::new)
             : null;
-        // This boolean flag has been moved in data stream options
+        // TODO: clear out the failure_store field, which is redundant https://github.com/elastic/elasticsearch/issues/127071
         var failureStoreEnabled = in.getTransportVersion()
             .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.V_8_16_0) ? in.readBoolean() : false;
         var failureIndices = in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)
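For context on the constant and helper removed above: org.elasticsearch.common.util.FeatureFlag gates a feature on an es.<name>_feature_flag_enabled system property (and, as the build-script changes elsewhere in this commit suggest, snapshot builds enable such flags by default). A minimal sketch of that pattern with an invented flag name, not part of this change:

    import org.elasticsearch.common.util.FeatureFlag;

    public final class MyFeature {
        // Resolved once at class load: typically true in snapshot builds, or when
        // -Des.my_feature_feature_flag_enabled=true is passed to the JVM.
        private static final boolean MY_FEATURE_FLAG = new FeatureFlag("my_feature").isEnabled();

        public static boolean isMyFeatureEnabled() {
            return MY_FEATURE_FLAG;
        }

        private MyFeature() {}
    }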
@@ -942,8 +936,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
         }
 
         List<Index> reconciledFailureIndices = this.failureIndices.indices;
-        if (DataStream.isFailureStoreFeatureFlagEnabled()
-            && isAnyIndexMissing(failureIndices.indices, snapshotMetadataBuilder, indicesInSnapshot)) {
+        if (isAnyIndexMissing(failureIndices.indices, snapshotMetadataBuilder, indicesInSnapshot)) {
             reconciledFailureIndices = new ArrayList<>(this.failureIndices.indices);
             failureIndicesChanged = reconciledFailureIndices.removeIf(x -> indicesInSnapshot.contains(x.getName()) == false);
         }
@@ -1010,8 +1003,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
         LongSupplier nowSupplier,
         DataStreamGlobalRetention globalRetention
     ) {
-        if (DataStream.isFailureStoreFeatureFlagEnabled() == false
-            || getFailuresLifecycle() == null
+        if (getFailuresLifecycle() == null
             || getFailuresLifecycle().enabled() == false
             || getFailuresLifecycle().getEffectiveDataRetention(globalRetention, isInternal()) == null) {
             return List.of();
@@ -1252,6 +1244,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
         }
         if (out.getTransportVersion()
             .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, DataStream.ADD_DATA_STREAM_OPTIONS_VERSION)) {
+            // TODO: clear out the failure_store field, which is redundant https://github.com/elastic/elasticsearch/issues/127071
             out.writeBoolean(isFailureStoreExplicitlyEnabled());
         }
         if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) {
@@ -1283,6 +1276,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
     public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing");
     public static final ParseField INDEX_MODE = new ParseField("index_mode");
     public static final ParseField LIFECYCLE = new ParseField("lifecycle");
+    // TODO: clear out the failure_store field, which is redundant https://github.com/elastic/elasticsearch/issues/127071
     public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store");
     public static final ParseField FAILURE_INDICES_FIELD = new ParseField("failure_indices");
     public static final ParseField ROLLOVER_ON_WRITE_FIELD = new ParseField("rollover_on_write");
@@ -1292,29 +1286,9 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
     public static final ParseField DATA_STREAM_OPTIONS_FIELD = new ParseField("options");
 
     @SuppressWarnings("unchecked")
-    private static final ConstructingObjectParser<DataStream, Void> PARSER = new ConstructingObjectParser<>("data_stream", args -> {
-        // Fields behind a feature flag need to be parsed last otherwise the parser will fail when the feature flag is disabled.
-        // Until the feature flag is removed we keep them separately to be mindful of this.
-        boolean failureStoreEnabled = DataStream.isFailureStoreFeatureFlagEnabled() && args[12] != null && (boolean) args[12];
-        DataStreamIndices failureIndices = DataStream.isFailureStoreFeatureFlagEnabled()
-            ? new DataStreamIndices(
-                FAILURE_STORE_PREFIX,
-                args[13] != null ? (List<Index>) args[13] : List.of(),
-                args[14] != null && (boolean) args[14],
-                (DataStreamAutoShardingEvent) args[15]
-            )
-            : new DataStreamIndices(FAILURE_STORE_PREFIX, List.of(), false, null);
-        // We cannot distinguish if failure store was explicitly disabled or not. Given that failure store
-        // is still behind a feature flag in previous version we use the default value instead of explicitly disabling it.
-        DataStreamOptions dataStreamOptions = DataStreamOptions.EMPTY;
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
-            if (args[16] != null) {
-                dataStreamOptions = (DataStreamOptions) args[16];
-            } else if (failureStoreEnabled) {
-                dataStreamOptions = DataStreamOptions.FAILURE_STORE_ENABLED;
-            }
-        }
-        return new DataStream(
+    private static final ConstructingObjectParser<DataStream, Void> PARSER = new ConstructingObjectParser<>(
+        "data_stream",
+        args -> new DataStream(
             (String) args[0],
             (Long) args[2],
             (Map<String, Object>) args[3],
@@ -1325,16 +1299,21 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
             args[7] != null && (boolean) args[7],
             args[8] != null ? IndexMode.fromString((String) args[8]) : null,
             (DataStreamLifecycle) args[9],
-            dataStreamOptions,
+            args[16] != null ? (DataStreamOptions) args[16] : DataStreamOptions.EMPTY,
             new DataStreamIndices(
                 BACKING_INDEX_PREFIX,
                 (List<Index>) args[1],
                 args[10] != null && (boolean) args[10],
                 (DataStreamAutoShardingEvent) args[11]
             ),
-            failureIndices
+            new DataStreamIndices(
+                FAILURE_STORE_PREFIX,
+                args[13] != null ? (List<Index>) args[13] : List.of(),
+                args[14] != null && (boolean) args[14],
+                (DataStreamAutoShardingEvent) args[15]
+            )
+        )
     );
-    });
 
     static {
         PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD);
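The parser rewrite above drops the flag-dependent bookkeeping and leaves the ordinary ConstructingObjectParser shape, where each declare* call binds one constructor-argument slot in args. A stripped-down sketch of that shape, with an invented record and field names (import paths assumed to be the current org.elasticsearch.xcontent package):

    import org.elasticsearch.xcontent.ConstructingObjectParser;
    import org.elasticsearch.xcontent.ParseField;

    record Example(String name, boolean enabled) {

        // args[0] and args[1] correspond to the constructor arguments declared below, in order.
        static final ConstructingObjectParser<Example, Void> PARSER = new ConstructingObjectParser<>(
            "example",
            args -> new Example((String) args[0], args[1] != null && (boolean) args[1])
        );

        static {
            PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name"));
            PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), new ParseField("enabled"));
        }
    }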
@@ -1361,9 +1340,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
             (p, c) -> DataStreamAutoShardingEvent.fromXContent(p),
             AUTO_SHARDING_FIELD
         );
-        // The fields behind the feature flag should always be last.
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
-            // Should be removed after backport
+        // TODO: clear out the failure_store field, which is redundant https://github.com/elastic/elasticsearch/issues/127071
         PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD);
         PARSER.declareObjectArray(
             ConstructingObjectParser.optionalConstructorArg(),
@@ -1382,7 +1359,6 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
             DATA_STREAM_OPTIONS_FIELD
         );
     }
-    }
 
     public static DataStream fromXContent(XContentParser parser) throws IOException {
         return PARSER.parse(parser, null);
@@ -1417,7 +1393,6 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
         builder.field(REPLICATED_FIELD.getPreferredName(), replicated);
         builder.field(SYSTEM_FIELD.getPreferredName(), system);
         builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting);
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
         if (failureIndices.indices.isEmpty() == false) {
             builder.xContentList(FAILURE_INDICES_FIELD.getPreferredName(), failureIndices.indices);
         }
@@ -1431,7 +1406,6 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
             builder.field(DATA_STREAM_OPTIONS_FIELD.getPreferredName());
             dataStreamOptions.toXContent(builder, params);
         }
-        }
         if (indexMode != null) {
             builder.field(INDEX_MODE.getPreferredName(), indexMode);
         }
@@ -143,7 +143,7 @@ public class DataStreamAction implements Writeable, ToXContentObject {
         builder.startObject(type.fieldName);
         builder.field(DATA_STREAM.getPreferredName(), dataStream);
         builder.field(INDEX.getPreferredName(), index);
-        if (DataStream.isFailureStoreFeatureFlagEnabled() && failureStore) {
+        if (failureStore) {
             builder.field(FAILURE_STORE.getPreferredName(), failureStore);
         }
         builder.endObject();
@@ -181,14 +181,12 @@ public class DataStreamAction implements Writeable, ToXContentObject {
             ObjectParser.ValueType.STRING
         );
         ADD_BACKING_INDEX_PARSER.declareField(DataStreamAction::setIndex, XContentParser::text, INDEX, ObjectParser.ValueType.STRING);
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
         ADD_BACKING_INDEX_PARSER.declareField(
             DataStreamAction::setFailureStore,
             XContentParser::booleanValue,
             FAILURE_STORE,
             ObjectParser.ValueType.BOOLEAN
         );
-        }
         REMOVE_BACKING_INDEX_PARSER.declareField(
             DataStreamAction::setDataStream,
             XContentParser::text,
@@ -196,7 +194,6 @@ public class DataStreamAction implements Writeable, ToXContentObject {
             ObjectParser.ValueType.STRING
         );
         REMOVE_BACKING_INDEX_PARSER.declareField(DataStreamAction::setIndex, XContentParser::text, INDEX, ObjectParser.ValueType.STRING);
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
         REMOVE_BACKING_INDEX_PARSER.declareField(
             DataStreamAction::setFailureStore,
             XContentParser::booleanValue,
@@ -204,7 +201,6 @@ public class DataStreamAction implements Writeable, ToXContentObject {
             ObjectParser.ValueType.BOOLEAN
         );
         }
-    }
 
     private static ObjectParser<DataStreamAction, Void> parser(String name, Supplier<DataStreamAction> supplier) {
         ObjectParser<DataStreamAction, Void> parser = new ObjectParser<>(name, supplier);
@@ -46,12 +46,10 @@ public class DataStreamFailureStoreSettings {
      */
     public static DataStreamFailureStoreSettings create(ClusterSettings clusterSettings) {
         DataStreamFailureStoreSettings dataStreamFailureStoreSettings = new DataStreamFailureStoreSettings();
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
         clusterSettings.initializeAndWatch(
             DATA_STREAM_FAILURE_STORED_ENABLED_SETTING,
             dataStreamFailureStoreSettings::setEnabledByNamePatterns
         );
-        }
         return dataStreamFailureStoreSettings;
     }
 
@@ -61,7 +59,6 @@ public class DataStreamFailureStoreSettings {
      * @param name The data stream name
      */
     public boolean failureStoreEnabledForDataStreamName(String name) {
-        assert DataStream.isFailureStoreFeatureFlagEnabled() : "Testing whether failure store is enabled should be behind by feature flag";
         return failureStoreEnabledByName.test(name);
     }
 
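The create(...) method above now registers its settings listener unconditionally. The initializeAndWatch pattern it uses applies the current value of a cluster setting once and then re-applies it on every update. A hedged sketch with an invented setting and field (only initializeAndWatch itself is taken from the code above):

    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Setting;

    class MyFlagSettings {
        // Invented dynamic node-scope setting, for illustration only.
        static final Setting<Boolean> MY_FLAG_SETTING = Setting.boolSetting(
            "my_plugin.some_flag",
            false,
            Setting.Property.NodeScope,
            Setting.Property.Dynamic
        );

        private volatile boolean someFlag;

        void register(ClusterSettings clusterSettings) {
            // Applies the current value once, then again on every cluster settings update.
            clusterSettings.initializeAndWatch(MY_FLAG_SETTING, value -> this.someFlag = value);
        }

        boolean isSomeFlagEnabled() {
            return someFlag;
        }
    }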
@@ -422,10 +422,6 @@ public class MetadataCreateDataStreamService {
         String failureStoreIndexName,
         @Nullable BiConsumer<ProjectMetadata.Builder, IndexMetadata> metadataTransformer
     ) throws Exception {
-        if (DataStream.isFailureStoreFeatureFlagEnabled() == false) {
-            return currentState;
-        }
-
         var indexSettings = DataStreamFailureStoreDefinition.buildFailureStoreIndexSettings(nodeSettings);
 
         CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest(
@@ -1944,11 +1944,10 @@ public class ProjectMetadata implements Iterable<IndexMetadata>, Diffable<Projec
     private static boolean assertContainsIndexIfDataStream(DataStream parent, IndexMetadata indexMetadata) {
         assert parent == null
             || parent.getIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))
-            || (DataStream.isFailureStoreFeatureFlagEnabled()
-                && parent.getFailureComponent()
+            || parent.getFailureComponent()
                 .getIndices()
                 .stream()
-                .anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName())))
+                .anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))
             : "Expected data stream [" + parent.getName() + "] to contain index " + indexMetadata.getIndex();
         return true;
     }
@@ -1970,13 +1969,11 @@ public class ProjectMetadata implements Iterable<IndexMetadata>, Diffable<Projec
             for (Index i : dataStream.getIndices()) {
                 indexToDataStreamLookup.put(i.getName(), dataStream);
             }
-            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
             for (Index i : dataStream.getFailureIndices()) {
                 indexToDataStreamLookup.put(i.getName(), dataStream);
             }
         }
     }
-    }
 
     private static IndexAbstraction.Alias makeDsAliasAbstraction(Map<String, DataStream> dataStreams, DataStreamAlias alias) {
         Index writeIndexOfWriteDataStream = null;
@@ -314,9 +314,7 @@ public class Template implements SimpleDiffable<Template>, ToXContentObject {
             builder.field(LIFECYCLE.getPreferredName());
             lifecycle.toXContent(builder, params, rolloverConfiguration, null, false);
         }
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
         dataStreamOptions.toXContent(builder, params, DATA_STREAM_OPTIONS.getPreferredName());
-        }
         builder.endObject();
         return builder;
     }
@@ -38,7 +38,6 @@ import org.elasticsearch.cluster.coordination.LeaderChecker;
 import org.elasticsearch.cluster.coordination.MasterHistory;
 import org.elasticsearch.cluster.coordination.NoMasterBlockService;
 import org.elasticsearch.cluster.coordination.Reconfigurator;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings;
 import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings;
 import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
@@ -139,12 +138,8 @@ import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.transport.TransportSettings;
 import org.elasticsearch.watcher.ResourceWatcherService;
 
-import java.util.Objects;
 import java.util.Set;
 import java.util.function.Predicate;
-import java.util.stream.Stream;
 
-import static java.util.stream.Collectors.toSet;
-
 /**
  * Encapsulates all valid cluster level settings.
@@ -215,7 +210,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
         }
     }
 
-    public static final Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Stream.of(
+    public static final Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Set.of(
         AllocationBalancingRoundSummaryService.ENABLE_BALANCER_ROUND_SUMMARIES_SETTING,
         AllocationBalancingRoundSummaryService.BALANCER_ROUND_SUMMARIES_LOG_INTERVAL_SETTING,
         AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
@@ -637,8 +632,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
         DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING,
         DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING,
         ShardsAvailabilityHealthIndicatorService.REPLICA_UNASSIGNED_BUFFER_TIME,
-        DataStream.isFailureStoreFeatureFlagEnabled() ? DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING : null,
+        DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING,
         IndexingStatsSettings.RECENT_WRITE_LOAD_HALF_LIFE_SETTING,
         TransportGetAllocationStatsAction.CACHE_TTL_SETTING
-    ).filter(Objects::nonNull).collect(toSet());
+    );
 }
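The BUILT_IN_CLUSTER_SETTINGS change above is mechanical but worth spelling out: the old Stream.of(...).filter(Objects::nonNull).collect(toSet()) construction existed only because the flag-gated entry could evaluate to null, and Set.of(...) rejects null elements outright. A small, self-contained illustration in plain Java (names invented):

    import java.util.Objects;
    import java.util.Set;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    class NullableEntryExample {
        static Set<String> oldStyle(boolean featureEnabled) {
            // A conditional entry may be null, so it has to be filtered out before collecting.
            return Stream.of("always-there", featureEnabled ? "sometimes-there" : null)
                .filter(Objects::nonNull)
                .collect(Collectors.toSet());
        }

        static Set<String> newStyle() {
            // Set.of throws NullPointerException on null elements, which is fine once
            // every entry is unconditional.
            return Set.of("always-there", "sometimes-there");
        }
    }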
@@ -12,7 +12,6 @@ package org.elasticsearch.rest.action.admin.indices;
 import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.cluster.metadata.ComponentTemplate;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.Scope;
@@ -59,6 +58,6 @@ public class RestPutComponentTemplateAction extends BaseRestHandler {
 
     @Override
     public Set<String> supportedCapabilities() {
-        return DataStream.isFailureStoreFeatureFlagEnabled() ? capabilities : Set.of();
+        return capabilities;
     }
 }
@@ -12,7 +12,6 @@ package org.elasticsearch.rest.action.admin.indices;
 import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
 import org.elasticsearch.client.internal.node.NodeClient;
 import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.Scope;
@@ -61,6 +60,6 @@ public class RestPutComposableIndexTemplateAction extends BaseRestHandler {
 
     @Override
     public Set<String> supportedCapabilities() {
-        return DataStream.isFailureStoreFeatureFlagEnabled() ? capabilities : Set.of();
+        return capabilities;
     }
 }
@@ -12,7 +12,6 @@ package org.elasticsearch.rest.action.admin.indices;
 import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.rest.BaseRestHandler;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.rest.Scope;
@@ -43,11 +42,7 @@ public class RestRolloverIndexAction extends BaseRestHandler {
 
     @Override
     public Set<String> supportedCapabilities() {
-        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
         return Set.of("return-404-on-missing-target", "index_expression_selectors");
-        } else {
-            return Set.of("return-404-on-missing-target");
-        }
     }
 
     @Override
@@ -18,7 +18,6 @@ import org.elasticsearch.action.bulk.BulkShardRequest;
 import org.elasticsearch.action.bulk.IncrementalBulkService;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.bytes.CompositeBytesReference;
 import org.elasticsearch.common.bytes.ReleasableBytesReference;
@@ -69,7 +68,7 @@ public class RestBulkAction extends BaseRestHandler {
     public RestBulkAction(Settings settings, IncrementalBulkService bulkHandler) {
         this.allowExplicitIndex = MULTI_ALLOW_EXPLICIT_INDEX.get(settings);
         this.bulkHandler = bulkHandler;
-        this.capabilities = DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(FAILURE_STORE_STATUS_CAPABILITY) : Set.of();
+        this.capabilities = Set.of(FAILURE_STORE_STATUS_CAPABILITY);
     }
 
     @Override
@@ -15,7 +15,6 @@ import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.client.internal.node.NodeClient;
-import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.common.bytes.ReleasableBytesReference;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.rest.BaseRestHandler;
@@ -40,9 +39,7 @@ public class RestIndexAction extends BaseRestHandler {
     static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in document "
         + "index requests is deprecated, use the typeless endpoints instead (/{index}/_doc/{id}, /{index}/_doc, "
        + "or /{index}/_create/{id}).";
-    private final Set<String> capabilities = DataStream.isFailureStoreFeatureFlagEnabled()
-        ? Set.of(FAILURE_STORE_STATUS_CAPABILITY)
-        : Set.of();
+    private final Set<String> capabilities = Set.of(FAILURE_STORE_STATUS_CAPABILITY);
 
     @Override
     public List<Route> routes() {
@@ -425,13 +425,10 @@ public final class RestoreService implements ClusterStateApplier {
                 .stream()
                 .flatMap(ds -> ds.getIndices().stream().map(idx -> new Tuple<>(ds.isSystem(), idx.getName())))
                 .collect(Collectors.partitioningBy(Tuple::v1, Collectors.mapping(Tuple::v2, Collectors.toSet())));
-            Map<Boolean, Set<String>> failureIndices = Map.of();
-            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
-                failureIndices = dataStreamsToRestore.values()
+            Map<Boolean, Set<String>> failureIndices = dataStreamsToRestore.values()
                 .stream()
                 .flatMap(ds -> ds.getFailureIndices().stream().map(idx -> new Tuple<>(ds.isSystem(), idx.getName())))
                 .collect(Collectors.partitioningBy(Tuple::v1, Collectors.mapping(Tuple::v2, Collectors.toSet())));
-            }
             systemDataStreamIndices = Sets.union(backingIndices.getOrDefault(true, Set.of()), failureIndices.getOrDefault(true, Set.of()));
             nonSystemDataStreamBackingIndices = backingIndices.getOrDefault(false, Set.of());
             nonSystemDataStreamFailureIndices = failureIndices.getOrDefault(false, Set.of());
@@ -812,13 +809,11 @@ public final class RestoreService implements ClusterStateApplier {
             .stream()
             .map(i -> metadata.get(renameIndex(i.getName(), request, true, false)).getIndex())
             .toList();
-        List<Index> updatedFailureIndices = DataStream.isFailureStoreFeatureFlagEnabled()
-            ? dataStream.getFailureComponent()
+        List<Index> updatedFailureIndices = dataStream.getFailureComponent()
             .getIndices()
             .stream()
             .map(i -> metadata.get(renameIndex(i.getName(), request, false, true)).getIndex())
-            .toList()
-            : List.of();
+            .toList();
         return dataStream.copy()
             .setName(dataStreamName)
             .setBackingIndices(dataStream.getDataComponent().copy().setIndices(updatedIndices).build())
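Both the backing-index and failure-index branches above lean on the same Collectors.partitioningBy idiom: it splits the (isSystem, indexName) tuples into a map keyed by true (system data streams) and false (everything else). A tiny standalone sketch with invented index names:

    import java.util.Map;
    import java.util.Set;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    import org.elasticsearch.core.Tuple;

    class PartitioningExample {
        static void demo() {
            Map<Boolean, Set<String>> bySystem = Stream.of(
                new Tuple<>(true, ".fs-some-system-stream-000001"),
                new Tuple<>(false, ".ds-logs-app-000001")
            ).collect(Collectors.partitioningBy(Tuple::v1, Collectors.mapping(Tuple::v2, Collectors.toSet())));

            Set<String> systemIndices = bySystem.get(true);       // indices of system data streams
            Set<String> nonSystemIndices = bySystem.get(false);   // indices of regular data streams
        }
    }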
@@ -60,7 +60,6 @@ import org.elasticsearch.test.client.NoOpNodeClient;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.junit.After;
-import org.junit.Assume;
 import org.junit.Before;
 
 import java.io.IOException;
@@ -392,8 +391,6 @@ public class BulkOperationTests extends ESTestCase {
      * A bulk operation to a data stream with a failure store enabled should redirect any shard level failures to the failure store.
      */
     public void testFailingEntireShardRedirectsToFailureStore() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
@@ -419,8 +416,6 @@ public class BulkOperationTests extends ESTestCase {
      * failure store.
      */
     public void testFailingDocumentRedirectsToFailureStore() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
@@ -446,8 +441,6 @@ public class BulkOperationTests extends ESTestCase {
      * failure store if the failure store node feature is not on every node in the cluster
      */
     public void testFailingDocumentIgnoredByFailureStoreWhenFeatureIsDisabled() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
@@ -481,8 +474,6 @@ public class BulkOperationTests extends ESTestCase {
     }
 
     public void testFailingDocumentRedirectsToFailureStoreWhenEnabledByClusterSetting() {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(
             new IndexRequest(fsBySettingsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)
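The lines deleted throughout these test hunks are JUnit 4 assumptions: Assume.assumeTrue does not fail a test, it marks it as skipped when the condition is false, which is how these cases were silently disabled whenever the feature flag was off. With the flag gone they always run. A minimal, generic reminder of the mechanism (class and property names invented):

    import org.junit.Assume;
    import org.junit.Test;

    public class AssumeExampleTests {

        @Test
        public void onlyRunsWhenAssumptionHolds() {
            // If the argument is false, JUnit reports the test as skipped rather than failed,
            // and nothing below this line executes.
            Assume.assumeTrue(Boolean.getBoolean("tests.my_optional_feature"));

            // ... actual assertions would go here ...
        }
    }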
@@ -538,8 +529,6 @@ public class BulkOperationTests extends ESTestCase {
      * a shard-level failure while writing to the failure store indices.
      */
     public void testFailureStoreShardFailureRejectsDocument() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
@@ -578,8 +567,6 @@ public class BulkOperationTests extends ESTestCase {
      * instead will simply report its original failure in the response, with the conversion failure present as a suppressed exception.
      */
     public void testFailedDocumentCanNotBeConvertedFails() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
@@ -614,8 +601,6 @@ public class BulkOperationTests extends ESTestCase {
      * returns an unblocked cluster, the redirection of failure documents should proceed and not return early.
      */
     public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
@@ -707,8 +692,6 @@ public class BulkOperationTests extends ESTestCase {
      * non-retryable block when the redirected documents would be sent to the shard-level action.
      */
     public void testBlockedClusterRejectsFailureStoreDocument() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
@@ -760,8 +743,6 @@ public class BulkOperationTests extends ESTestCase {
      * retryable block to clear when the redirected documents would be sent to the shard-level action.
      */
     public void testOperationTimeoutRejectsFailureStoreDocument() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
@@ -821,8 +802,6 @@ public class BulkOperationTests extends ESTestCase {
      * for a retryable block to clear when the redirected documents would be sent to the shard-level action.
      */
     public void testNodeClosureRejectsFailureStoreDocument() {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
@@ -866,8 +845,6 @@ public class BulkOperationTests extends ESTestCase {
      * rollover, it first needs to roll over the failure store and then redirect the failure to the <i>new</i> failure index.
      */
     public void testLazilyRollingOverFailureStore() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(
@@ -925,8 +902,6 @@ public class BulkOperationTests extends ESTestCase {
      * should be added to the list of suppressed causes in the <code>BulkItemResponse</code>.
      */
     public void testFailureWhileRollingOverFailureStore() throws Exception {
-        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
         // Requests that go to two separate shards
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(
@@ -90,7 +90,6 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
-import static org.junit.Assume.assumeThat;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -453,8 +452,6 @@ public class TransportBulkActionTests extends ESTestCase {
     }
 
     public void testResolveFailureStoreFromMetadata() throws Exception {
-        assumeThat(DataStream.isFailureStoreFeatureFlagEnabled(), is(true));
-
         String dataStreamWithFailureStoreEnabled = "test-data-stream-failure-enabled";
         String dataStreamWithFailureStoreDefault = "test-data-stream-failure-default";
         String dataStreamWithFailureStoreDisabled = "test-data-stream-failure-disabled";
@@ -544,8 +541,6 @@ public class TransportBulkActionTests extends ESTestCase {
     }
 
     public void testResolveFailureStoreFromTemplate() throws Exception {
-        assumeThat(DataStream.isFailureStoreFeatureFlagEnabled(), is(true));
-
         String dsTemplateWithFailureStoreEnabled = "test-data-stream-failure-enabled";
         String dsTemplateWithFailureStoreDefault = "test-data-stream-failure-default";
         String dsTemplateWithFailureStoreDisabled = "test-data-stream-failure-disabled";
@@ -491,12 +491,7 @@ public final class DataStreamTestHelper {
             "template_1",
             ComposableIndexTemplate.builder()
                 .indexPatterns(List.of("*"))
-                .template(
-                    Template.builder()
-                        .dataStreamOptions(
-                            DataStream.isFailureStoreFeatureFlagEnabled() ? createDataStreamOptionsTemplate(storeFailures) : null
-                        )
-                )
+                .template(Template.builder().dataStreamOptions(createDataStreamOptionsTemplate(storeFailures)))
                 .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
                 .build()
         );
@@ -512,7 +507,7 @@ public final class DataStreamTestHelper {
         allIndices.addAll(backingIndices);
 
         List<IndexMetadata> failureStores = new ArrayList<>();
-        if (DataStream.isFailureStoreFeatureFlagEnabled() && Boolean.TRUE.equals(storeFailures)) {
+        if (Boolean.TRUE.equals(storeFailures)) {
             for (int failureStoreNumber = 1; failureStoreNumber <= dsTuple.v2(); failureStoreNumber++) {
                 failureStores.add(
                     createIndexMetadata(
@@ -17,7 +17,6 @@ import org.elasticsearch.test.cluster.util.Version;
  */
 public enum FeatureFlag {
     TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null),
-    FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null),
     SUB_OBJECTS_AUTO_ENABLED("es.sub_objects_auto_feature_flag_enabled=true", Version.fromString("8.16.0"), null),
     DOC_VALUES_SKIPPER("es.doc_values_skipper_feature_flag_enabled=true", Version.fromString("8.18.1"), null),
     USE_LUCENE101_POSTINGS_FORMAT("es.use_lucene101_postings_format_feature_flag_enabled=true", Version.fromString("9.1.0"), null);
@ -153,12 +153,6 @@ tasks.named("javaRestTest") {
|
||||||
usesDefaultDistribution("uses the _xpack api")
|
usesDefaultDistribution("uses the _xpack api")
|
||||||
}
|
}
|
||||||
|
|
||||||
if (buildParams.snapshotBuild == false) {
|
|
||||||
tasks.withType(Test).configureEach {
|
|
||||||
systemProperty 'es.failure_store_feature_flag_enabled', 'true'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (buildParams.inFipsJvm) {
|
if (buildParams.inFipsJvm) {
|
||||||
// Test clusters run with security disabled
|
// Test clusters run with security disabled
|
||||||
tasks.named("javaRestTest").configure { enabled = false }
|
tasks.named("javaRestTest").configure { enabled = false }
|
||||||
|
|
|
@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.SecureString;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
|
||||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||||
import org.elasticsearch.xcontent.XContentParser;
|
import org.elasticsearch.xcontent.XContentParser;
|
||||||
|
@ -37,7 +36,6 @@ public class DataStreamRestIT extends ESRestTestCase {
|
||||||
.setting("xpack.security.enabled", "true")
|
.setting("xpack.security.enabled", "true")
|
||||||
.setting("xpack.license.self_generated.type", "trial")
|
.setting("xpack.license.self_generated.type", "trial")
|
||||||
.setting("indices.lifecycle.history_index_enabled", "false")
|
.setting("indices.lifecycle.history_index_enabled", "false")
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.keystore("bootstrap.password", "x-pack-test-password")
|
.keystore("bootstrap.password", "x-pack-test-password")
|
||||||
.user("x_pack_rest_user", "x-pack-test-password")
|
.user("x_pack_rest_user", "x-pack-test-password")
|
||||||
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
||||||
|
|
|
@ -19,7 +19,6 @@ import org.elasticsearch.license.License;
|
||||||
import org.elasticsearch.license.LicenseSettings;
|
import org.elasticsearch.license.LicenseSettings;
|
||||||
import org.elasticsearch.license.TestUtils;
|
import org.elasticsearch.license.TestUtils;
|
||||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
|
||||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||||
import org.elasticsearch.xcontent.ToXContent;
|
import org.elasticsearch.xcontent.ToXContent;
|
||||||
import org.elasticsearch.xcontent.XContentBuilder;
|
import org.elasticsearch.xcontent.XContentBuilder;
|
||||||
|
@ -46,7 +45,6 @@ public class LicenseInstallationIT extends ESRestTestCase {
|
||||||
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
|
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
|
||||||
.setting("xpack.security.enabled", "true")
|
.setting("xpack.security.enabled", "true")
|
||||||
.setting("xpack.license.self_generated.type", "trial")
|
.setting("xpack.license.self_generated.type", "trial")
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.keystore("bootstrap.password", "x-pack-test-password")
|
.keystore("bootstrap.password", "x-pack-test-password")
|
||||||
.user("x_pack_rest_user", "x-pack-test-password")
|
.user("x_pack_rest_user", "x-pack-test-password")
|
||||||
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
||||||
|
|
|
@ -13,7 +13,6 @@ import org.elasticsearch.common.settings.SecureString;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
|
||||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||||
import org.junit.ClassRule;
|
import org.junit.ClassRule;
|
||||||
|
@ -27,7 +26,6 @@ public class StackTemplatesRestIT extends ESRestTestCase {
|
||||||
.distribution(DistributionType.DEFAULT)
|
.distribution(DistributionType.DEFAULT)
|
||||||
.setting("xpack.security.enabled", "true")
|
.setting("xpack.security.enabled", "true")
|
||||||
.setting("xpack.license.self_generated.type", "trial")
|
.setting("xpack.license.self_generated.type", "trial")
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.keystore("bootstrap.password", "x-pack-test-password")
|
.keystore("bootstrap.password", "x-pack-test-password")
|
||||||
.user("x_pack_rest_user", "x-pack-test-password")
|
.user("x_pack_rest_user", "x-pack-test-password")
|
||||||
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
||||||
|
|
|
@ -52,7 +52,6 @@ public class DataStreamUsageTransportAction extends XPackUsageFeatureTransportAc
|
||||||
long failureIndicesCounter = 0;
|
long failureIndicesCounter = 0;
|
||||||
for (DataStream ds : dataStreams.values()) {
|
for (DataStream ds : dataStreams.values()) {
|
||||||
backingIndicesCounter += ds.getIndices().size();
|
backingIndicesCounter += ds.getIndices().size();
|
||||||
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
|
|
||||||
if (ds.isFailureStoreExplicitlyEnabled()) {
|
if (ds.isFailureStoreExplicitlyEnabled()) {
|
||||||
failureStoreExplicitlyEnabledCounter++;
|
failureStoreExplicitlyEnabledCounter++;
|
||||||
}
|
}
|
||||||
|
@ -63,7 +62,6 @@ public class DataStreamUsageTransportAction extends XPackUsageFeatureTransportAc
|
||||||
failureIndicesCounter += ds.getFailureIndices().size();
|
failureIndicesCounter += ds.getFailureIndices().size();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
final DataStreamFeatureSetUsage.DataStreamStats stats = new DataStreamFeatureSetUsage.DataStreamStats(
|
final DataStreamFeatureSetUsage.DataStreamStats stats = new DataStreamFeatureSetUsage.DataStreamStats(
|
||||||
dataStreams.size(),
|
dataStreams.size(),
|
||||||
backingIndicesCounter,
|
backingIndicesCounter,
|
||||||
|
|
|
@ -9,7 +9,6 @@ package org.elasticsearch.xpack.core.datastreams;
|
||||||
|
|
||||||
import org.elasticsearch.TransportVersion;
|
import org.elasticsearch.TransportVersion;
|
||||||
import org.elasticsearch.TransportVersions;
|
import org.elasticsearch.TransportVersions;
|
||||||
import org.elasticsearch.cluster.metadata.DataStream;
|
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
import org.elasticsearch.common.io.stream.StreamInput;
|
import org.elasticsearch.common.io.stream.StreamInput;
|
||||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||||
|
@ -50,14 +49,12 @@ public class DataStreamFeatureSetUsage extends XPackFeatureUsage {
|
||||||
super.innerXContent(builder, params);
|
super.innerXContent(builder, params);
|
||||||
builder.field("data_streams", streamStats.totalDataStreamCount);
|
builder.field("data_streams", streamStats.totalDataStreamCount);
|
||||||
builder.field("indices_count", streamStats.indicesBehindDataStream);
|
builder.field("indices_count", streamStats.indicesBehindDataStream);
|
||||||
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
|
|
||||||
builder.startObject("failure_store");
|
builder.startObject("failure_store");
|
||||||
builder.field("explicitly_enabled_count", streamStats.failureStoreExplicitlyEnabledDataStreamCount);
|
builder.field("explicitly_enabled_count", streamStats.failureStoreExplicitlyEnabledDataStreamCount);
|
||||||
builder.field("effectively_enabled_count", streamStats.failureStoreEffectivelyEnabledDataStreamCount);
|
builder.field("effectively_enabled_count", streamStats.failureStoreEffectivelyEnabledDataStreamCount);
|
||||||
builder.field("failure_indices_count", streamStats.failureStoreIndicesCount);
|
builder.field("failure_indices_count", streamStats.failureStoreIndicesCount);
|
||||||
builder.endObject();
|
builder.endObject();
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String toString() {
|
public String toString() {
|
||||||
|
|
|
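With the feature-flag guard gone, innerXContent now always writes the failure store counters into the data streams usage section. As a rough sketch only: the field names below come from the builder calls in the hunk above, while the counts and the surrounding response structure are placeholder assumptions, not output captured from a real cluster:

    "data_streams" : 12,
    "indices_count" : 38,
    "failure_store" : {
      "explicitly_enabled_count" : 3,
      "effectively_enabled_count" : 5,
      "failure_indices_count" : 7
    }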
@@ -8,7 +8,6 @@
  package org.elasticsearch.xpack.core.security.action.role;

  import org.elasticsearch.action.ActionRequestValidationException;
- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
  import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
  import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege;

@@ -55,13 +54,11 @@ public class RoleDescriptorRequestValidator {
  } catch (IllegalArgumentException ile) {
  validationException = addValidationError(ile.getMessage(), validationException);
  }
- if (DataStream.isFailureStoreFeatureFlagEnabled()) {
  for (final String indexName : idp.getIndices()) {
  validationException = validateIndexNameExpression(indexName, validationException);
  }
  }
  }
- }
  final RoleDescriptor.RemoteIndicesPrivileges[] remoteIndicesPrivileges = roleDescriptor.getRemoteIndicesPrivileges();
  for (RoleDescriptor.RemoteIndicesPrivileges ridp : remoteIndicesPrivileges) {
  if (Arrays.asList(ridp.remoteClusters()).contains("")) {

@@ -78,12 +75,10 @@ public class RoleDescriptorRequestValidator {
  } catch (IllegalArgumentException ile) {
  validationException = addValidationError(ile.getMessage(), validationException);
  }
- if (DataStream.isFailureStoreFeatureFlagEnabled()) {
  for (String indexName : ridp.indicesPrivileges().getIndices()) {
  validationException = validateIndexNameExpression(indexName, validationException);
  }
  }
- }
  if (roleDescriptor.hasRemoteClusterPermissions()) {
  try {
  roleDescriptor.getRemoteClusterPermissions().validate();

@@ -13,7 +13,6 @@ import org.elasticsearch.action.ActionRequestValidationException;
  import org.elasticsearch.action.IndicesRequest;
  import org.elasticsearch.action.support.IndexComponentSelector;
  import org.elasticsearch.action.support.SubscribableListener;
- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.cluster.metadata.IndexAbstraction;
  import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
  import org.elasticsearch.cluster.metadata.ProjectMetadata;

@@ -348,7 +347,6 @@ public interface AuthorizationEngine {
  validationException
  );
  }
- if (DataStream.isFailureStoreFeatureFlagEnabled()) {
  // best effort prevent users from attempting to use selectors in privilege check
  for (String indexPattern : indicesPrivileges.getIndices()) {
  if (IndexNameExpressionResolver.hasSelector(indexPattern, IndexComponentSelector.FAILURES)

@@ -362,7 +360,6 @@ public interface AuthorizationEngine {
  }
  }
  }
- }
  if (application == null) {
  validationException = addValidationError("applicationPrivileges must not be null", validationException);
  } else {

@@ -32,7 +32,6 @@ import org.elasticsearch.action.datastreams.PromoteDataStreamAction;
  import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction;
  import org.elasticsearch.action.search.TransportSearchShardsAction;
  import org.elasticsearch.action.support.IndexComponentSelector;
- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.common.Strings;
  import org.elasticsearch.common.util.set.Sets;
  import org.elasticsearch.core.Nullable;

@@ -52,7 +51,6 @@ import java.util.HashSet;
  import java.util.LinkedHashMap;
  import java.util.Locale;
  import java.util.Map;
- import java.util.Objects;
  import java.util.Set;
  import java.util.SortedMap;
  import java.util.concurrent.ConcurrentHashMap;

@@ -238,10 +236,8 @@ public final class IndexPrivilege extends Privilege {
  */
  private static final Map<String, IndexPrivilege> VALUES = combineSortedInOrder(
  sortByAccessLevel(
- Stream.of(
- DataStream.isFailureStoreFeatureFlagEnabled() ? entry("read_failure_store", READ_FAILURE_STORE) : null,
- DataStream.isFailureStoreFeatureFlagEnabled() ? entry("manage_failure_store", MANAGE_FAILURE_STORE) : null
- ).filter(Objects::nonNull).collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue))
+ Stream.of(entry("read_failure_store", READ_FAILURE_STORE), entry("manage_failure_store", MANAGE_FAILURE_STORE))
+ .collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue))
  ),
  sortByAccessLevel(
  Stream.of(

@@ -28,7 +28,6 @@ import org.elasticsearch.action.downsample.DownsampleAction;
  import org.elasticsearch.action.index.TransportIndexAction;
  import org.elasticsearch.action.search.TransportSearchAction;
  import org.elasticsearch.action.search.TransportSearchScrollAction;
- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.index.reindex.ReindexAction;
  import org.elasticsearch.xpack.core.XPackPlugin;
  import org.elasticsearch.xpack.core.ilm.action.ILMActions;

@@ -161,7 +160,7 @@ public class InternalUsers {
  .privileges(
  filterNonNull(
  // needed to rollover failure store
- DataStream.isFailureStoreFeatureFlagEnabled() ? "manage_failure_store" : null,
+ "manage_failure_store",
  "delete_index",
  RolloverAction.NAME,
  ForceMergeAction.NAME + "*",

@@ -184,7 +183,7 @@ public class InternalUsers {
  .privileges(
  filterNonNull(
  // needed to rollover failure store
- DataStream.isFailureStoreFeatureFlagEnabled() ? "manage_failure_store" : null,
+ "manage_failure_store",
  "delete_index",
  RolloverAction.NAME,
  ForceMergeAction.NAME + "*",

@@ -262,7 +261,7 @@ public class InternalUsers {
  .privileges(
  filterNonNull(
  // needed to rollover failure store
- DataStream.isFailureStoreFeatureFlagEnabled() ? "manage_failure_store" : null,
+ "manage_failure_store",
  LazyRolloverAction.NAME
  )
  )

@@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.security.action.role;

  import org.elasticsearch.action.ActionRequestValidationException;
  import org.elasticsearch.action.support.WriteRequest;
- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.test.ESTestCase;
  import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges;
  import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup;

@@ -50,7 +49,6 @@ public class PutRoleRequestTests extends ESTestCase {
  }

  public void testValidationErrorWithFailureStorePrivilegeInRemoteIndices() {
- assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
  final PutRoleRequest request = new PutRoleRequest();
  request.name(randomAlphaOfLengthBetween(4, 9));
  request.addRemoteIndex(

@@ -7,7 +7,6 @@

  package org.elasticsearch.xpack.core.security.action.role;

- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.test.ESTestCase;
  import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
  import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges;

@@ -23,7 +22,6 @@ import static org.hamcrest.Matchers.nullValue;
  public class RoleDescriptorRequestValidatorTests extends ESTestCase {

  public void testSelectorsValidation() {
- assumeTrue("failure store feature flag must be enabled", DataStream.isFailureStoreFeatureFlagEnabled());
  String[] invalidIndexNames = {
  "index::failures",
  ".fs-*::failures",

@@ -14,7 +14,6 @@ import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction;
  import org.elasticsearch.action.bulk.TransportBulkAction;
  import org.elasticsearch.action.search.TransportSearchAction;
  import org.elasticsearch.cluster.metadata.AliasMetadata;
- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.cluster.metadata.IndexAbstraction;
  import org.elasticsearch.cluster.metadata.IndexMetadata;
  import org.elasticsearch.cluster.metadata.Metadata;

@@ -648,7 +647,6 @@ public class LimitedRoleTests extends ESTestCase {
  }

  public void testAllowedActionsMatcherWithSelectors() {
- assumeTrue("failure store feature must be enabled", DataStream.isFailureStoreFeatureFlagEnabled());
  Role fromRole = Role.builder(EMPTY_RESTRICTED_INDICES, "fromRole")
  .add(IndexPrivilege.READ_FAILURE_STORE, "ind*")
  .add(IndexPrivilege.READ, "ind*")

@@ -13,7 +13,6 @@ import org.elasticsearch.action.delete.TransportDeleteAction;
  import org.elasticsearch.action.index.TransportIndexAction;
  import org.elasticsearch.action.search.TransportSearchAction;
  import org.elasticsearch.action.update.TransportUpdateAction;
- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.common.util.iterable.Iterables;
  import org.elasticsearch.test.ESTestCase;
  import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction;

@@ -70,7 +69,6 @@ public class IndexPrivilegeTests extends ESTestCase {
  );
  assertThat(findPrivilegesThatGrant(RefreshAction.NAME), equalTo(List.of("maintenance", "manage", "all")));

- if (DataStream.isFailureStoreFeatureFlagEnabled()) {
  Predicate<IndexPrivilege> failuresOnly = p -> p.getSelectorPredicate() == IndexComponentSelectorPredicate.FAILURES;
  assertThat(findPrivilegesThatGrant(TransportSearchAction.TYPE.name(), failuresOnly), equalTo(List.of("read_failure_store")));
  assertThat(findPrivilegesThatGrant(TransportIndexAction.NAME, failuresOnly), equalTo(List.of()));

@@ -79,7 +77,6 @@ public class IndexPrivilegeTests extends ESTestCase {
  assertThat(findPrivilegesThatGrant(IndicesStatsAction.NAME, failuresOnly), equalTo(List.of("manage_failure_store")));
  assertThat(findPrivilegesThatGrant(RefreshAction.NAME, failuresOnly), equalTo(List.of("manage_failure_store")));
  }
- }

  public void testGet() {
  {

@@ -117,7 +114,6 @@ public class IndexPrivilegeTests extends ESTestCase {
  }

  public void testResolveSameSelectorPrivileges() {
- assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
  {
  IndexPrivilege actual = resolvePrivilegeAndAssertSingleton(Set.of("read_failure_store"));
  assertThat(actual, equalTo(IndexPrivilege.READ_FAILURE_STORE));

@@ -144,7 +140,6 @@ public class IndexPrivilegeTests extends ESTestCase {
  }

  public void testResolveBySelectorAccess() {
- assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
  {
  Set<IndexPrivilege> actual = IndexPrivilege.resolveBySelectorAccess(Set.of("read_failure_store"));
  assertThat(actual, containsInAnyOrder(IndexPrivilege.READ_FAILURE_STORE));

@@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.SecureString;
  import org.elasticsearch.common.settings.Settings;
  import org.elasticsearch.common.util.concurrent.ThreadContext;
  import org.elasticsearch.test.cluster.ElasticsearchCluster;
- import org.elasticsearch.test.cluster.FeatureFlag;
  import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
  import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
  import org.junit.ClassRule;

@@ -25,7 +24,6 @@ public class XPackCoreClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
  public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
  .setting("xpack.security.enabled", "true")
  .setting("xpack.license.self_generated.type", "trial")
- .feature(FeatureFlag.FAILURE_STORE_ENABLED)
  .keystore("bootstrap.password", "x-pack-test-password")
  .user("x_pack_rest_user", "x-pack-test-password")
  .systemProperty("es.queryable_built_in_roles_enabled", "false")

@@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder;
  import org.elasticsearch.action.support.WriteRequest;
  import org.elasticsearch.client.internal.ClusterAdminClient;
  import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.cluster.metadata.DataStreamFailureStore;
  import org.elasticsearch.cluster.metadata.DataStreamOptions;
  import org.elasticsearch.cluster.metadata.IndexMetadata;

@@ -55,7 +54,6 @@ import org.elasticsearch.xpack.esql.core.type.DataType;
  import org.elasticsearch.xpack.esql.parser.ParsingException;
  import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
  import org.elasticsearch.xpack.esql.plugin.QueryPragmas;
- import org.junit.Assume;
  import org.junit.Before;

  import java.io.IOException;

@@ -1027,8 +1025,6 @@ public class EsqlActionIT extends AbstractEsqlIntegTestCase {
  }

  public void testDataStreamPatterns() throws Exception {
- Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
  Map<String, Long> testCases = new HashMap<>();
  // Concrete data stream with each selector
  testCases.put("test_ds_patterns_1", 5L);

@@ -1087,8 +1083,6 @@ public class EsqlActionIT extends AbstractEsqlIntegTestCase {
  }

  public void testDataStreamInvalidPatterns() throws Exception {
- Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
-
  Map<String, String> testCases = new HashMap<>();
  // === Errors
  // Only recognized components can be selected

@@ -100,7 +100,7 @@ indexPatternAndMetadataFields:

  indexPattern
  : (clusterString COLON)? indexString
- | {this.isDevVersion()}? indexString (CAST_OP selectorString)?
+ | indexString (CAST_OP selectorString)?
  ;

  clusterString

@@ -18,7 +18,7 @@ FROM_PIPE : PIPE -> type(PIPE), popMode;
  FROM_OPENING_BRACKET : OPENING_BRACKET -> type(OPENING_BRACKET);
  FROM_CLOSING_BRACKET : CLOSING_BRACKET -> type(CLOSING_BRACKET);
  FROM_COLON : COLON -> type(COLON);
- FROM_SELECTOR : {this.isDevVersion()}? CAST_OP -> type(CAST_OP);
+ FROM_SELECTOR : CAST_OP -> type(CAST_OP);
  FROM_COMMA : COMMA -> type(COMMA);
  FROM_ASSIGN : ASSIGN -> type(ASSIGN);
  METADATA : 'metadata';

@@ -8,7 +8,6 @@
  package org.elasticsearch.xpack.esql.action;

  import org.elasticsearch.Build;
- import org.elasticsearch.cluster.metadata.DataStream;
  import org.elasticsearch.common.util.FeatureFlag;
  import org.elasticsearch.features.NodeFeature;
  import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction;

@@ -948,7 +947,7 @@ public class EsqlCapabilities {
  /**
  * Index component selector syntax (my-data-stream-name::failures)
  */
- INDEX_COMPONENT_SELECTORS(DataStream.isFailureStoreFeatureFlagEnabled()),
+ INDEX_COMPONENT_SELECTORS,

  /**
  * Make numberOfChannels consistent with layout in DefaultLayout by removing duplicated ChannelSet.
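With the isDevVersion() gates removed from the grammar, the lexer, and the capability above, the index component selector syntax is accepted in regular builds. A hedged illustration of a query against a data stream's failure store, assuming a made-up data stream name my-logs and submitted through the standard ES|QL _query endpoint:

    POST /_query
    {
      "query": "FROM my-logs::failures | LIMIT 10"
    }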
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
@ -17,7 +17,6 @@ import org.elasticsearch.core.Nullable;
|
||||||
import org.elasticsearch.core.Strings;
|
import org.elasticsearch.core.Strings;
|
||||||
import org.elasticsearch.core.Tuple;
|
import org.elasticsearch.core.Tuple;
|
||||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
|
||||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||||
import org.elasticsearch.test.cluster.util.resource.Resource;
|
import org.elasticsearch.test.cluster.util.resource.Resource;
|
||||||
import org.elasticsearch.test.rest.ObjectPath;
|
import org.elasticsearch.test.rest.ObjectPath;
|
||||||
|
@ -45,7 +44,6 @@ public class RemoteClusterSecurityRCS1FailureStoreRestIT extends AbstractRemoteC
|
||||||
.name("fulfilling-cluster")
|
.name("fulfilling-cluster")
|
||||||
.nodes(3)
|
.nodes(3)
|
||||||
.apply(commonClusterConfig)
|
.apply(commonClusterConfig)
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.rolesFile(Resource.fromClasspath("roles.yml"))
|
.rolesFile(Resource.fromClasspath("roles.yml"))
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
|
@ -53,7 +51,6 @@ public class RemoteClusterSecurityRCS1FailureStoreRestIT extends AbstractRemoteC
|
||||||
.distribution(DistributionType.DEFAULT)
|
.distribution(DistributionType.DEFAULT)
|
||||||
.name("query-cluster")
|
.name("query-cluster")
|
||||||
.apply(commonClusterConfig)
|
.apply(commonClusterConfig)
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.rolesFile(Resource.fromClasspath("roles.yml"))
|
.rolesFile(Resource.fromClasspath("roles.yml"))
|
||||||
.build();
|
.build();
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,7 +11,6 @@ import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.client.ResponseException;
|
import org.elasticsearch.client.ResponseException;
|
||||||
import org.elasticsearch.core.Tuple;
|
import org.elasticsearch.core.Tuple;
|
||||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
|
||||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||||
import org.elasticsearch.test.cluster.util.resource.Resource;
|
import org.elasticsearch.test.cluster.util.resource.Resource;
|
||||||
import org.junit.ClassRule;
|
import org.junit.ClassRule;
|
||||||
|
@ -35,7 +34,6 @@ public class RemoteClusterSecurityRCS2FailureStoreRestIT extends AbstractRemoteC
|
||||||
.distribution(DistributionType.DEFAULT)
|
.distribution(DistributionType.DEFAULT)
|
||||||
.name("fulfilling-cluster")
|
.name("fulfilling-cluster")
|
||||||
.apply(commonClusterConfig)
|
.apply(commonClusterConfig)
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.setting("remote_cluster_server.enabled", "true")
|
.setting("remote_cluster_server.enabled", "true")
|
||||||
.setting("remote_cluster.port", "0")
|
.setting("remote_cluster.port", "0")
|
||||||
.setting("xpack.security.remote_cluster_server.ssl.enabled", "true")
|
.setting("xpack.security.remote_cluster_server.ssl.enabled", "true")
|
||||||
|
@ -49,7 +47,6 @@ public class RemoteClusterSecurityRCS2FailureStoreRestIT extends AbstractRemoteC
|
||||||
.distribution(DistributionType.DEFAULT)
|
.distribution(DistributionType.DEFAULT)
|
||||||
.name("query-cluster")
|
.name("query-cluster")
|
||||||
.apply(commonClusterConfig)
|
.apply(commonClusterConfig)
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.setting("xpack.security.remote_cluster_client.ssl.enabled", "true")
|
.setting("xpack.security.remote_cluster_client.ssl.enabled", "true")
|
||||||
.setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt")
|
.setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt")
|
||||||
.setting("xpack.security.authc.token.enabled", "true")
|
.setting("xpack.security.authc.token.enabled", "true")
|
||||||
|
|
|
@ -21,7 +21,6 @@ import org.elasticsearch.core.Tuple;
|
||||||
import org.elasticsearch.search.SearchHit;
|
import org.elasticsearch.search.SearchHit;
|
||||||
import org.elasticsearch.search.SearchResponseUtils;
|
import org.elasticsearch.search.SearchResponseUtils;
|
||||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
|
||||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||||
import org.elasticsearch.test.rest.ObjectPath;
|
import org.elasticsearch.test.rest.ObjectPath;
|
||||||
|
@ -52,7 +51,6 @@ public class RemoteClusterWithoutSecurityFailureStoreRestIT extends ESRestTestCa
|
||||||
.distribution(DistributionType.DEFAULT)
|
.distribution(DistributionType.DEFAULT)
|
||||||
.name("fulfilling-cluster")
|
.name("fulfilling-cluster")
|
||||||
.nodes(3)
|
.nodes(3)
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.module("analysis-common")
|
.module("analysis-common")
|
||||||
.setting("xpack.license.self_generated.type", "trial")
|
.setting("xpack.license.self_generated.type", "trial")
|
||||||
.setting("xpack.security.enabled", "false")
|
.setting("xpack.security.enabled", "false")
|
||||||
|
@ -61,7 +59,6 @@ public class RemoteClusterWithoutSecurityFailureStoreRestIT extends ESRestTestCa
|
||||||
private static ElasticsearchCluster queryCluster = ElasticsearchCluster.local()
|
private static ElasticsearchCluster queryCluster = ElasticsearchCluster.local()
|
||||||
.distribution(DistributionType.DEFAULT)
|
.distribution(DistributionType.DEFAULT)
|
||||||
.name("query-cluster")
|
.name("query-cluster")
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.module("analysis-common")
|
.module("analysis-common")
|
||||||
.setting("xpack.license.self_generated.type", "trial")
|
.setting("xpack.license.self_generated.type", "trial")
|
||||||
.setting("xpack.security.enabled", "false")
|
.setting("xpack.security.enabled", "false")
|
||||||
|
|
|
@ -7,8 +7,6 @@
|
||||||
|
|
||||||
package org.elasticsearch.xpack.security.operator;
|
package org.elasticsearch.xpack.security.operator;
|
||||||
|
|
||||||
import org.elasticsearch.cluster.metadata.DataStream;
|
|
||||||
|
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
import java.util.stream.Collectors;
|
import java.util.stream.Collectors;
|
||||||
|
@ -513,9 +511,9 @@ public class Constants {
|
||||||
"indices:admin/data_stream/lifecycle/get",
|
"indices:admin/data_stream/lifecycle/get",
|
||||||
"indices:admin/data_stream/lifecycle/put",
|
"indices:admin/data_stream/lifecycle/put",
|
||||||
"indices:admin/data_stream/lifecycle/explain",
|
"indices:admin/data_stream/lifecycle/explain",
|
||||||
DataStream.isFailureStoreFeatureFlagEnabled() ? "indices:admin/data_stream/options/delete" : null,
|
"indices:admin/data_stream/options/delete",
|
||||||
DataStream.isFailureStoreFeatureFlagEnabled() ? "indices:admin/data_stream/options/get" : null,
|
"indices:admin/data_stream/options/get",
|
||||||
DataStream.isFailureStoreFeatureFlagEnabled() ? "indices:admin/data_stream/options/put" : null,
|
"indices:admin/data_stream/options/put",
|
||||||
"indices:admin/delete",
|
"indices:admin/delete",
|
||||||
"indices:admin/flush",
|
"indices:admin/flush",
|
||||||
"indices:admin/flush[s]",
|
"indices:admin/flush[s]",
|
||||||
|
|
|
@ -28,7 +28,6 @@ import org.elasticsearch.search.SearchHit;
|
||||||
import org.elasticsearch.search.SearchResponseUtils;
|
import org.elasticsearch.search.SearchResponseUtils;
|
||||||
import org.elasticsearch.test.TestSecurityClient;
|
import org.elasticsearch.test.TestSecurityClient;
|
||||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
|
||||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||||
import org.elasticsearch.test.rest.ObjectPath;
|
import org.elasticsearch.test.rest.ObjectPath;
|
||||||
import org.elasticsearch.xcontent.XContentBuilder;
|
import org.elasticsearch.xcontent.XContentBuilder;
|
||||||
|
@ -68,7 +67,6 @@ public class FailureStoreSecurityRestIT extends ESRestTestCase {
|
||||||
@ClassRule
|
@ClassRule
|
||||||
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
|
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
|
||||||
.apply(SecurityOnTrialLicenseRestTestCase.commonTrialSecurityClusterConfig)
|
.apply(SecurityOnTrialLicenseRestTestCase.commonTrialSecurityClusterConfig)
|
||||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
|
@ -9,7 +9,6 @@ package org.elasticsearch.xpack.security.action.user;
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
import org.elasticsearch.action.support.ActionFilters;
|
import org.elasticsearch.action.support.ActionFilters;
|
||||||
import org.elasticsearch.action.support.PlainActionFuture;
|
import org.elasticsearch.action.support.PlainActionFuture;
|
||||||
import org.elasticsearch.cluster.metadata.DataStream;
|
|
||||||
import org.elasticsearch.common.bytes.BytesArray;
|
import org.elasticsearch.common.bytes.BytesArray;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||||
|
@ -92,8 +91,6 @@ public class TransportHasPrivilegesActionTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testHasPrivilegesRequestDoesNotAllowSelectorsInIndexPatterns() {
|
public void testHasPrivilegesRequestDoesNotAllowSelectorsInIndexPatterns() {
|
||||||
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
|
|
||||||
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
|
final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
|
||||||
final SecurityContext context = mock(SecurityContext.class);
|
final SecurityContext context = mock(SecurityContext.class);
|
||||||
final User user = new User("user-1", "superuser");
|
final User user = new User("user-1", "superuser");
|
||||||
|
|
|
@ -8,7 +8,6 @@
|
||||||
package org.elasticsearch.xpack.security.authz;
|
package org.elasticsearch.xpack.security.authz;
|
||||||
|
|
||||||
import org.elasticsearch.action.search.SearchRequest;
|
import org.elasticsearch.action.search.SearchRequest;
|
||||||
import org.elasticsearch.cluster.metadata.DataStream;
|
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
import org.elasticsearch.core.Tuple;
|
import org.elasticsearch.core.Tuple;
|
||||||
import org.elasticsearch.test.ESTestCase;
|
import org.elasticsearch.test.ESTestCase;
|
||||||
|
@ -238,8 +237,6 @@ public class AuthorizationDenialMessagesTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testActionDeniedWithFailuresAndCorrectActionIncludesFailuresMessage() {
|
public void testActionDeniedWithFailuresAndCorrectActionIncludesFailuresMessage() {
|
||||||
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
|
|
||||||
Authentication authentication = AuthenticationTestHelper.builder().build();
|
Authentication authentication = AuthenticationTestHelper.builder().build();
|
||||||
|
|
||||||
final String action = "indices:data/read/" + randomAlphaOfLengthBetween(0, 8);
|
final String action = "indices:data/read/" + randomAlphaOfLengthBetween(0, 8);
|
||||||
|
@ -262,8 +259,6 @@ public class AuthorizationDenialMessagesTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testActionDeniedWithNonMatchingActionFailuresOmitsFailuresMessage() {
|
public void testActionDeniedWithNonMatchingActionFailuresOmitsFailuresMessage() {
|
||||||
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
|
|
||||||
Authentication authentication = AuthenticationTestHelper.builder().build();
|
Authentication authentication = AuthenticationTestHelper.builder().build();
|
||||||
|
|
||||||
// granted only by all, so selector message is omitted
|
// granted only by all, so selector message is omitted
|
||||||
|
@ -284,8 +279,6 @@ public class AuthorizationDenialMessagesTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testActionDeniedWithoutFailuresOmitsFailuresMessage() {
|
public void testActionDeniedWithoutFailuresOmitsFailuresMessage() {
|
||||||
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
|
|
||||||
Authentication authentication = AuthenticationTestHelper.builder().build();
|
Authentication authentication = AuthenticationTestHelper.builder().build();
|
||||||
|
|
||||||
final String action = "indices:data/read/" + randomAlphaOfLengthBetween(0, 8);
|
final String action = "indices:data/read/" + randomAlphaOfLengthBetween(0, 8);
|
||||||
|
@ -305,8 +298,6 @@ public class AuthorizationDenialMessagesTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testActionDeniedWithoutIndicesOmitsFailuresMessage() {
|
public void testActionDeniedWithoutIndicesOmitsFailuresMessage() {
|
||||||
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
|
|
||||||
Authentication authentication = AuthenticationTestHelper.builder().build();
|
Authentication authentication = AuthenticationTestHelper.builder().build();
|
||||||
|
|
||||||
final String action = "indices:data/read/" + randomAlphaOfLengthBetween(0, 8);
|
final String action = "indices:data/read/" + randomAlphaOfLengthBetween(0, 8);
|
||||||
|
|
|
@ -312,7 +312,6 @@ public class AuthorizedIndicesTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testDataStreamsAreNotIncludedInAuthorizedIndicesWithFailuresSelectorAndAllPrivilege() {
|
public void testDataStreamsAreNotIncludedInAuthorizedIndicesWithFailuresSelectorAndAllPrivilege() {
|
||||||
assumeTrue("requires failure store", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
RoleDescriptor aStarRole = new RoleDescriptor(
|
RoleDescriptor aStarRole = new RoleDescriptor(
|
||||||
"a_star",
|
"a_star",
|
||||||
null,
|
null,
|
||||||
|
@ -400,7 +399,6 @@ public class AuthorizedIndicesTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testDataStreamsAreIncludedInAuthorizedIndicesWithFailuresSelectorAndAllPrivilege() {
|
public void testDataStreamsAreIncludedInAuthorizedIndicesWithFailuresSelectorAndAllPrivilege() {
|
||||||
assumeTrue("requires failure store", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
RoleDescriptor aStarRole = new RoleDescriptor(
|
RoleDescriptor aStarRole = new RoleDescriptor(
|
||||||
"a_star",
|
"a_star",
|
||||||
null,
|
null,
|
||||||
|
@ -494,7 +492,6 @@ public class AuthorizedIndicesTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testDataStreamsAreIncludedInAuthorizedIndicesWithFailuresSelector() {
|
public void testDataStreamsAreIncludedInAuthorizedIndicesWithFailuresSelector() {
|
||||||
assumeTrue("requires failure store", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
RoleDescriptor aReadFailuresStarRole = new RoleDescriptor(
|
RoleDescriptor aReadFailuresStarRole = new RoleDescriptor(
|
||||||
"a_read_failure_store",
|
"a_read_failure_store",
|
||||||
null,
|
null,
|
||||||
|
@ -577,7 +574,6 @@ public class AuthorizedIndicesTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testDataStreamsAreNotIncludedInAuthorizedIndicesWithFailuresSelector() {
|
public void testDataStreamsAreNotIncludedInAuthorizedIndicesWithFailuresSelector() {
|
||||||
assumeTrue("requires failure store", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
RoleDescriptor aReadFailuresStarRole = new RoleDescriptor(
|
RoleDescriptor aReadFailuresStarRole = new RoleDescriptor(
|
||||||
"a_read_failure_store",
|
"a_read_failure_store",
|
||||||
null,
|
null,
|
||||||
|
|
|
@ -226,16 +226,12 @@ public class IndicesAndAliasesResolverTests extends ESTestCase {
|
||||||
.put(indexBuilder(securityIndexName).settings(settings));
|
.put(indexBuilder(securityIndexName).settings(settings));
|
||||||
|
|
||||||
// Only add the failure indices if the failure store flag is enabled
|
// Only add the failure indices if the failure store flag is enabled
|
||||||
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
|
|
||||||
projectBuilder.put(dataStreamFailureStore1, true).put(dataStreamFailureStore2, true);
|
projectBuilder.put(dataStreamFailureStore1, true).put(dataStreamFailureStore2, true);
|
||||||
}
|
|
||||||
projectBuilder.put(
|
projectBuilder.put(
|
||||||
newInstance(
|
newInstance(
|
||||||
dataStreamName,
|
dataStreamName,
|
||||||
List.of(dataStreamIndex1.getIndex(), dataStreamIndex2.getIndex()),
|
List.of(dataStreamIndex1.getIndex(), dataStreamIndex2.getIndex()),
|
||||||
DataStream.isFailureStoreFeatureFlagEnabled()
|
List.of(dataStreamFailureStore1.getIndex(), dataStreamFailureStore2.getIndex())
|
||||||
? List.of(dataStreamFailureStore1.getIndex(), dataStreamFailureStore2.getIndex())
|
|
||||||
: List.of()
|
|
||||||
)
|
)
|
||||||
);
|
);
|
||||||
if (withAlias) {
|
if (withAlias) {
|
||||||
|
@ -2681,6 +2677,6 @@ public class IndicesAndAliasesResolverTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
private boolean runFailureStore() {
|
private boolean runFailureStore() {
|
||||||
return DataStream.isFailureStoreFeatureFlagEnabled() && randomBoolean();
|
return randomBoolean();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1416,7 +1416,6 @@ public class RBACEngineTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testBuildUserPrivilegeResponseCombinesIndexPrivileges() {
|
public void testBuildUserPrivilegeResponseCombinesIndexPrivileges() {
|
||||||
assumeTrue("failure store feature must be enabled", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
final BytesArray query = new BytesArray("""
|
final BytesArray query = new BytesArray("""
|
||||||
{"term":{"public":true}}""");
|
{"term":{"public":true}}""");
|
||||||
final Role role = Role.builder(RESTRICTED_INDICES, "test", "role")
|
final Role role = Role.builder(RESTRICTED_INDICES, "test", "role")
|
||||||
|
|
|
@ -187,8 +187,6 @@ public class IndicesPermissionTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testAuthorizeDataStreamAccessWithFailuresSelector() {
|
public void testAuthorizeDataStreamAccessWithFailuresSelector() {
|
||||||
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
|
|
||||||
Metadata.Builder builder = Metadata.builder();
|
Metadata.Builder builder = Metadata.builder();
|
||||||
String dataStreamName = randomAlphaOfLength(6);
|
String dataStreamName = randomAlphaOfLength(6);
|
||||||
int numBackingIndices = randomIntBetween(1, 3);
|
int numBackingIndices = randomIntBetween(1, 3);
|
||||||
|
@ -367,7 +365,6 @@ public class IndicesPermissionTests extends ESTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testAuthorizeDataStreamFailureIndices() {
|
public void testAuthorizeDataStreamFailureIndices() {
|
||||||
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());
|
|
||||||
Metadata.Builder builder = Metadata.builder();
|
Metadata.Builder builder = Metadata.builder();
|
||||||
String dataStreamName = randomAlphaOfLength(6);
|
String dataStreamName = randomAlphaOfLength(6);
|
||||||
int numBackingIndices = randomIntBetween(1, 3);
|
int numBackingIndices = randomIntBetween(1, 3);
|
||||||
|
|
|
@ -23,7 +23,6 @@ import org.elasticsearch.action.search.TransportSearchAction;
|
||||||
import org.elasticsearch.action.support.PlainActionFuture;
|
import org.elasticsearch.action.support.PlainActionFuture;
|
||||||
import org.elasticsearch.client.internal.Client;
|
import org.elasticsearch.client.internal.Client;
|
||||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||||
import org.elasticsearch.cluster.metadata.DataStream;
|
|
||||||
import org.elasticsearch.cluster.metadata.IndexAbstraction;
|
import org.elasticsearch.cluster.metadata.IndexAbstraction;
|
||||||
import org.elasticsearch.cluster.metadata.IndexMetadata;
|
import org.elasticsearch.cluster.metadata.IndexMetadata;
|
||||||
import org.elasticsearch.cluster.metadata.Metadata;
|
import org.elasticsearch.cluster.metadata.Metadata;
|
||||||
|
@@ -1548,7 +1547,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithReadFailureStorePrivilegeOnly() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         boolean allowRestrictedIndices = randomBoolean();
         final Role role = buildRole(
@@ -1566,7 +1564,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithReadFailureStorePrivilegeDuplicatesMerged() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         boolean allowRestrictedIndices = randomBoolean();
         final List<Role> roles = List.of(
@@ -1614,7 +1611,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithReadFailureStoreAndReadPrivilegeSplit() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         boolean allowRestrictedIndices = randomBoolean();
         final Role role = buildRole(
@@ -1636,7 +1632,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithReadFailureStoreAndReadPrivilegeAndMultipleIndexPatternsSplit() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         String otherIndexPattern = randomValueOtherThan(indexPattern, () -> randomAlphanumericOfLength(10));
         boolean allowRestrictedIndices = randomBoolean();
@@ -1694,7 +1689,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithReadOnRestrictedAndNonRestrictedIndices() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         List<Role> roles = List.of(
             buildRole(
@@ -1739,7 +1733,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithReadFailureStoreOnRestrictedAndNonRestrictedIndices() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         List<Role> roles = List.of(
             buildRole(
@@ -1792,7 +1785,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithMultipleReadFailureStoreAndReadPrivilegeSplit() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         boolean allowRestrictedIndices = randomBoolean();
         final List<Role> roles = List.of(
@@ -1844,7 +1836,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithAllPrivilegeIsNeverSplit() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         boolean allowRestrictedIndices = randomBoolean();
         final List<Role> roles = List.of(
@@ -1899,7 +1890,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithFailureStorePrivilegeCollatesToRemoveDlsFlsFromAnotherGroup() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         boolean allowRestrictedIndices = randomBoolean();
         final List<Role> roles = List.of(
@@ -1977,7 +1967,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
     }
 
     public void testBuildRoleWithFailureStorePrivilegeCollatesToKeepDlsFlsFromAnotherGroup() {
-        assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
         String indexPattern = randomAlphanumericOfLength(10);
         boolean allowRestrictedIndices = randomBoolean();
         final Role role = buildRole(
@@ -2025,17 +2014,11 @@ public class CompositeRolesStoreTests extends ESTestCase {
 
     public void testBuildRoleDoesNotSplitIfAllPrivilegesHaveTheSameSelector() {
         String indexPattern = randomAlphanumericOfLength(10);
-        IndexComponentSelectorPredicate predicate = (DataStream.isFailureStoreFeatureFlagEnabled())
-            ? randomFrom(
-                IndexComponentSelectorPredicate.ALL,
-                IndexComponentSelectorPredicate.DATA,
-                IndexComponentSelectorPredicate.FAILURES,
-                IndexComponentSelectorPredicate.DATA_AND_FAILURES
-            )
-            : randomFrom(
-                IndexComponentSelectorPredicate.ALL,
-                IndexComponentSelectorPredicate.DATA,
-                IndexComponentSelectorPredicate.DATA_AND_FAILURES
-            );
+        IndexComponentSelectorPredicate predicate = randomFrom(
+            IndexComponentSelectorPredicate.ALL,
+            IndexComponentSelectorPredicate.DATA,
+            IndexComponentSelectorPredicate.FAILURES,
+            IndexComponentSelectorPredicate.DATA_AND_FAILURES
+        );
 
         List<String> privilegesWithSelector = IndexPrivilege.names()
@@ -62,7 +62,6 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
         .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key"))
         .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode")
         .feature(FeatureFlag.TIME_SERIES_MODE)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .build();
 
     public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) {
@@ -37,7 +37,6 @@ public abstract class AbstractXpackFullClusterRestartTestCase extends Parameteri
         .keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key"))
         .keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode")
         .feature(FeatureFlag.TIME_SERIES_MODE)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .build();
 
     public AbstractXpackFullClusterRestartTestCase(FullClusterRestartUpgradeStatus upgradeStatus) {
@@ -46,7 +46,6 @@ public class CoreWithMultipleProjectsClientYamlTestSuiteIT extends MultipleProje
         .user(USER, PASS)
         .systemProperty("es.queryable_built_in_roles_enabled", "false")
         .feature(FeatureFlag.TIME_SERIES_MODE)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .nodes(2)
         .build();
 
@@ -44,7 +44,6 @@ public class XpackWithMultipleProjectsClientYamlTestSuiteIT extends MultipleProj
         .configFile("service_tokens", Resource.fromClasspath("service_tokens"))
         .user(USER, PASS)
         .feature(FeatureFlag.TIME_SERIES_MODE)
-        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
         .build();
 
     public XpackWithMultipleProjectsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {
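The four cluster specs above all follow the same pattern: only the .feature(FeatureFlag.FAILURE_STORE_ENABLED) opt-in is dropped, and the rest of the builder chain is untouched. A minimal sketch of the resulting shape, assuming the usual ElasticsearchCluster.local() entry point and reusing only builder calls visible in the hunks above (the field name and the particular combination of calls are illustrative, not a copy of any one test class):

    // Illustrative spec after the change: the failure store needs no feature-flag opt-in.
    private static final ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .feature(FeatureFlag.TIME_SERIES_MODE)
        .user(USER, PASS)
        .nodes(2)
        .build();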