Mirror of https://github.com/elastic/elasticsearch.git, synced 2025-06-27 17:10:22 -04:00
Add ability to redirect ingestion failures on data streams to a failure store (#126973)
Removes the feature flags and guards that prevent the new failure store functionality from operating in production runtimes.
parent 72b4ed255b · commit 7b89f4d4a6
90 changed files with 2020 additions and 2261 deletions
@@ -85,9 +85,6 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {
  setting 'xpack.license.self_generated.type', 'trial'
  setting 'indices.lifecycle.history_index_enabled', 'false'
  keystorePassword 'keystore-password'
  if (buildParams.snapshotBuild == false) {
    requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
  }
}

// debug ccr test failures:
@@ -126,7 +123,6 @@ testClusters.matching { it.name == "yamlRestTest"}.configureEach {

  requiresFeature 'es.index_mode_feature_flag_registered', Version.fromString("8.0.0")
  requiresFeature 'es.failure_store_feature_flag_enabled', Version.fromString("8.12.0")

  // TODO Rene: clean up this kind of cross project file references
  extraConfigFile 'op-jwks.json', project(':x-pack:test:idp-fixture').file("src/main/resources/oidc/op-jwks.json")
docs/changelog/126973.yaml (new file, 89 lines)
@@ -0,0 +1,89 @@
pr: 126973
summary: Add ability to redirect ingestion failures on data streams to a failure store
area: Data streams
type: feature
issues: []
highlight:
  title: Add ability to redirect ingestion failures on data streams to a failure store
  body: |-
    Documents that encountered ingest pipeline failures or mapping conflicts
    were previously returned to the client as errors in the bulk and index
    operations. Many client applications are not equipped to respond to these
    failures, so the failed documents are often dropped by the client, which
    cannot hold the broken documents indefinitely. In many end user workloads,
    these failed documents represent events that could be critical signals for
    observability or security use cases.

    To help mitigate this problem, data streams can now maintain a "failure
    store" which is used to accept and hold documents that fail to be
    ingested due to preventable configuration errors. The data stream's
    failure store operates like a separate set of backing indices with their
    own mappings and access patterns that allow Elasticsearch to accept
    documents that would otherwise be rejected due to unhandled ingest
    pipeline exceptions or mapping conflicts.

    Users can enable redirection of ingest failures to the failure store on
    new data streams by specifying it in the new `data_stream_options` field
    inside a component or index template:

    [source,yaml]
    ----
    PUT _index_template/my-template
    {
      "index_patterns": ["logs-test-*"],
      "data_stream": {},
      "template": {
        "data_stream_options": {
          "failure_store": {
            "enabled": true
          }
        }
      }
    }
    ----
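    The failure store can also be switched on for existing data streams whose
    names match configured patterns through a cluster setting backed by
    `DataStreamFailureStoreSettings` (the "effectively enabled" state reported
    by the get data stream API covers both). For illustration only, and
    assuming the setting key shown here (it is not spelled out in this change):

    [source,yaml]
    ----
    PUT _cluster/settings
    {
      "persistent": {
        "data_streams.failure_store.enabled": "logs-*"
      }
    }
    ----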

    Existing data streams can be configured with the new data stream
    `_options` endpoint:

    [source,yaml]
    ----
    PUT _data_stream/logs-test-apache/_options
    {
      "failure_store": {
        "enabled": true
      }
    }
    ----
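    The options currently in effect can be read back with the corresponding
    GET endpoint (this change also wires up the GET and DELETE handlers for
    data stream options); a minimal request:

    [source,yaml]
    ----
    GET _data_stream/logs-test-apache/_options
    ----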

    When redirection is enabled, any ingestion-related failures will be
    captured in the failure store if the cluster is able to do so, along with
    the timestamp at which the failure occurred, details about the error
    encountered, and the document that could not be ingested. Since failure
    stores are a kind of Elasticsearch index, we can search the data stream
    for the failures that it has collected. The failures are not shown by
    default as they are stored in different indices than the normal data
    stream data. In order to retrieve the failures, we use the `_search` API
    along with a new bit of index pattern syntax, the `::` selector.

    [source,yaml]
    ----
    POST logs-test-apache::failures/_search
    ----

    This index syntax tells the search operation to target the indices in
    the data stream's failure store instead of its backing indices. It can be
    mixed in a number of ways with other index patterns to include their
    failure store indices in the search operation:

    [source,yaml]
    ----
    POST logs-*::failures/_search
    POST logs-*,logs-*::failures/_search
    POST *::failures/_search
    POST _query
    {
      "query": "FROM my_data_stream*::failures"
    }
    ----
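    Each failure document records when the failure happened, what went wrong,
    and the document that was rejected. As a rough sketch only (field names
    are illustrative, not a documented schema):

    [source,yaml]
    ----
    {
      "@timestamp": "...",
      "document": {
        "index": "logs-test-apache",
        "source": { "message": "..." }
      },
      "error": {
        "type": "document_parsing_exception",
        "message": "..."
      }
    }
    ----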
  notable: true
@@ -36,12 +36,6 @@ if (buildParams.inFipsJvm){
  tasks.named("yamlRestTest").configure{enabled = false }
}

if (buildParams.snapshotBuild == false) {
  tasks.withType(Test).configureEach {
    systemProperty 'es.failure_store_feature_flag_enabled', 'true'
  }
}

tasks.named("yamlRestCompatTestTransform").configure({ task ->
  task.skipTest("data_stream/10_basic/Create hidden data stream", "warning does not exist for compatibility")
@@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.After;

@@ -38,7 +37,6 @@ public abstract class AbstractDataStreamIT extends ESRestTestCase {
    @ClassRule
    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .distribution(DistributionType.DEFAULT)
        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
        .setting("xpack.security.enabled", "false")
        .setting("xpack.watcher.enabled", "false")
        // Disable apm-data so the index templates it installs do not impact
@@ -15,7 +15,6 @@ import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.cluster.util.resource.Resource;
import org.elasticsearch.test.rest.ESRestTestCase;

@@ -29,7 +28,6 @@ public class DataStreamWithSecurityIT extends ESRestTestCase {
    @ClassRule
    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .distribution(DistributionType.DEFAULT)
        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
        .setting("xpack.watcher.enabled", "false")
        .setting("xpack.ml.enabled", "false")
        .setting("xpack.security.enabled", "true")
@@ -13,7 +13,6 @@ import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.ClassRule;

@@ -27,7 +26,6 @@ public abstract class DisabledSecurityDataStreamTestCase extends ESRestTestCase
    @ClassRule
    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .distribution(DistributionType.DEFAULT)
        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
        .setting("xpack.security.enabled", "false")
        .setting("xpack.watcher.enabled", "false")
        .build();
@@ -17,7 +17,6 @@ import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.cluster.util.resource.Resource;
import org.elasticsearch.test.rest.ESRestTestCase;

@@ -41,7 +40,6 @@ public class LazyRolloverDataStreamIT extends ESRestTestCase {
    @ClassRule
    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .distribution(DistributionType.DEFAULT)
        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
        .setting("xpack.watcher.enabled", "false")
        .setting("xpack.ml.enabled", "false")
        .setting("xpack.security.enabled", "true")
@@ -19,7 +19,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.cluster.util.resource.Resource;
import org.elasticsearch.test.rest.ESRestTestCase;

@@ -46,7 +45,6 @@ public abstract class DataStreamLifecyclePermissionsTestCase extends ESRestTestC
    @ClassRule
    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .distribution(DistributionType.DEFAULT)
        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
        .setting("xpack.watcher.enabled", "false")
        .setting("xpack.ml.enabled", "false")
        .setting("xpack.security.enabled", "true")
@@ -28,7 +28,7 @@ public class DataStreamFeatures implements FeatureSpecification {

    @Override
    public Set<NodeFeature> getFeatures() {
        return DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(DataStream.DATA_STREAM_FAILURE_STORE_FEATURE) : Set.of();
        return Set.of(DataStream.DATA_STREAM_FAILURE_STORE_FEATURE);
    }

    @Override
@@ -21,7 +21,6 @@ import org.elasticsearch.action.datastreams.lifecycle.ExplainDataStreamLifecycle
import org.elasticsearch.action.datastreams.lifecycle.GetDataStreamLifecycleAction;
import org.elasticsearch.action.datastreams.lifecycle.PutDataStreamLifecycleAction;
import org.elasticsearch.client.internal.OriginSettingClient;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;

@@ -237,11 +236,9 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlu
        actions.add(new ActionHandler(DeleteDataStreamLifecycleAction.INSTANCE, TransportDeleteDataStreamLifecycleAction.class));
        actions.add(new ActionHandler(ExplainDataStreamLifecycleAction.INSTANCE, TransportExplainDataStreamLifecycleAction.class));
        actions.add(new ActionHandler(GetDataStreamLifecycleStatsAction.INSTANCE, TransportGetDataStreamLifecycleStatsAction.class));
        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            actions.add(new ActionHandler(GetDataStreamOptionsAction.INSTANCE, TransportGetDataStreamOptionsAction.class));
            actions.add(new ActionHandler(PutDataStreamOptionsAction.INSTANCE, TransportPutDataStreamOptionsAction.class));
            actions.add(new ActionHandler(DeleteDataStreamOptionsAction.INSTANCE, TransportDeleteDataStreamOptionsAction.class));
        }
        actions.add(new ActionHandler(GetDataStreamOptionsAction.INSTANCE, TransportGetDataStreamOptionsAction.class));
        actions.add(new ActionHandler(PutDataStreamOptionsAction.INSTANCE, TransportPutDataStreamOptionsAction.class));
        actions.add(new ActionHandler(DeleteDataStreamOptionsAction.INSTANCE, TransportDeleteDataStreamOptionsAction.class));
        return actions;
    }

@@ -274,11 +271,9 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlu
        handlers.add(new RestDeleteDataStreamLifecycleAction());
        handlers.add(new RestExplainDataStreamLifecycleAction());
        handlers.add(new RestDataStreamLifecycleStatsAction());
        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            handlers.add(new RestGetDataStreamOptionsAction());
            handlers.add(new RestPutDataStreamOptionsAction());
            handlers.add(new RestDeleteDataStreamOptionsAction());
        }
        handlers.add(new RestGetDataStreamOptionsAction());
        handlers.add(new RestPutDataStreamOptionsAction());
        handlers.add(new RestDeleteDataStreamOptionsAction());
        return handlers;
    }
@@ -230,8 +230,7 @@ public class TransportGetDataStreamsAction extends TransportLocalProjectMetadata
        for (DataStream dataStream : dataStreams) {
            // For this action, we are returning whether the failure store is effectively enabled, either in metadata or by cluster setting.
            // Users can use the get data stream options API to find out whether it is explicitly enabled in metadata.
            boolean failureStoreEffectivelyEnabled = DataStream.isFailureStoreFeatureFlagEnabled()
                && dataStream.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings);
            boolean failureStoreEffectivelyEnabled = dataStream.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings);
            final String indexTemplate;
            boolean indexTemplatePreferIlmValue = true;
            String ilmPolicyName = null;

@@ -289,7 +288,7 @@ public class TransportGetDataStreamsAction extends TransportLocalProjectMetadata
        Map<Index, IndexProperties> backingIndicesSettingsValues = new HashMap<>();
        ProjectMetadata metadata = state.metadata();
        collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getIndices());
        if (DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.getFailureIndices().isEmpty() == false) {
        if (dataStream.getFailureIndices().isEmpty() == false) {
            collectIndexSettingsValues(dataStream, backingIndicesSettingsValues, metadata, dataStream.getFailureIndices());
        }
@@ -375,11 +375,9 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab
        // These are the pre-rollover write indices. They may or may not be the write index after maybeExecuteRollover has executed,
        // depending on rollover criteria, for this reason we exclude them for the remaining run.
        indicesToExcludeForRemainingRun.add(maybeExecuteRollover(project, dataStream, false));
        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            Index failureStoreWriteIndex = maybeExecuteRollover(project, dataStream, true);
            if (failureStoreWriteIndex != null) {
                indicesToExcludeForRemainingRun.add(failureStoreWriteIndex);
            }
        Index failureStoreWriteIndex = maybeExecuteRollover(project, dataStream, true);
        if (failureStoreWriteIndex != null) {
            indicesToExcludeForRemainingRun.add(failureStoreWriteIndex);
        }

        // tsds indices that are still within their time bounds (i.e. now < time_series.end_time) - we don't want these indices to be

@@ -802,7 +800,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab
                targetIndices.add(index);
            }
        }
        if (withFailureStore && DataStream.isFailureStoreFeatureFlagEnabled() && dataStream.getFailureIndices().isEmpty() == false) {
        if (withFailureStore && dataStream.getFailureIndices().isEmpty() == false) {
            for (Index index : dataStream.getFailureIndices()) {
                if (dataStream.isIndexManagedByDataStreamLifecycle(index, indexMetadataSupplier)
                    && indicesToExcludeForRemainingRun.contains(index) == false) {
@@ -27,7 +27,6 @@ import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInProgressException;
import org.elasticsearch.test.ESTestCase;
import org.junit.Assume;

import java.util.Collections;
import java.util.List;

@@ -69,8 +68,6 @@ public class DeleteDataStreamTransportActionTests extends ESTestCase {
    }

    public void testDeleteDataStreamWithFailureStore() {
        Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());

        final String dataStreamName = "my-data-stream";
        final List<String> otherIndices = randomSubsetOf(List.of("foo", "bar", "baz"));
@@ -134,21 +134,16 @@ public class GetDataStreamsResponseTests extends ESTestCase {
            is(ManagedBy.LIFECYCLE.displayValue)
        );

        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            var failureStore = (Map<String, Object>) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName());
            List<Object> failureStoresRepresentation = (List<Object>) failureStore.get(DataStream.INDICES_FIELD.getPreferredName());
            Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0);
            assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName()));
            assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
            assertThat(
                failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()),
                is(nullValue())
            );
            assertThat(
                failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()),
                is(ManagedBy.LIFECYCLE.displayValue)
            );
        }
        var failureStore = (Map<String, Object>) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName());
        List<Object> failureStoresRepresentation = (List<Object>) failureStore.get(DataStream.INDICES_FIELD.getPreferredName());
        Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0);
        assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName()));
        assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
        assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
        assertThat(
            failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()),
            is(ManagedBy.LIFECYCLE.displayValue)
        );
    }
}
@@ -228,21 +223,16 @@ public class GetDataStreamsResponseTests extends ESTestCase {
            is(ManagedBy.UNMANAGED.displayValue)
        );

        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            var failureStore = (Map<String, Object>) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName());
            List<Object> failureStoresRepresentation = (List<Object>) failureStore.get(DataStream.INDICES_FIELD.getPreferredName());
            Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0);
            assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName()));
            assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
            assertThat(
                failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()),
                is(nullValue())
            );
            assertThat(
                failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()),
                is(ManagedBy.UNMANAGED.displayValue)
            );
        }
        var failureStore = (Map<String, Object>) dataStreamMap.get(DataStream.FAILURE_STORE_FIELD.getPreferredName());
        List<Object> failureStoresRepresentation = (List<Object>) failureStore.get(DataStream.INDICES_FIELD.getPreferredName());
        Map<String, Object> failureStoreRepresentation = (Map<String, Object>) failureStoresRepresentation.get(0);
        assertThat(failureStoreRepresentation.get("index_name"), is(failureStoreIndex.getName()));
        assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.PREFER_ILM.getPreferredName()), is(false));
        assertThat(failureStoreRepresentation.get(Response.DataStreamInfo.ILM_POLICY_FIELD.getPreferredName()), is(nullValue()));
        assertThat(
            failureStoreRepresentation.get(Response.DataStreamInfo.MANAGED_BY.getPreferredName()),
            is(ManagedBy.UNMANAGED.displayValue)
        );
    }
}
@@ -20,8 +20,6 @@ import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
import org.junit.ClassRule;

import static org.elasticsearch.test.cluster.FeatureFlag.FAILURE_STORE_ENABLED;

public class DataStreamsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    public DataStreamsClientYamlTestSuiteIT(final ClientYamlTestCandidate testCandidate) {

@@ -46,7 +44,6 @@ public class DataStreamsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase
    private static ElasticsearchCluster createCluster() {
        LocalClusterSpecBuilder<ElasticsearchCluster> clusterBuilder = ElasticsearchCluster.local()
            .distribution(DistributionType.DEFAULT)
            .feature(FAILURE_STORE_ENABLED)
            .setting("xpack.security.enabled", "true")
            .keystore("bootstrap.password", "x-pack-test-password")
            .user("x_pack_rest_user", "x-pack-test-password")
@@ -23,8 +23,6 @@ import org.junit.ClassRule;

import java.util.Objects;

import static org.elasticsearch.test.cluster.FeatureFlag.FAILURE_STORE_ENABLED;

public class DotPrefixClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {

    public DotPrefixClientYamlTestSuiteIT(final ClientYamlTestCandidate testCandidate) {

@@ -49,7 +47,6 @@ public class DotPrefixClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
    private static ElasticsearchCluster createCluster() {
        LocalClusterSpecBuilder<ElasticsearchCluster> clusterBuilder = ElasticsearchCluster.local()
            .distribution(DistributionType.DEFAULT)
            .feature(FAILURE_STORE_ENABLED)
            .setting("xpack.security.enabled", "true")
            .keystore("bootstrap.password", "x-pack-test-password")
            .user("x_pack_rest_user", "x-pack-test-password");
@@ -29,7 +29,6 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
    versions = [bwcVersion.toString(), project.version]
    setting 'cluster.remote.node.attr', 'gateway'
    setting 'xpack.security.enabled', 'false'
    requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
  }
  def remoteCluster = testClusters.register("${baseName}-remote") {
    numberOfNodes = 3

@@ -37,7 +36,6 @@ buildParams.bwcVersions.withWireCompatible { bwcVersion, baseName ->
    firstNode.setting 'node.attr.gateway', 'true'
    lastNode.setting 'node.attr.gateway', 'true'
    setting 'xpack.security.enabled', 'false'
    requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0)
  }
@@ -53,7 +53,6 @@ public class FullClusterRestartArchivedSettingsIT extends ParameterizedFullClust
        .setting("indices.memory.shard_inactive_time", "60m")
        .apply(() -> clusterConfig)
        .feature(FeatureFlag.TIME_SERIES_MODE)
        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
        .build();

    @ClassRule
@@ -55,8 +55,7 @@ public class FullClusterRestartDownsampleIT extends ParameterizedFullClusterRest
        .setting("xpack.security.enabled", "false")
        .setting("indices.lifecycle.poll_interval", "5s")
        .apply(() -> clusterConfig)
        .feature(FeatureFlag.TIME_SERIES_MODE)
        .feature(FeatureFlag.FAILURE_STORE_ENABLED);
        .feature(FeatureFlag.TIME_SERIES_MODE);

    if (oldVersion.before(Version.fromString("8.18.0"))) {
        cluster.jvmArg("-da:org.elasticsearch.index.mapper.DocumentMapper");
@@ -116,8 +116,7 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
        // some tests rely on the translog not being flushed
        .setting("indices.memory.shard_inactive_time", "60m")
        .apply(() -> clusterConfig)
        .feature(FeatureFlag.TIME_SERIES_MODE)
        .feature(FeatureFlag.FAILURE_STORE_ENABLED);
        .feature(FeatureFlag.TIME_SERIES_MODE);

    if (oldVersion.before(Version.fromString("8.18.0"))) {
        cluster.jvmArg("-da:org.elasticsearch.index.mapper.DocumentMapper");
@@ -40,7 +40,6 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.FeatureFlag;
import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.xcontent.XContentBuilder;

@@ -78,7 +77,6 @@ public class QueryBuilderBWCIT extends ParameterizedFullClusterRestartTestCase {
        .version(org.elasticsearch.test.cluster.util.Version.fromString(OLD_CLUSTER_VERSION))
        .nodes(2)
        .setting("xpack.security.enabled", "false")
        .feature(FeatureFlag.FAILURE_STORE_ENABLED)
        .apply(() -> clusterConfig)
        .build();
@@ -4,9 +4,8 @@
      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html",
      "description":"Deletes the data stream options of the selected data streams."
    },
    "stability": "experimental",
    "visibility": "feature_flag",
    "feature_flag": "es.failure_store_feature_flag_enabled",
    "stability": "stable",
    "visibility": "public",
    "headers":{
      "accept": [ "application/json"]
    },
@@ -4,9 +4,8 @@
      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html",
      "description":"Returns the data stream options of the selected data streams."
    },
    "stability": "experimental",
    "visibility": "feature_flag",
    "feature_flag": "es.failure_store_feature_flag_enabled",
    "stability": "stable",
    "visibility": "public",
    "headers":{
      "accept": [ "application/json"]
    },
@@ -4,9 +4,8 @@
      "url":"https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html",
      "description":"Updates the data stream options of the selected data streams."
    },
    "stability": "experimental",
    "visibility": "feature_flag",
    "feature_flag": "es.failure_store_feature_flag_enabled",
    "stability": "stable",
    "visibility": "public",
    "headers":{
      "accept": [ "application/json"],
      "content_type": ["application/json"]
@@ -135,11 +135,9 @@ sourceSets.main.compiledBy(generateModulesList, generatePluginsList)
if (buildParams.snapshotBuild == false) {
  tasks.named("test").configure {
    systemProperty 'es.index_mode_feature_flag_registered', 'true'
    systemProperty 'es.failure_store_feature_flag_enabled', 'true'
  }
  tasks.named("internalClusterTest").configure {
    systemProperty 'es.index_mode_feature_flag_registered', 'true'
    systemProperty 'es.failure_store_feature_flag_enabled', 'true'
  }
}
@@ -11,7 +11,6 @@ package org.elasticsearch.action;

import org.elasticsearch.action.search.SearchContextId;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.ProjectMetadata;

@@ -179,14 +178,12 @@ public class ResolvedIndices {
            : indexNameExpressionResolver.concreteIndices(projectMetadata, localIndices, startTimeInMillis);

        // prevent using selectors with remote cluster patterns
        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            for (final var indicesPerRemoteClusterAlias : remoteClusterIndices.entrySet()) {
                final String[] indices = indicesPerRemoteClusterAlias.getValue().indices();
                if (indices != null) {
                    for (final String index : indices) {
                        if (IndexNameExpressionResolver.hasSelectorSuffix(index)) {
                            throw new InvalidIndexNameException(index, "Selectors are not yet supported on remote cluster patterns");
                        }
        for (final var indicesPerRemoteClusterAlias : remoteClusterIndices.entrySet()) {
            final String[] indices = indicesPerRemoteClusterAlias.getValue().indices();
            if (indices != null) {
                for (final String index : indices) {
                    if (IndexNameExpressionResolver.hasSelectorSuffix(index)) {
                        throw new InvalidIndexNameException(index, "Selectors are not yet supported on remote cluster patterns");
                    }
                }
            }
@@ -222,7 +222,7 @@ final class BulkOperation extends ActionRunnable<BulkResponse> {
     */
    private void rollOverFailureStores(Runnable runnable) {
        // Skip allocation of some objects if we don't need to roll over anything.
        if (failureStoresToBeRolledOver.isEmpty() || DataStream.isFailureStoreFeatureFlagEnabled() == false) {
        if (failureStoresToBeRolledOver.isEmpty()) {
            runnable.run();
            return;
        }

@@ -423,7 +423,7 @@ final class BulkOperation extends ActionRunnable<BulkResponse> {
    }

    private void redirectFailuresOrCompleteBulkOperation() {
        if (DataStream.isFailureStoreFeatureFlagEnabled() && failureStoreRedirects.isEmpty() == false) {
        if (failureStoreRedirects.isEmpty() == false) {
            doRedirectFailures();
        } else {
            completeBulkOperation();

@@ -615,10 +615,7 @@ final class BulkOperation extends ActionRunnable<BulkResponse> {
     * @return a data stream if the write request points to a data stream, or {@code null} if it does not
     */
    private static DataStream getRedirectTargetCandidate(DocWriteRequest<?> docWriteRequest, ProjectMetadata project) {
        // Feature flag guard
        if (DataStream.isFailureStoreFeatureFlagEnabled() == false) {
            return null;
        }
        // PRTODO: We could check for cluster feature here instead
        // If there is no index abstraction, then the request is using a pattern of some sort, which data streams do not support
        IndexAbstraction ia = project.getIndicesLookup().get(docWriteRequest.index());
        return DataStream.resolveDataStream(ia, project);
@@ -17,7 +17,6 @@ import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.core.Assertions;
@@ -218,57 +217,49 @@ final class BulkRequestModifier implements Iterator<DocWriteRequest<?>> {
     * @param e the failure encountered.
     */
    public void markItemForFailureStore(int slot, String targetIndexName, Exception e) {
        if (DataStream.isFailureStoreFeatureFlagEnabled() == false) {
            // Assert false for development, but if we somehow find ourselves here, default to failure logic.
        // We get the index write request to find the source of the failed document
        IndexRequest indexRequest = TransportBulkAction.getIndexWriteRequest(bulkRequest.requests().get(slot));
        if (indexRequest == null) {
            // This is unlikely to happen ever since only source oriented operations (index, create, upsert) are considered for
            // ingest, but if it does happen, attempt to trip an assertion. If running in production, be defensive: Mark it failed
            // as normal, and log the info for later debugging if needed.
            assert false
                : "Attempting to route a failed write request type to a failure store but the failure store is not enabled! "
                    + "This should be guarded against in TransportBulkAction#shouldStoreFailure()";
                : "Attempting to mark invalid write request type for failure store. Only IndexRequest or UpdateRequest allowed. "
                    + "type: ["
                    + bulkRequest.requests().get(slot).getClass().getName()
                    + "], index: ["
                    + targetIndexName
                    + "]";
            markItemAsFailed(slot, e, IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN);
            logger.debug(
                () -> "Attempted to redirect an invalid write operation after ingest failure - type: ["
                    + bulkRequest.requests().get(slot).getClass().getName()
                    + "], index: ["
                    + targetIndexName
                    + "]"
            );
        } else {
            // We get the index write request to find the source of the failed document
            IndexRequest indexRequest = TransportBulkAction.getIndexWriteRequest(bulkRequest.requests().get(slot));
            if (indexRequest == null) {
                // This is unlikely to happen ever since only source oriented operations (index, create, upsert) are considered for
                // ingest, but if it does happen, attempt to trip an assertion. If running in production, be defensive: Mark it failed
                // as normal, and log the info for later debugging if needed.
                assert false
                    : "Attempting to mark invalid write request type for failure store. Only IndexRequest or UpdateRequest allowed. "
                        + "type: ["
                        + bulkRequest.requests().get(slot).getClass().getName()
                        + "], index: ["
                        + targetIndexName
                        + "]";
                markItemAsFailed(slot, e, IndexDocFailureStoreStatus.NOT_APPLICABLE_OR_UNKNOWN);
            try {
                IndexRequest errorDocument = failureStoreDocumentConverter.transformFailedRequest(indexRequest, e, targetIndexName);
                // This is a fresh index request! We need to do some preprocessing on it. If we do not, when this is returned to
                // the bulk action, the action will see that it hasn't been processed by ingest yet and attempt to ingest it again.
                errorDocument.isPipelineResolved(true);
                errorDocument.setPipeline(IngestService.NOOP_PIPELINE_NAME);
                errorDocument.setFinalPipeline(IngestService.NOOP_PIPELINE_NAME);
                bulkRequest.requests.set(slot, errorDocument);
            } catch (IOException ioException) {
                // This is unlikely to happen because the conversion is so simple, but be defensive and attempt to report about it
                // if we need the info later.
                e.addSuppressed(ioException); // Prefer to return the original exception to the end user instead of this new one.
                logger.debug(
                    () -> "Attempted to redirect an invalid write operation after ingest failure - type: ["
                        + bulkRequest.requests().get(slot).getClass().getName()
                        + "], index: ["
                    () -> "Encountered exception while attempting to redirect a failed ingest operation: index ["
                        + targetIndexName
                        + "]"
                        + "], source: ["
                        + indexRequest.source().utf8ToString()
                        + "]",
                    ioException
                );
            } else {
                try {
                    IndexRequest errorDocument = failureStoreDocumentConverter.transformFailedRequest(indexRequest, e, targetIndexName);
                    // This is a fresh index request! We need to do some preprocessing on it. If we do not, when this is returned to
                    // the bulk action, the action will see that it hasn't been processed by ingest yet and attempt to ingest it again.
                    errorDocument.isPipelineResolved(true);
                    errorDocument.setPipeline(IngestService.NOOP_PIPELINE_NAME);
                    errorDocument.setFinalPipeline(IngestService.NOOP_PIPELINE_NAME);
                    bulkRequest.requests.set(slot, errorDocument);
                } catch (IOException ioException) {
                    // This is unlikely to happen because the conversion is so simple, but be defensive and attempt to report about it
                    // if we need the info later.
                    e.addSuppressed(ioException); // Prefer to return the original exception to the end user instead of this new one.
                    logger.debug(
                        () -> "Encountered exception while attempting to redirect a failed ingest operation: index ["
                            + targetIndexName
                            + "], source: ["
                            + indexRequest.source().utf8ToString()
                            + "]",
                        ioException
                    );
                    markItemAsFailed(slot, e, IndexDocFailureStoreStatus.FAILED);
                }
                markItemAsFailed(slot, e, IndexDocFailureStoreStatus.FAILED);
            }
        }
    }
@@ -12,7 +12,6 @@ package org.elasticsearch.action.bulk;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

@@ -102,7 +101,7 @@ public enum IndexDocFailureStoreStatus implements ToXContentFragment, Writeable
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        // We avoid adding the not_applicable status in the response to not increase the size of bulk responses.
        if (DataStream.isFailureStoreFeatureFlagEnabled() && this.equals(NOT_APPLICABLE_OR_UNKNOWN) == false) {
        if (this.equals(NOT_APPLICABLE_OR_UNKNOWN) == false) {
            builder.field("failure_store", label);
        }
        return builder;
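With the flag removed, a bulk item response can always carry the `failure_store` status field whenever it is applicable (the not-applicable case is still omitted to keep responses small). A rough illustration of how a redirected item might be reported, using the `.fs-` failure index prefix defined in DataStream; the exact response shape is not taken from this change:

[source,yaml]
----
{
  "index": {
    "_index": ".fs-logs-test-apache-2025.06.27-000001",
    "status": 201,
    "failure_store": "used"
  }
}
----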
@@ -308,7 +308,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
            index,
            projectState.metadata()
        );
        boolean lazyRolloverFailureStoreFeature = DataStream.isFailureStoreFeatureFlagEnabled();
        Set<String> indicesThatRequireAlias = new HashSet<>();

        for (DocWriteRequest<?> request : bulkRequest.requests) {

@@ -356,7 +355,7 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
            if (dataStream != null) {
                if (writeToFailureStore == false && dataStream.getDataComponent().isRolloverOnWrite()) {
                    dataStreamsToBeRolledOver.add(request.index());
                } else if (lazyRolloverFailureStoreFeature && writeToFailureStore && dataStream.getFailureComponent().isRolloverOnWrite()) {
                } else if (writeToFailureStore && dataStream.getFailureComponent().isRolloverOnWrite()) {
                    failureStoresToBeRolledOver.add(request.index());
                }
            }

@@ -627,9 +626,6 @@ public class TransportBulkAction extends TransportAbstractBulkAction {
     */
    // Visibility for testing
    Boolean resolveFailureInternal(String indexName, ProjectMetadata projectMetadata, long epochMillis) {
        if (DataStream.isFailureStoreFeatureFlagEnabled() == false) {
            return null;
        }
        var resolution = resolveFailureStoreFromMetadata(indexName, projectMetadata, epochMillis);
        if (resolution != null) {
            return resolution;
@@ -416,17 +416,12 @@ public class GetDataStreamAction extends ActionType<GetDataStreamAction.Response
                builder.endArray();
                builder.endObject();
            }
            if (DataStream.isFailureStoreFeatureFlagEnabled()) {
                builder.startObject(DataStream.FAILURE_STORE_FIELD.getPreferredName());
                builder.field(FAILURE_STORE_ENABLED.getPreferredName(), failureStoreEffectivelyEnabled);
                builder.field(
                    DataStream.ROLLOVER_ON_WRITE_FIELD.getPreferredName(),
                    dataStream.getFailureComponent().isRolloverOnWrite()
                );
                indicesToXContent(builder, dataStream.getFailureIndices());
                addAutoShardingEvent(builder, params, dataStream.getFailureComponent().getAutoShardingEvent());
                builder.endObject();
            }
            builder.startObject(DataStream.FAILURE_STORE_FIELD.getPreferredName());
            builder.field(FAILURE_STORE_ENABLED.getPreferredName(), failureStoreEffectivelyEnabled);
            builder.field(DataStream.ROLLOVER_ON_WRITE_FIELD.getPreferredName(), dataStream.getFailureComponent().isRolloverOnWrite());
            indicesToXContent(builder, dataStream.getFailureIndices());
            addAutoShardingEvent(builder, params, dataStream.getFailureComponent().getAutoShardingEvent());
            builder.endObject();
            builder.endObject();
            return builder;
        }
@@ -894,7 +894,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement

    @Override
    public Index getConcreteWriteIndex(IndexAbstraction ia, ProjectMetadata project) {
        if (DataStream.isFailureStoreFeatureFlagEnabled() && writeToFailureStore) {
        if (writeToFailureStore) {
            if (ia.isDataStreamRelated() == false) {
                throw new ElasticsearchException(
                    "Attempting to write a document to a failure store but the targeted index is not a data stream"
@@ -10,7 +10,6 @@ package org.elasticsearch.action.support;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationCategory;

@@ -827,14 +826,14 @@ public record IndicesOptions(
     * @return Whether selectors (::) are allowed in the index expression.
     */
    public boolean allowSelectors() {
        return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.allowSelectors();
        return gatekeeperOptions.allowSelectors();
    }

    /**
     * @return true when selectors should be included in the resolution, false otherwise.
     */
    public boolean includeFailureIndices() {
        return DataStream.isFailureStoreFeatureFlagEnabled() && gatekeeperOptions.includeFailureIndices();
        return gatekeeperOptions.includeFailureIndices();
    }

    /**

@@ -1135,19 +1134,6 @@ public record IndicesOptions(
    }

    public static IndicesOptions fromMap(Map<String, Object> map, IndicesOptions defaultSettings) {
        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            return fromParameters(
                map.containsKey(WildcardOptions.EXPAND_WILDCARDS) ? map.get(WildcardOptions.EXPAND_WILDCARDS) : map.get("expandWildcards"),
                map.containsKey(ConcreteTargetOptions.IGNORE_UNAVAILABLE)
                    ? map.get(ConcreteTargetOptions.IGNORE_UNAVAILABLE)
                    : map.get("ignoreUnavailable"),
                map.containsKey(WildcardOptions.ALLOW_NO_INDICES) ? map.get(WildcardOptions.ALLOW_NO_INDICES) : map.get("allowNoIndices"),
                map.containsKey(GatekeeperOptions.IGNORE_THROTTLED)
                    ? map.get(GatekeeperOptions.IGNORE_THROTTLED)
                    : map.get("ignoreThrottled"),
                defaultSettings
            );
        }
        return fromParameters(
            map.containsKey(WildcardOptions.EXPAND_WILDCARDS) ? map.get(WildcardOptions.EXPAND_WILDCARDS) : map.get("expandWildcards"),
            map.containsKey(ConcreteTargetOptions.IGNORE_UNAVAILABLE)

@@ -1469,11 +1455,10 @@ public record IndicesOptions(
            + ignoreAliases()
            + ", ignore_throttled="
            + ignoreThrottled()
            // Until the feature flag is removed we access the field directly from the gatekeeper options.
            + (DataStream.isFailureStoreFeatureFlagEnabled() ? ", allow_selectors=" + gatekeeperOptions().allowSelectors() : "")
            + (DataStream.isFailureStoreFeatureFlagEnabled()
                ? ", include_failure_indices=" + gatekeeperOptions().includeFailureIndices()
                : "")
            + ", allow_selectors="
            + allowSelectors()
            + ", include_failure_indices="
            + includeFailureIndices()
            + ']';
    }
}
@@ -383,9 +383,7 @@ public class ComposableIndexTemplate implements SimpleDiffable<ComposableIndexTe
    static {
        PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), HIDDEN);
        PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ALLOW_CUSTOM_ROUTING);
        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE);
        }
        PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE);
    }

    private final boolean hidden;
@@ -33,7 +33,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.time.DateFormatter;
import org.elasticsearch.common.time.DateFormatters;
import org.elasticsearch.common.util.FeatureFlag;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.TimeValue;
@@ -76,16 +75,11 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO

    private static final Logger LOGGER = LogManager.getLogger(DataStream.class);

    public static final boolean FAILURE_STORE_FEATURE_FLAG = new FeatureFlag("failure_store").isEnabled();
    public static final NodeFeature DATA_STREAM_FAILURE_STORE_FEATURE = new NodeFeature("data_stream.failure_store");
    public static final TransportVersion ADDED_FAILURE_STORE_TRANSPORT_VERSION = TransportVersions.V_8_12_0;
    public static final TransportVersion ADDED_AUTO_SHARDING_EVENT_VERSION = TransportVersions.V_8_14_0;
    public static final TransportVersion ADD_DATA_STREAM_OPTIONS_VERSION = TransportVersions.V_8_16_0;

    public static boolean isFailureStoreFeatureFlagEnabled() {
        return FAILURE_STORE_FEATURE_FLAG;
    }

    public static final String BACKING_INDEX_PREFIX = ".ds-";
    public static final String FAILURE_STORE_PREFIX = ".fs-";
    public static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd");
@@ -214,7 +208,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
        var lifecycle = in.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)
            ? in.readOptionalWriteable(DataStreamLifecycle::new)
            : null;
        // This boolean flag has been moved in data stream options
        // TODO: clear out the failure_store field, which is redundant https://github.com/elastic/elasticsearch/issues/127071
        var failureStoreEnabled = in.getTransportVersion()
            .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, TransportVersions.V_8_16_0) ? in.readBoolean() : false;
        var failureIndices = in.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)
@@ -942,8 +936,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
        }

        List<Index> reconciledFailureIndices = this.failureIndices.indices;
        if (DataStream.isFailureStoreFeatureFlagEnabled()
            && isAnyIndexMissing(failureIndices.indices, snapshotMetadataBuilder, indicesInSnapshot)) {
        if (isAnyIndexMissing(failureIndices.indices, snapshotMetadataBuilder, indicesInSnapshot)) {
            reconciledFailureIndices = new ArrayList<>(this.failureIndices.indices);
            failureIndicesChanged = reconciledFailureIndices.removeIf(x -> indicesInSnapshot.contains(x.getName()) == false);
        }
@@ -1010,8 +1003,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
        LongSupplier nowSupplier,
        DataStreamGlobalRetention globalRetention
    ) {
        if (DataStream.isFailureStoreFeatureFlagEnabled() == false
            || getFailuresLifecycle() == null
        if (getFailuresLifecycle() == null
            || getFailuresLifecycle().enabled() == false
            || getFailuresLifecycle().getEffectiveDataRetention(globalRetention, isInternal()) == null) {
            return List.of();
@@ -1252,6 +1244,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
        }
        if (out.getTransportVersion()
            .between(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION, DataStream.ADD_DATA_STREAM_OPTIONS_VERSION)) {
            // TODO: clear out the failure_store field, which is redundant https://github.com/elastic/elasticsearch/issues/127071
            out.writeBoolean(isFailureStoreExplicitlyEnabled());
        }
        if (out.getTransportVersion().onOrAfter(DataStream.ADDED_FAILURE_STORE_TRANSPORT_VERSION)) {
@@ -1283,6 +1276,7 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
    public static final ParseField ALLOW_CUSTOM_ROUTING = new ParseField("allow_custom_routing");
    public static final ParseField INDEX_MODE = new ParseField("index_mode");
    public static final ParseField LIFECYCLE = new ParseField("lifecycle");
    // TODO: clear out the failure_store field, which is redundant https://github.com/elastic/elasticsearch/issues/127071
    public static final ParseField FAILURE_STORE_FIELD = new ParseField("failure_store");
    public static final ParseField FAILURE_INDICES_FIELD = new ParseField("failure_indices");
    public static final ParseField ROLLOVER_ON_WRITE_FIELD = new ParseField("rollover_on_write");
@@ -1292,29 +1286,9 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
    public static final ParseField DATA_STREAM_OPTIONS_FIELD = new ParseField("options");

    @SuppressWarnings("unchecked")
    private static final ConstructingObjectParser<DataStream, Void> PARSER = new ConstructingObjectParser<>("data_stream", args -> {
        // Fields behind a feature flag need to be parsed last otherwise the parser will fail when the feature flag is disabled.
        // Until the feature flag is removed we keep them separately to be mindful of this.
        boolean failureStoreEnabled = DataStream.isFailureStoreFeatureFlagEnabled() && args[12] != null && (boolean) args[12];
        DataStreamIndices failureIndices = DataStream.isFailureStoreFeatureFlagEnabled()
            ? new DataStreamIndices(
                FAILURE_STORE_PREFIX,
                args[13] != null ? (List<Index>) args[13] : List.of(),
                args[14] != null && (boolean) args[14],
                (DataStreamAutoShardingEvent) args[15]
            )
            : new DataStreamIndices(FAILURE_STORE_PREFIX, List.of(), false, null);
        // We cannot distinguish if failure store was explicitly disabled or not. Given that failure store
        // is still behind a feature flag in previous version we use the default value instead of explicitly disabling it.
        DataStreamOptions dataStreamOptions = DataStreamOptions.EMPTY;
        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            if (args[16] != null) {
                dataStreamOptions = (DataStreamOptions) args[16];
            } else if (failureStoreEnabled) {
                dataStreamOptions = DataStreamOptions.FAILURE_STORE_ENABLED;
            }
        }
        return new DataStream(
    private static final ConstructingObjectParser<DataStream, Void> PARSER = new ConstructingObjectParser<>(
        "data_stream",
        args -> new DataStream(
            (String) args[0],
            (Long) args[2],
            (Map<String, Object>) args[3],
@@ -1325,16 +1299,21 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
            args[7] != null && (boolean) args[7],
            args[8] != null ? IndexMode.fromString((String) args[8]) : null,
            (DataStreamLifecycle) args[9],
            dataStreamOptions,
            args[16] != null ? (DataStreamOptions) args[16] : DataStreamOptions.EMPTY,
            new DataStreamIndices(
                BACKING_INDEX_PREFIX,
                (List<Index>) args[1],
                args[10] != null && (boolean) args[10],
                (DataStreamAutoShardingEvent) args[11]
            ),
            failureIndices
        );
    });
            new DataStreamIndices(
                FAILURE_STORE_PREFIX,
                args[13] != null ? (List<Index>) args[13] : List.of(),
                args[14] != null && (boolean) args[14],
                (DataStreamAutoShardingEvent) args[15]
            )
        )
    );

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), NAME_FIELD);
@@ -1361,27 +1340,24 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
            (p, c) -> DataStreamAutoShardingEvent.fromXContent(p),
            AUTO_SHARDING_FIELD
        );
        // The fields behind the feature flag should always be last.
        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            // Should be removed after backport
            PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD);
            PARSER.declareObjectArray(
                ConstructingObjectParser.optionalConstructorArg(),
                (p, c) -> Index.fromXContent(p),
                FAILURE_INDICES_FIELD
            );
            PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_ROLLOVER_ON_WRITE_FIELD);
            PARSER.declareObject(
                ConstructingObjectParser.optionalConstructorArg(),
                (p, c) -> DataStreamAutoShardingEvent.fromXContent(p),
                FAILURE_AUTO_SHARDING_FIELD
            );
            PARSER.declareObject(
                ConstructingObjectParser.optionalConstructorArg(),
                (p, c) -> DataStreamOptions.fromXContent(p),
                DATA_STREAM_OPTIONS_FIELD
            );
        }
        // TODO: clear out the failure_store field, which is redundant https://github.com/elastic/elasticsearch/issues/127071
        PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_STORE_FIELD);
        PARSER.declareObjectArray(
            ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> Index.fromXContent(p),
            FAILURE_INDICES_FIELD
        );
        PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FAILURE_ROLLOVER_ON_WRITE_FIELD);
        PARSER.declareObject(
            ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> DataStreamAutoShardingEvent.fromXContent(p),
            FAILURE_AUTO_SHARDING_FIELD
        );
        PARSER.declareObject(
            ConstructingObjectParser.optionalConstructorArg(),
            (p, c) -> DataStreamOptions.fromXContent(p),
            DATA_STREAM_OPTIONS_FIELD
        );
    }

    public static DataStream fromXContent(XContentParser parser) throws IOException {
@@ -1417,20 +1393,18 @@ public final class DataStream implements SimpleDiffable<DataStream>, ToXContentO
        builder.field(REPLICATED_FIELD.getPreferredName(), replicated);
        builder.field(SYSTEM_FIELD.getPreferredName(), system);
        builder.field(ALLOW_CUSTOM_ROUTING.getPreferredName(), allowCustomRouting);
        if (DataStream.isFailureStoreFeatureFlagEnabled()) {
            if (failureIndices.indices.isEmpty() == false) {
                builder.xContentList(FAILURE_INDICES_FIELD.getPreferredName(), failureIndices.indices);
            }
            builder.field(FAILURE_ROLLOVER_ON_WRITE_FIELD.getPreferredName(), failureIndices.rolloverOnWrite);
            if (failureIndices.autoShardingEvent != null) {
                builder.startObject(FAILURE_AUTO_SHARDING_FIELD.getPreferredName());
                failureIndices.autoShardingEvent.toXContent(builder, params);
                builder.endObject();
            }
            if (dataStreamOptions.isEmpty() == false) {
                builder.field(DATA_STREAM_OPTIONS_FIELD.getPreferredName());
                dataStreamOptions.toXContent(builder, params);
            }
        if (failureIndices.indices.isEmpty() == false) {
            builder.xContentList(FAILURE_INDICES_FIELD.getPreferredName(), failureIndices.indices);
        }
        builder.field(FAILURE_ROLLOVER_ON_WRITE_FIELD.getPreferredName(), failureIndices.rolloverOnWrite);
        if (failureIndices.autoShardingEvent != null) {
            builder.startObject(FAILURE_AUTO_SHARDING_FIELD.getPreferredName());
            failureIndices.autoShardingEvent.toXContent(builder, params);
            builder.endObject();
        }
        if (dataStreamOptions.isEmpty() == false) {
            builder.field(DATA_STREAM_OPTIONS_FIELD.getPreferredName());
            dataStreamOptions.toXContent(builder, params);
        }
        if (indexMode != null) {
            builder.field(INDEX_MODE.getPreferredName(), indexMode);
@@ -143,7 +143,7 @@ public class DataStreamAction implements Writeable, ToXContentObject {
builder.startObject(type.fieldName);
builder.field(DATA_STREAM.getPreferredName(), dataStream);
builder.field(INDEX.getPreferredName(), index);
if (DataStream.isFailureStoreFeatureFlagEnabled() && failureStore) {
if (failureStore) {
builder.field(FAILURE_STORE.getPreferredName(), failureStore);
}
builder.endObject();

@@ -181,14 +181,12 @@ public class DataStreamAction implements Writeable, ToXContentObject {
ObjectParser.ValueType.STRING
);
ADD_BACKING_INDEX_PARSER.declareField(DataStreamAction::setIndex, XContentParser::text, INDEX, ObjectParser.ValueType.STRING);
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
ADD_BACKING_INDEX_PARSER.declareField(
DataStreamAction::setFailureStore,
XContentParser::booleanValue,
FAILURE_STORE,
ObjectParser.ValueType.BOOLEAN
);
}
ADD_BACKING_INDEX_PARSER.declareField(
DataStreamAction::setFailureStore,
XContentParser::booleanValue,
FAILURE_STORE,
ObjectParser.ValueType.BOOLEAN
);
REMOVE_BACKING_INDEX_PARSER.declareField(
DataStreamAction::setDataStream,
XContentParser::text,

@@ -196,14 +194,12 @@ public class DataStreamAction implements Writeable, ToXContentObject {
ObjectParser.ValueType.STRING
);
REMOVE_BACKING_INDEX_PARSER.declareField(DataStreamAction::setIndex, XContentParser::text, INDEX, ObjectParser.ValueType.STRING);
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
REMOVE_BACKING_INDEX_PARSER.declareField(
DataStreamAction::setFailureStore,
XContentParser::booleanValue,
FAILURE_STORE,
ObjectParser.ValueType.BOOLEAN
);
}
REMOVE_BACKING_INDEX_PARSER.declareField(
DataStreamAction::setFailureStore,
XContentParser::booleanValue,
FAILURE_STORE,
ObjectParser.ValueType.BOOLEAN
);
}

private static ObjectParser<DataStreamAction, Void> parser(String name, Supplier<DataStreamAction> supplier) {

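The `failure_store` flag parsed by the add and remove backing index actions above carries through to the modify data streams API. A minimal sketch of attaching an index to a data stream's failure store, assuming a hypothetical data stream `logs-myapp-default` and a hypothetical failure index name:

[source,yaml]
----
POST _data_stream/_modify
{
  "actions": [
    {
      "add_backing_index": {
        "data_stream": "logs-myapp-default",
        "index": ".fs-logs-myapp-default-000001",
        "failure_store": true
      }
    }
  ]
}
----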
@@ -46,12 +46,10 @@ public class DataStreamFailureStoreSettings {
*/
public static DataStreamFailureStoreSettings create(ClusterSettings clusterSettings) {
DataStreamFailureStoreSettings dataStreamFailureStoreSettings = new DataStreamFailureStoreSettings();
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
clusterSettings.initializeAndWatch(
DATA_STREAM_FAILURE_STORED_ENABLED_SETTING,
dataStreamFailureStoreSettings::setEnabledByNamePatterns
);
}
clusterSettings.initializeAndWatch(
DATA_STREAM_FAILURE_STORED_ENABLED_SETTING,
dataStreamFailureStoreSettings::setEnabledByNamePatterns
);
return dataStreamFailureStoreSettings;
}

@@ -61,7 +59,6 @@ public class DataStreamFailureStoreSettings {
* @param name The data stream name
*/
public boolean failureStoreEnabledForDataStreamName(String name) {
assert DataStream.isFailureStoreFeatureFlagEnabled() : "Testing whether failure store is enabled should be behind by feature flag";
return failureStoreEnabledByName.test(name);
}

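The setting watched in `create` above turns on failure store redirection by data stream name pattern, even for data streams whose templates do not opt in. A sketch of setting it dynamically, assuming the setting key is `data_streams.failure_store.enabled` and that it accepts a comma-separated list of name patterns:

[source,yaml]
----
PUT _cluster/settings
{
  "persistent": {
    "data_streams.failure_store.enabled": "logs-*,metrics-*"
  }
}
----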
@ -422,10 +422,6 @@ public class MetadataCreateDataStreamService {
|
|||
String failureStoreIndexName,
|
||||
@Nullable BiConsumer<ProjectMetadata.Builder, IndexMetadata> metadataTransformer
|
||||
) throws Exception {
|
||||
if (DataStream.isFailureStoreFeatureFlagEnabled() == false) {
|
||||
return currentState;
|
||||
}
|
||||
|
||||
var indexSettings = DataStreamFailureStoreDefinition.buildFailureStoreIndexSettings(nodeSettings);
|
||||
|
||||
CreateIndexClusterStateUpdateRequest createIndexRequest = new CreateIndexClusterStateUpdateRequest(
|
||||
|
|
|
@ -1944,11 +1944,10 @@ public class ProjectMetadata implements Iterable<IndexMetadata>, Diffable<Projec
|
|||
private static boolean assertContainsIndexIfDataStream(DataStream parent, IndexMetadata indexMetadata) {
|
||||
assert parent == null
|
||||
|| parent.getIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))
|
||||
|| (DataStream.isFailureStoreFeatureFlagEnabled()
|
||||
&& parent.getFailureComponent()
|
||||
.getIndices()
|
||||
.stream()
|
||||
.anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName())))
|
||||
|| parent.getFailureComponent()
|
||||
.getIndices()
|
||||
.stream()
|
||||
.anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName()))
|
||||
: "Expected data stream [" + parent.getName() + "] to contain index " + indexMetadata.getIndex();
|
||||
return true;
|
||||
}
|
||||
|
@ -1970,10 +1969,8 @@ public class ProjectMetadata implements Iterable<IndexMetadata>, Diffable<Projec
|
|||
for (Index i : dataStream.getIndices()) {
|
||||
indexToDataStreamLookup.put(i.getName(), dataStream);
|
||||
}
|
||||
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
|
||||
for (Index i : dataStream.getFailureIndices()) {
|
||||
indexToDataStreamLookup.put(i.getName(), dataStream);
|
||||
}
|
||||
for (Index i : dataStream.getFailureIndices()) {
|
||||
indexToDataStreamLookup.put(i.getName(), dataStream);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -314,9 +314,7 @@ public class Template implements SimpleDiffable<Template>, ToXContentObject {
builder.field(LIFECYCLE.getPreferredName());
lifecycle.toXContent(builder, params, rolloverConfiguration, null, false);
}
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
dataStreamOptions.toXContent(builder, params, DATA_STREAM_OPTIONS.getPreferredName());
}
dataStreamOptions.toXContent(builder, params, DATA_STREAM_OPTIONS.getPreferredName());
builder.endObject();
return builder;
}

|
|
@ -38,7 +38,6 @@ import org.elasticsearch.cluster.coordination.LeaderChecker;
|
|||
import org.elasticsearch.cluster.coordination.MasterHistory;
|
||||
import org.elasticsearch.cluster.coordination.NoMasterBlockService;
|
||||
import org.elasticsearch.cluster.coordination.Reconfigurator;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.cluster.metadata.DataStreamFailureStoreSettings;
|
||||
import org.elasticsearch.cluster.metadata.DataStreamGlobalRetentionSettings;
|
||||
import org.elasticsearch.cluster.metadata.DataStreamLifecycle;
|
||||
|
@ -139,12 +138,8 @@ import org.elasticsearch.transport.TransportService;
|
|||
import org.elasticsearch.transport.TransportSettings;
|
||||
import org.elasticsearch.watcher.ResourceWatcherService;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static java.util.stream.Collectors.toSet;
|
||||
|
||||
/**
|
||||
* Encapsulates all valid cluster level settings.
|
||||
|
@@ -215,7 +210,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
}
}

public static final Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Stream.of(
public static final Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Set.of(
AllocationBalancingRoundSummaryService.ENABLE_BALANCER_ROUND_SUMMARIES_SETTING,
AllocationBalancingRoundSummaryService.BALANCER_ROUND_SUMMARIES_LOG_INTERVAL_SETTING,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,

@@ -637,8 +632,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
DataStreamGlobalRetentionSettings.DATA_STREAMS_DEFAULT_RETENTION_SETTING,
DataStreamGlobalRetentionSettings.DATA_STREAMS_MAX_RETENTION_SETTING,
ShardsAvailabilityHealthIndicatorService.REPLICA_UNASSIGNED_BUFFER_TIME,
DataStream.isFailureStoreFeatureFlagEnabled() ? DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING : null,
DataStreamFailureStoreSettings.DATA_STREAM_FAILURE_STORED_ENABLED_SETTING,
IndexingStatsSettings.RECENT_WRITE_LOAD_HALF_LIFE_SETTING,
TransportGetAllocationStatsAction.CACHE_TTL_SETTING
).filter(Objects::nonNull).collect(toSet());
);
}

|
|
@ -12,7 +12,6 @@ package org.elasticsearch.rest.action.admin.indices;
|
|||
import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction;
|
||||
import org.elasticsearch.client.internal.node.NodeClient;
|
||||
import org.elasticsearch.cluster.metadata.ComponentTemplate;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.Scope;
|
||||
|
@ -59,6 +58,6 @@ public class RestPutComponentTemplateAction extends BaseRestHandler {
|
|||
|
||||
@Override
|
||||
public Set<String> supportedCapabilities() {
|
||||
return DataStream.isFailureStoreFeatureFlagEnabled() ? capabilities : Set.of();
|
||||
return capabilities;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,7 +12,6 @@ package org.elasticsearch.rest.action.admin.indices;
|
|||
import org.elasticsearch.action.admin.indices.template.put.TransportPutComposableIndexTemplateAction;
|
||||
import org.elasticsearch.client.internal.node.NodeClient;
|
||||
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.Scope;
|
||||
|
@ -61,6 +60,6 @@ public class RestPutComposableIndexTemplateAction extends BaseRestHandler {
|
|||
|
||||
@Override
|
||||
public Set<String> supportedCapabilities() {
|
||||
return DataStream.isFailureStoreFeatureFlagEnabled() ? capabilities : Set.of();
|
||||
return capabilities;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,7 +12,6 @@ package org.elasticsearch.rest.action.admin.indices;
|
|||
import org.elasticsearch.action.admin.indices.rollover.RolloverRequest;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.client.internal.node.NodeClient;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
import org.elasticsearch.rest.RestRequest;
|
||||
import org.elasticsearch.rest.Scope;
|
||||
|
@@ -43,11 +42,7 @@ public class RestRolloverIndexAction extends BaseRestHandler {

@Override
public Set<String> supportedCapabilities() {
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
return Set.of("return-404-on-missing-target", "index_expression_selectors");
} else {
return Set.of("return-404-on-missing-target");
}
return Set.of("return-404-on-missing-target", "index_expression_selectors");
}

@Override

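The `index_expression_selectors` capability advertised above corresponds to the `::failures` selector that the rollover API now accepts on every build. A sketch of rolling over a failure store, assuming a hypothetical data stream `logs-myapp-default` with its failure store enabled:

[source,yaml]
----
POST logs-myapp-default::failures/_rollover
----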
@ -18,7 +18,6 @@ import org.elasticsearch.action.bulk.BulkShardRequest;
|
|||
import org.elasticsearch.action.bulk.IncrementalBulkService;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.client.internal.node.NodeClient;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.bytes.CompositeBytesReference;
|
||||
import org.elasticsearch.common.bytes.ReleasableBytesReference;
|
||||
|
@ -69,7 +68,7 @@ public class RestBulkAction extends BaseRestHandler {
|
|||
public RestBulkAction(Settings settings, IncrementalBulkService bulkHandler) {
|
||||
this.allowExplicitIndex = MULTI_ALLOW_EXPLICIT_INDEX.get(settings);
|
||||
this.bulkHandler = bulkHandler;
|
||||
this.capabilities = DataStream.isFailureStoreFeatureFlagEnabled() ? Set.of(FAILURE_STORE_STATUS_CAPABILITY) : Set.of();
|
||||
this.capabilities = Set.of(FAILURE_STORE_STATUS_CAPABILITY);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -15,7 +15,6 @@ import org.elasticsearch.action.DocWriteResponse;
|
|||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.client.internal.node.NodeClient;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.common.bytes.ReleasableBytesReference;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.rest.BaseRestHandler;
|
||||
|
@ -40,9 +39,7 @@ public class RestIndexAction extends BaseRestHandler {
|
|||
static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Specifying types in document "
|
||||
+ "index requests is deprecated, use the typeless endpoints instead (/{index}/_doc/{id}, /{index}/_doc, "
|
||||
+ "or /{index}/_create/{id}).";
|
||||
private final Set<String> capabilities = DataStream.isFailureStoreFeatureFlagEnabled()
|
||||
? Set.of(FAILURE_STORE_STATUS_CAPABILITY)
|
||||
: Set.of();
|
||||
private final Set<String> capabilities = Set.of(FAILURE_STORE_STATUS_CAPABILITY);
|
||||
|
||||
@Override
|
||||
public List<Route> routes() {
|
||||
|
|
|
@ -425,13 +425,10 @@ public final class RestoreService implements ClusterStateApplier {
|
|||
.stream()
|
||||
.flatMap(ds -> ds.getIndices().stream().map(idx -> new Tuple<>(ds.isSystem(), idx.getName())))
|
||||
.collect(Collectors.partitioningBy(Tuple::v1, Collectors.mapping(Tuple::v2, Collectors.toSet())));
|
||||
Map<Boolean, Set<String>> failureIndices = Map.of();
|
||||
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
|
||||
failureIndices = dataStreamsToRestore.values()
|
||||
.stream()
|
||||
.flatMap(ds -> ds.getFailureIndices().stream().map(idx -> new Tuple<>(ds.isSystem(), idx.getName())))
|
||||
.collect(Collectors.partitioningBy(Tuple::v1, Collectors.mapping(Tuple::v2, Collectors.toSet())));
|
||||
}
|
||||
Map<Boolean, Set<String>> failureIndices = dataStreamsToRestore.values()
|
||||
.stream()
|
||||
.flatMap(ds -> ds.getFailureIndices().stream().map(idx -> new Tuple<>(ds.isSystem(), idx.getName())))
|
||||
.collect(Collectors.partitioningBy(Tuple::v1, Collectors.mapping(Tuple::v2, Collectors.toSet())));
|
||||
systemDataStreamIndices = Sets.union(backingIndices.getOrDefault(true, Set.of()), failureIndices.getOrDefault(true, Set.of()));
|
||||
nonSystemDataStreamBackingIndices = backingIndices.getOrDefault(false, Set.of());
|
||||
nonSystemDataStreamFailureIndices = failureIndices.getOrDefault(false, Set.of());
|
||||
|
@ -812,13 +809,11 @@ public final class RestoreService implements ClusterStateApplier {
|
|||
.stream()
|
||||
.map(i -> metadata.get(renameIndex(i.getName(), request, true, false)).getIndex())
|
||||
.toList();
|
||||
List<Index> updatedFailureIndices = DataStream.isFailureStoreFeatureFlagEnabled()
|
||||
? dataStream.getFailureComponent()
|
||||
.getIndices()
|
||||
.stream()
|
||||
.map(i -> metadata.get(renameIndex(i.getName(), request, false, true)).getIndex())
|
||||
.toList()
|
||||
: List.of();
|
||||
List<Index> updatedFailureIndices = dataStream.getFailureComponent()
|
||||
.getIndices()
|
||||
.stream()
|
||||
.map(i -> metadata.get(renameIndex(i.getName(), request, false, true)).getIndex())
|
||||
.toList();
|
||||
return dataStream.copy()
|
||||
.setName(dataStreamName)
|
||||
.setBackingIndices(dataStream.getDataComponent().copy().setIndices(updatedIndices).build())
|
||||
|
|
|
@ -60,7 +60,6 @@ import org.elasticsearch.test.client.NoOpNodeClient;
|
|||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.junit.After;
|
||||
import org.junit.Assume;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -392,8 +391,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* A bulk operation to a data stream with a failure store enabled should redirect any shard level failures to the failure store.
|
||||
*/
|
||||
public void testFailingEntireShardRedirectsToFailureStore() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
|
||||
|
@ -419,8 +416,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* failure store.
|
||||
*/
|
||||
public void testFailingDocumentRedirectsToFailureStore() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
|
||||
|
@ -446,8 +441,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* failure store if the failure store node feature is not on every node in the cluster
|
||||
*/
|
||||
public void testFailingDocumentIgnoredByFailureStoreWhenFeatureIsDisabled() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
|
||||
|
@ -481,8 +474,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testFailingDocumentRedirectsToFailureStoreWhenEnabledByClusterSetting() {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(
|
||||
new IndexRequest(fsBySettingsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE)
|
||||
|
@ -538,8 +529,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* a shard-level failure while writing to the failure store indices.
|
||||
*/
|
||||
public void testFailureStoreShardFailureRejectsDocument() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
|
||||
|
@ -578,8 +567,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* instead will simply report its original failure in the response, with the conversion failure present as a suppressed exception.
|
||||
*/
|
||||
public void testFailedDocumentCanNotBeConvertedFails() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
|
||||
|
@ -614,8 +601,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* returns an unblocked cluster, the redirection of failure documents should proceed and not return early.
|
||||
*/
|
||||
public void testRetryableBlockAcceptsFailureStoreDocument() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
|
||||
|
@ -707,8 +692,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* non-retryable block when the redirected documents would be sent to the shard-level action.
|
||||
*/
|
||||
public void testBlockedClusterRejectsFailureStoreDocument() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
|
||||
|
@ -760,8 +743,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* retryable block to clear when the redirected documents would be sent to the shard-level action.
|
||||
*/
|
||||
public void testOperationTimeoutRejectsFailureStoreDocument() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
|
||||
|
@ -821,8 +802,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* for a retryable block to clear when the redirected documents would be sent to the shard-level action.
|
||||
*/
|
||||
public void testNodeClosureRejectsFailureStoreDocument() {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(new IndexRequest(fsDataStreamName).id("1").source(Map.of("key", "val")).opType(DocWriteRequest.OpType.CREATE));
|
||||
|
@ -866,8 +845,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* rollover, it first needs to roll over the failure store and then redirect the failure to the <i>new</i> failure index.
|
||||
*/
|
||||
public void testLazilyRollingOverFailureStore() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(
|
||||
|
@ -925,8 +902,6 @@ public class BulkOperationTests extends ESTestCase {
|
|||
* should be added to the list of suppressed causes in the <code>BulkItemResponse</code>.
|
||||
*/
|
||||
public void testFailureWhileRollingOverFailureStore() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
// Requests that go to two separate shards
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(
|
||||
|
|
|
@ -90,7 +90,6 @@ import static org.hamcrest.Matchers.equalTo;
|
|||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
import static org.junit.Assume.assumeThat;
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
@ -453,8 +452,6 @@ public class TransportBulkActionTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testResolveFailureStoreFromMetadata() throws Exception {
|
||||
assumeThat(DataStream.isFailureStoreFeatureFlagEnabled(), is(true));
|
||||
|
||||
String dataStreamWithFailureStoreEnabled = "test-data-stream-failure-enabled";
|
||||
String dataStreamWithFailureStoreDefault = "test-data-stream-failure-default";
|
||||
String dataStreamWithFailureStoreDisabled = "test-data-stream-failure-disabled";
|
||||
|
@ -544,8 +541,6 @@ public class TransportBulkActionTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testResolveFailureStoreFromTemplate() throws Exception {
|
||||
assumeThat(DataStream.isFailureStoreFeatureFlagEnabled(), is(true));
|
||||
|
||||
String dsTemplateWithFailureStoreEnabled = "test-data-stream-failure-enabled";
|
||||
String dsTemplateWithFailureStoreDefault = "test-data-stream-failure-default";
|
||||
String dsTemplateWithFailureStoreDisabled = "test-data-stream-failure-disabled";
|
||||
|
|
|
@ -491,12 +491,7 @@ public final class DataStreamTestHelper {
|
|||
"template_1",
|
||||
ComposableIndexTemplate.builder()
|
||||
.indexPatterns(List.of("*"))
|
||||
.template(
|
||||
Template.builder()
|
||||
.dataStreamOptions(
|
||||
DataStream.isFailureStoreFeatureFlagEnabled() ? createDataStreamOptionsTemplate(storeFailures) : null
|
||||
)
|
||||
)
|
||||
.template(Template.builder().dataStreamOptions(createDataStreamOptionsTemplate(storeFailures)))
|
||||
.dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate())
|
||||
.build()
|
||||
);
|
||||
|
@ -512,7 +507,7 @@ public final class DataStreamTestHelper {
|
|||
allIndices.addAll(backingIndices);
|
||||
|
||||
List<IndexMetadata> failureStores = new ArrayList<>();
|
||||
if (DataStream.isFailureStoreFeatureFlagEnabled() && Boolean.TRUE.equals(storeFailures)) {
|
||||
if (Boolean.TRUE.equals(storeFailures)) {
|
||||
for (int failureStoreNumber = 1; failureStoreNumber <= dsTuple.v2(); failureStoreNumber++) {
|
||||
failureStores.add(
|
||||
createIndexMetadata(
|
||||
|
|
|
@@ -17,7 +17,6 @@ import org.elasticsearch.test.cluster.util.Version;
*/
public enum FeatureFlag {
TIME_SERIES_MODE("es.index_mode_feature_flag_registered=true", Version.fromString("8.0.0"), null),
FAILURE_STORE_ENABLED("es.failure_store_feature_flag_enabled=true", Version.fromString("8.12.0"), null),
SUB_OBJECTS_AUTO_ENABLED("es.sub_objects_auto_feature_flag_enabled=true", Version.fromString("8.16.0"), null),
DOC_VALUES_SKIPPER("es.doc_values_skipper_feature_flag_enabled=true", Version.fromString("8.18.1"), null),
USE_LUCENE101_POSTINGS_FORMAT("es.use_lucene101_postings_format_feature_flag_enabled=true", Version.fromString("9.1.0"), null);

@@ -153,12 +153,6 @@ tasks.named("javaRestTest") {
usesDefaultDistribution("uses the _xpack api")
}

if (buildParams.snapshotBuild == false) {
tasks.withType(Test).configureEach {
systemProperty 'es.failure_store_feature_flag_enabled', 'true'
}
}

if (buildParams.inFipsJvm) {
// Test clusters run with security disabled
tasks.named("javaRestTest").configure { enabled = false }

|
|
@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.SecureString;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||
import org.elasticsearch.xcontent.XContentParser;
|
||||
|
@ -37,7 +36,6 @@ public class DataStreamRestIT extends ESRestTestCase {
|
|||
.setting("xpack.security.enabled", "true")
|
||||
.setting("xpack.license.self_generated.type", "trial")
|
||||
.setting("indices.lifecycle.history_index_enabled", "false")
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.keystore("bootstrap.password", "x-pack-test-password")
|
||||
.user("x_pack_rest_user", "x-pack-test-password")
|
||||
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
||||
|
|
|
@ -19,7 +19,6 @@ import org.elasticsearch.license.License;
|
|||
import org.elasticsearch.license.LicenseSettings;
|
||||
import org.elasticsearch.license.TestUtils;
|
||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||
import org.elasticsearch.xcontent.ToXContent;
|
||||
import org.elasticsearch.xcontent.XContentBuilder;
|
||||
|
@ -46,7 +45,6 @@ public class LicenseInstallationIT extends ESRestTestCase {
|
|||
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
|
||||
.setting("xpack.security.enabled", "true")
|
||||
.setting("xpack.license.self_generated.type", "trial")
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.keystore("bootstrap.password", "x-pack-test-password")
|
||||
.user("x_pack_rest_user", "x-pack-test-password")
|
||||
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
||||
|
|
|
@ -13,7 +13,6 @@ import org.elasticsearch.common.settings.SecureString;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||
import org.junit.ClassRule;
|
||||
|
@ -27,7 +26,6 @@ public class StackTemplatesRestIT extends ESRestTestCase {
|
|||
.distribution(DistributionType.DEFAULT)
|
||||
.setting("xpack.security.enabled", "true")
|
||||
.setting("xpack.license.self_generated.type", "trial")
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.keystore("bootstrap.password", "x-pack-test-password")
|
||||
.user("x_pack_rest_user", "x-pack-test-password")
|
||||
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
||||
|
|
|
@@ -52,16 +52,14 @@ public class DataStreamUsageTransportAction extends XPackUsageFeatureTransportAc
long failureIndicesCounter = 0;
for (DataStream ds : dataStreams.values()) {
backingIndicesCounter += ds.getIndices().size();
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
if (ds.isFailureStoreExplicitlyEnabled()) {
failureStoreExplicitlyEnabledCounter++;
}
if (ds.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings)) {
failureStoreEffectivelyEnabledCounter++;
}
if (ds.getFailureIndices().isEmpty() == false) {
failureIndicesCounter += ds.getFailureIndices().size();
}
if (ds.isFailureStoreExplicitlyEnabled()) {
failureStoreExplicitlyEnabledCounter++;
}
if (ds.isFailureStoreEffectivelyEnabled(dataStreamFailureStoreSettings)) {
failureStoreEffectivelyEnabledCounter++;
}
if (ds.getFailureIndices().isEmpty() == false) {
failureIndicesCounter += ds.getFailureIndices().size();
}
}
final DataStreamFeatureSetUsage.DataStreamStats stats = new DataStreamFeatureSetUsage.DataStreamStats(

|
|
@ -9,7 +9,6 @@ package org.elasticsearch.xpack.core.datastreams;
|
|||
|
||||
import org.elasticsearch.TransportVersion;
|
||||
import org.elasticsearch.TransportVersions;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@@ -50,13 +49,11 @@ public class DataStreamFeatureSetUsage extends XPackFeatureUsage {
super.innerXContent(builder, params);
builder.field("data_streams", streamStats.totalDataStreamCount);
builder.field("indices_count", streamStats.indicesBehindDataStream);
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
builder.startObject("failure_store");
builder.field("explicitly_enabled_count", streamStats.failureStoreExplicitlyEnabledDataStreamCount);
builder.field("effectively_enabled_count", streamStats.failureStoreEffectivelyEnabledDataStreamCount);
builder.field("failure_indices_count", streamStats.failureStoreIndicesCount);
builder.endObject();
}
builder.startObject("failure_store");
builder.field("explicitly_enabled_count", streamStats.failureStoreExplicitlyEnabledDataStreamCount);
builder.field("effectively_enabled_count", streamStats.failureStoreEffectivelyEnabledDataStreamCount);
builder.field("failure_indices_count", streamStats.failureStoreIndicesCount);
builder.endObject();
}

@Override

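The fields written above surface in the data streams section of the X-Pack usage API. An illustrative response fragment with made-up counts, assuming a cluster with two data streams that redirect failures:

[source,yaml]
----
GET _xpack/usage

# abbreviated, illustrative response
{
  "data_streams": {
    "available": true,
    "enabled": true,
    "data_streams": 2,
    "indices_count": 8,
    "failure_store": {
      "explicitly_enabled_count": 1,
      "effectively_enabled_count": 2,
      "failure_indices_count": 3
    }
  }
}
----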
@ -8,7 +8,6 @@
|
|||
package org.elasticsearch.xpack.core.security.action.role;
|
||||
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
|
||||
import org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilege;
|
||||
|
@ -55,10 +54,8 @@ public class RoleDescriptorRequestValidator {
|
|||
} catch (IllegalArgumentException ile) {
|
||||
validationException = addValidationError(ile.getMessage(), validationException);
|
||||
}
|
||||
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
|
||||
for (final String indexName : idp.getIndices()) {
|
||||
validationException = validateIndexNameExpression(indexName, validationException);
|
||||
}
|
||||
for (final String indexName : idp.getIndices()) {
|
||||
validationException = validateIndexNameExpression(indexName, validationException);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -78,10 +75,8 @@ public class RoleDescriptorRequestValidator {
|
|||
} catch (IllegalArgumentException ile) {
|
||||
validationException = addValidationError(ile.getMessage(), validationException);
|
||||
}
|
||||
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
|
||||
for (String indexName : ridp.indicesPrivileges().getIndices()) {
|
||||
validationException = validateIndexNameExpression(indexName, validationException);
|
||||
}
|
||||
for (String indexName : ridp.indicesPrivileges().getIndices()) {
|
||||
validationException = validateIndexNameExpression(indexName, validationException);
|
||||
}
|
||||
}
|
||||
if (roleDescriptor.hasRemoteClusterPermissions()) {
|
||||
|
|
|
@ -13,7 +13,6 @@ import org.elasticsearch.action.ActionRequestValidationException;
|
|||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.support.IndexComponentSelector;
|
||||
import org.elasticsearch.action.support.SubscribableListener;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.cluster.metadata.IndexAbstraction;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.ProjectMetadata;
|
||||
|
@ -348,17 +347,15 @@ public interface AuthorizationEngine {
|
|||
validationException
|
||||
);
|
||||
}
|
||||
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
|
||||
// best effort prevent users from attempting to use selectors in privilege check
|
||||
for (String indexPattern : indicesPrivileges.getIndices()) {
|
||||
if (IndexNameExpressionResolver.hasSelector(indexPattern, IndexComponentSelector.FAILURES)
|
||||
|| IndexNameExpressionResolver.hasSelector(indexPattern, IndexComponentSelector.DATA)) {
|
||||
validationException = addValidationError(
|
||||
"may only check index privileges without selectors in index patterns [" + indexPattern + "]",
|
||||
validationException
|
||||
);
|
||||
break;
|
||||
}
|
||||
// best effort prevent users from attempting to use selectors in privilege check
|
||||
for (String indexPattern : indicesPrivileges.getIndices()) {
|
||||
if (IndexNameExpressionResolver.hasSelector(indexPattern, IndexComponentSelector.FAILURES)
|
||||
|| IndexNameExpressionResolver.hasSelector(indexPattern, IndexComponentSelector.DATA)) {
|
||||
validationException = addValidationError(
|
||||
"may only check index privileges without selectors in index patterns [" + indexPattern + "]",
|
||||
validationException
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
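The validation kept above from the previously feature-flagged path still rejects selectors in privilege-check patterns. A sketch of a has privileges request that would now fail with "may only check index privileges without selectors in index patterns":

[source,yaml]
----
GET _security/user/_has_privileges
{
  "index": [
    {
      "names": [ "logs-*::failures" ],
      "privileges": [ "read" ]
    }
  ]
}
----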
|
@ -32,7 +32,6 @@ import org.elasticsearch.action.datastreams.PromoteDataStreamAction;
|
|||
import org.elasticsearch.action.fieldcaps.TransportFieldCapabilitiesAction;
|
||||
import org.elasticsearch.action.search.TransportSearchShardsAction;
|
||||
import org.elasticsearch.action.support.IndexComponentSelector;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.core.Nullable;
|
||||
|
@ -52,7 +51,6 @@ import java.util.HashSet;
|
|||
import java.util.LinkedHashMap;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.SortedMap;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
@@ -238,10 +236,8 @@ public final class IndexPrivilege extends Privilege {
*/
private static final Map<String, IndexPrivilege> VALUES = combineSortedInOrder(
sortByAccessLevel(
Stream.of(
DataStream.isFailureStoreFeatureFlagEnabled() ? entry("read_failure_store", READ_FAILURE_STORE) : null,
DataStream.isFailureStoreFeatureFlagEnabled() ? entry("manage_failure_store", MANAGE_FAILURE_STORE) : null
).filter(Objects::nonNull).collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue))
Stream.of(entry("read_failure_store", READ_FAILURE_STORE), entry("manage_failure_store", MANAGE_FAILURE_STORE))
.collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, Map.Entry::getValue))
),
sortByAccessLevel(
Stream.of(

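With the flag removed, `read_failure_store` and `manage_failure_store` are always registered as index privileges. A minimal sketch of a role that can search failure stores but not the backing data, using the create or update roles API:

[source,yaml]
----
PUT _security/role/failures_reader
{
  "indices": [
    {
      "names": [ "logs-*" ],
      "privileges": [ "read_failure_store" ]
    }
  ]
}
----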
@ -28,7 +28,6 @@ import org.elasticsearch.action.downsample.DownsampleAction;
|
|||
import org.elasticsearch.action.index.TransportIndexAction;
|
||||
import org.elasticsearch.action.search.TransportSearchAction;
|
||||
import org.elasticsearch.action.search.TransportSearchScrollAction;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.index.reindex.ReindexAction;
|
||||
import org.elasticsearch.xpack.core.XPackPlugin;
|
||||
import org.elasticsearch.xpack.core.ilm.action.ILMActions;
|
||||
|
@ -161,7 +160,7 @@ public class InternalUsers {
|
|||
.privileges(
|
||||
filterNonNull(
|
||||
// needed to rollover failure store
|
||||
DataStream.isFailureStoreFeatureFlagEnabled() ? "manage_failure_store" : null,
|
||||
"manage_failure_store",
|
||||
"delete_index",
|
||||
RolloverAction.NAME,
|
||||
ForceMergeAction.NAME + "*",
|
||||
|
@ -184,7 +183,7 @@ public class InternalUsers {
|
|||
.privileges(
|
||||
filterNonNull(
|
||||
// needed to rollover failure store
|
||||
DataStream.isFailureStoreFeatureFlagEnabled() ? "manage_failure_store" : null,
|
||||
"manage_failure_store",
|
||||
"delete_index",
|
||||
RolloverAction.NAME,
|
||||
ForceMergeAction.NAME + "*",
|
||||
|
@ -262,7 +261,7 @@ public class InternalUsers {
|
|||
.privileges(
|
||||
filterNonNull(
|
||||
// needed to rollover failure store
|
||||
DataStream.isFailureStoreFeatureFlagEnabled() ? "manage_failure_store" : null,
|
||||
"manage_failure_store",
|
||||
LazyRolloverAction.NAME
|
||||
)
|
||||
)
|
||||
|
|
|
@ -8,7 +8,6 @@ package org.elasticsearch.xpack.core.security.action.role;
|
|||
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ApplicationResourcePrivileges;
|
||||
import org.elasticsearch.xpack.core.security.authz.permission.RemoteClusterPermissionGroup;
|
||||
|
@ -50,7 +49,6 @@ public class PutRoleRequestTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testValidationErrorWithFailureStorePrivilegeInRemoteIndices() {
|
||||
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
final PutRoleRequest request = new PutRoleRequest();
|
||||
request.name(randomAlphaOfLengthBetween(4, 9));
|
||||
request.addRemoteIndex(
|
||||
|
|
|
@ -7,7 +7,6 @@
|
|||
|
||||
package org.elasticsearch.xpack.core.security.action.role;
|
||||
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
|
||||
import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges;
|
||||
|
@ -23,7 +22,6 @@ import static org.hamcrest.Matchers.nullValue;
|
|||
public class RoleDescriptorRequestValidatorTests extends ESTestCase {
|
||||
|
||||
public void testSelectorsValidation() {
|
||||
assumeTrue("failure store feature flag must be enabled", DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
String[] invalidIndexNames = {
|
||||
"index::failures",
|
||||
".fs-*::failures",
|
||||
|
|
|
@ -14,7 +14,6 @@ import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction;
|
|||
import org.elasticsearch.action.bulk.TransportBulkAction;
|
||||
import org.elasticsearch.action.search.TransportSearchAction;
|
||||
import org.elasticsearch.cluster.metadata.AliasMetadata;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.cluster.metadata.IndexAbstraction;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetadata;
|
||||
import org.elasticsearch.cluster.metadata.Metadata;
|
||||
|
@ -648,7 +647,6 @@ public class LimitedRoleTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testAllowedActionsMatcherWithSelectors() {
|
||||
assumeTrue("failure store feature must be enabled", DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
Role fromRole = Role.builder(EMPTY_RESTRICTED_INDICES, "fromRole")
|
||||
.add(IndexPrivilege.READ_FAILURE_STORE, "ind*")
|
||||
.add(IndexPrivilege.READ, "ind*")
|
||||
|
|
|
@ -13,7 +13,6 @@ import org.elasticsearch.action.delete.TransportDeleteAction;
|
|||
import org.elasticsearch.action.index.TransportIndexAction;
|
||||
import org.elasticsearch.action.search.TransportSearchAction;
|
||||
import org.elasticsearch.action.update.TransportUpdateAction;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.xpack.core.rollup.action.GetRollupIndexCapsAction;
|
||||
|
@ -70,15 +69,13 @@ public class IndexPrivilegeTests extends ESTestCase {
|
|||
);
|
||||
assertThat(findPrivilegesThatGrant(RefreshAction.NAME), equalTo(List.of("maintenance", "manage", "all")));
|
||||
|
||||
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
|
||||
Predicate<IndexPrivilege> failuresOnly = p -> p.getSelectorPredicate() == IndexComponentSelectorPredicate.FAILURES;
|
||||
assertThat(findPrivilegesThatGrant(TransportSearchAction.TYPE.name(), failuresOnly), equalTo(List.of("read_failure_store")));
|
||||
assertThat(findPrivilegesThatGrant(TransportIndexAction.NAME, failuresOnly), equalTo(List.of()));
|
||||
assertThat(findPrivilegesThatGrant(TransportUpdateAction.NAME, failuresOnly), equalTo(List.of()));
|
||||
assertThat(findPrivilegesThatGrant(TransportDeleteAction.NAME, failuresOnly), equalTo(List.of()));
|
||||
assertThat(findPrivilegesThatGrant(IndicesStatsAction.NAME, failuresOnly), equalTo(List.of("manage_failure_store")));
|
||||
assertThat(findPrivilegesThatGrant(RefreshAction.NAME, failuresOnly), equalTo(List.of("manage_failure_store")));
|
||||
}
|
||||
Predicate<IndexPrivilege> failuresOnly = p -> p.getSelectorPredicate() == IndexComponentSelectorPredicate.FAILURES;
|
||||
assertThat(findPrivilegesThatGrant(TransportSearchAction.TYPE.name(), failuresOnly), equalTo(List.of("read_failure_store")));
|
||||
assertThat(findPrivilegesThatGrant(TransportIndexAction.NAME, failuresOnly), equalTo(List.of()));
|
||||
assertThat(findPrivilegesThatGrant(TransportUpdateAction.NAME, failuresOnly), equalTo(List.of()));
|
||||
assertThat(findPrivilegesThatGrant(TransportDeleteAction.NAME, failuresOnly), equalTo(List.of()));
|
||||
assertThat(findPrivilegesThatGrant(IndicesStatsAction.NAME, failuresOnly), equalTo(List.of("manage_failure_store")));
|
||||
assertThat(findPrivilegesThatGrant(RefreshAction.NAME, failuresOnly), equalTo(List.of("manage_failure_store")));
|
||||
}
|
||||
|
||||
public void testGet() {
|
||||
|
@ -117,7 +114,6 @@ public class IndexPrivilegeTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testResolveSameSelectorPrivileges() {
|
||||
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
{
|
||||
IndexPrivilege actual = resolvePrivilegeAndAssertSingleton(Set.of("read_failure_store"));
|
||||
assertThat(actual, equalTo(IndexPrivilege.READ_FAILURE_STORE));
|
||||
|
@ -144,7 +140,6 @@ public class IndexPrivilegeTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testResolveBySelectorAccess() {
|
||||
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
{
|
||||
Set<IndexPrivilege> actual = IndexPrivilege.resolveBySelectorAccess(Set.of("read_failure_store"));
|
||||
assertThat(actual, containsInAnyOrder(IndexPrivilege.READ_FAILURE_STORE));
|
||||
|
|
|
@ -14,7 +14,6 @@ import org.elasticsearch.common.settings.SecureString;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
||||
import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
|
||||
import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
|
||||
import org.junit.ClassRule;
|
||||
|
@ -25,7 +24,6 @@ public class XPackCoreClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase {
|
|||
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
|
||||
.setting("xpack.security.enabled", "true")
|
||||
.setting("xpack.license.self_generated.type", "trial")
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.keystore("bootstrap.password", "x-pack-test-password")
|
||||
.user("x_pack_rest_user", "x-pack-test-password")
|
||||
.systemProperty("es.queryable_built_in_roles_enabled", "false")
|
||||
|
|
|
@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder;
|
|||
import org.elasticsearch.action.support.WriteRequest;
|
||||
import org.elasticsearch.client.internal.ClusterAdminClient;
|
||||
import org.elasticsearch.cluster.metadata.ComposableIndexTemplate;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.cluster.metadata.DataStreamFailureStore;
|
||||
import org.elasticsearch.cluster.metadata.DataStreamOptions;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetadata;
|
||||
|
@ -55,7 +54,6 @@ import org.elasticsearch.xpack.esql.core.type.DataType;
|
|||
import org.elasticsearch.xpack.esql.parser.ParsingException;
|
||||
import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
|
||||
import org.elasticsearch.xpack.esql.plugin.QueryPragmas;
|
||||
import org.junit.Assume;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -1027,8 +1025,6 @@ public class EsqlActionIT extends AbstractEsqlIntegTestCase {
|
|||
}
|
||||
|
||||
public void testDataStreamPatterns() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
Map<String, Long> testCases = new HashMap<>();
|
||||
// Concrete data stream with each selector
|
||||
testCases.put("test_ds_patterns_1", 5L);
|
||||
|
@ -1087,8 +1083,6 @@ public class EsqlActionIT extends AbstractEsqlIntegTestCase {
|
|||
}
|
||||
|
||||
public void testDataStreamInvalidPatterns() throws Exception {
|
||||
Assume.assumeTrue(DataStream.isFailureStoreFeatureFlagEnabled());
|
||||
|
||||
Map<String, String> testCases = new HashMap<>();
|
||||
// === Errors
|
||||
// Only recognized components can be selected
|
||||
|
|
|
@@ -100,7 +100,7 @@ indexPatternAndMetadataFields:

indexPattern
: (clusterString COLON)? indexString
| {this.isDevVersion()}? indexString (CAST_OP selectorString)?
| indexString (CAST_OP selectorString)?
;

clusterString

@@ -18,7 +18,7 @@ FROM_PIPE : PIPE -> type(PIPE), popMode;
FROM_OPENING_BRACKET : OPENING_BRACKET -> type(OPENING_BRACKET);
FROM_CLOSING_BRACKET : CLOSING_BRACKET -> type(CLOSING_BRACKET);
FROM_COLON : COLON -> type(COLON);
FROM_SELECTOR : {this.isDevVersion()}? CAST_OP -> type(CAST_OP);
FROM_SELECTOR : CAST_OP -> type(CAST_OP);
FROM_COMMA : COMMA -> type(COMMA);
FROM_ASSIGN : ASSIGN -> type(ASSIGN);
METADATA : 'metadata';

@ -8,7 +8,6 @@
|
|||
package org.elasticsearch.xpack.esql.action;
|
||||
|
||||
import org.elasticsearch.Build;
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
import org.elasticsearch.common.util.FeatureFlag;
|
||||
import org.elasticsearch.features.NodeFeature;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestNodesCapabilitiesAction;
|
||||
|
@@ -948,7 +947,7 @@ public class EsqlCapabilities {
/**
* Index component selector syntax (my-data-stream-name::failures)
*/
INDEX_COMPONENT_SELECTORS(DataStream.isFailureStoreFeatureFlagEnabled()),
INDEX_COMPONENT_SELECTORS,

/**
* Make numberOfChannels consistent with layout in DefaultLayout by removing duplicated ChannelSet.

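With `INDEX_COMPONENT_SELECTORS` now unconditional, ES|QL accepts the `::failures` selector in FROM patterns on all builds, not just snapshots. A sketch against a hypothetical data stream via the `_query` endpoint:

[source,yaml]
----
POST _query
{
  "query": "FROM logs-myapp-default::failures | LIMIT 10"
}
----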
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
File diff suppressed because one or more lines are too long
File diff suppressed because it is too large
|
@ -17,7 +17,6 @@ import org.elasticsearch.core.Nullable;
|
|||
import org.elasticsearch.core.Strings;
|
||||
import org.elasticsearch.core.Tuple;
|
||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||
import org.elasticsearch.test.cluster.util.resource.Resource;
|
||||
import org.elasticsearch.test.rest.ObjectPath;
|
||||
|
@ -45,7 +44,6 @@ public class RemoteClusterSecurityRCS1FailureStoreRestIT extends AbstractRemoteC
|
|||
.name("fulfilling-cluster")
|
||||
.nodes(3)
|
||||
.apply(commonClusterConfig)
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.rolesFile(Resource.fromClasspath("roles.yml"))
|
||||
.build();
|
||||
|
||||
|
@ -53,7 +51,6 @@ public class RemoteClusterSecurityRCS1FailureStoreRestIT extends AbstractRemoteC
|
|||
.distribution(DistributionType.DEFAULT)
|
||||
.name("query-cluster")
|
||||
.apply(commonClusterConfig)
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.rolesFile(Resource.fromClasspath("roles.yml"))
|
||||
.build();
|
||||
}
|
||||
|
|
|
@ -11,7 +11,6 @@ import org.elasticsearch.client.Request;
|
|||
import org.elasticsearch.client.ResponseException;
|
||||
import org.elasticsearch.core.Tuple;
|
||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||
import org.elasticsearch.test.cluster.util.resource.Resource;
|
||||
import org.junit.ClassRule;
|
||||
|
@ -35,7 +34,6 @@ public class RemoteClusterSecurityRCS2FailureStoreRestIT extends AbstractRemoteC
|
|||
.distribution(DistributionType.DEFAULT)
|
||||
.name("fulfilling-cluster")
|
||||
.apply(commonClusterConfig)
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.setting("remote_cluster_server.enabled", "true")
|
||||
.setting("remote_cluster.port", "0")
|
||||
.setting("xpack.security.remote_cluster_server.ssl.enabled", "true")
|
||||
|
@ -49,7 +47,6 @@ public class RemoteClusterSecurityRCS2FailureStoreRestIT extends AbstractRemoteC
|
|||
.distribution(DistributionType.DEFAULT)
|
||||
.name("query-cluster")
|
||||
.apply(commonClusterConfig)
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.setting("xpack.security.remote_cluster_client.ssl.enabled", "true")
|
||||
.setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt")
|
||||
.setting("xpack.security.authc.token.enabled", "true")
|
||||
|
|
|
@ -21,7 +21,6 @@ import org.elasticsearch.core.Tuple;
|
|||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.SearchResponseUtils;
|
||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
||||
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
|
||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||
import org.elasticsearch.test.rest.ObjectPath;
|
||||
|
@ -52,7 +51,6 @@ public class RemoteClusterWithoutSecurityFailureStoreRestIT extends ESRestTestCa
|
|||
.distribution(DistributionType.DEFAULT)
|
||||
.name("fulfilling-cluster")
|
||||
.nodes(3)
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.module("analysis-common")
|
||||
.setting("xpack.license.self_generated.type", "trial")
|
||||
.setting("xpack.security.enabled", "false")
|
||||
|
@ -61,7 +59,6 @@ public class RemoteClusterWithoutSecurityFailureStoreRestIT extends ESRestTestCa
|
|||
private static ElasticsearchCluster queryCluster = ElasticsearchCluster.local()
|
||||
.distribution(DistributionType.DEFAULT)
|
||||
.name("query-cluster")
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.module("analysis-common")
|
||||
.setting("xpack.license.self_generated.type", "trial")
|
||||
.setting("xpack.security.enabled", "false")
|
||||
|
|
|
@ -7,8 +7,6 @@
|
|||
|
||||
package org.elasticsearch.xpack.security.operator;
|
||||
|
||||
import org.elasticsearch.cluster.metadata.DataStream;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
@@ -513,9 +511,9 @@ public class Constants {
"indices:admin/data_stream/lifecycle/get",
"indices:admin/data_stream/lifecycle/put",
"indices:admin/data_stream/lifecycle/explain",
DataStream.isFailureStoreFeatureFlagEnabled() ? "indices:admin/data_stream/options/delete" : null,
DataStream.isFailureStoreFeatureFlagEnabled() ? "indices:admin/data_stream/options/get" : null,
DataStream.isFailureStoreFeatureFlagEnabled() ? "indices:admin/data_stream/options/put" : null,
"indices:admin/data_stream/options/delete",
"indices:admin/data_stream/options/get",
"indices:admin/data_stream/options/put",
"indices:admin/delete",
"indices:admin/flush",
"indices:admin/flush[s]",

|
|
@ -28,7 +28,6 @@ import org.elasticsearch.search.SearchHit;
|
|||
import org.elasticsearch.search.SearchResponseUtils;
|
||||
import org.elasticsearch.test.TestSecurityClient;
|
||||
import org.elasticsearch.test.cluster.ElasticsearchCluster;
|
||||
import org.elasticsearch.test.cluster.FeatureFlag;
|
||||
import org.elasticsearch.test.rest.ESRestTestCase;
|
||||
import org.elasticsearch.test.rest.ObjectPath;
|
||||
import org.elasticsearch.xcontent.XContentBuilder;
|
||||
|
@ -68,7 +67,6 @@ public class FailureStoreSecurityRestIT extends ESRestTestCase {
|
|||
@ClassRule
|
||||
public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
|
||||
.apply(SecurityOnTrialLicenseRestTestCase.commonTrialSecurityClusterConfig)
|
||||
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
|
||||
.build();
|
||||
|
||||
@Override
|
||||
|
|
|
@ -9,7 +9,6 @@ package org.elasticsearch.xpack.security.action.user;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

@ -92,8 +91,6 @@ public class TransportHasPrivilegesActionTests extends ESTestCase {
}

public void testHasPrivilegesRequestDoesNotAllowSelectorsInIndexPatterns() {
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());

final ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
final SecurityContext context = mock(SecurityContext.class);
final User user = new User("user-1", "superuser");

@ -8,7 +8,6 @@
package org.elasticsearch.xpack.security.authz;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.common.Strings;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.test.ESTestCase;

@ -238,8 +237,6 @@ public class AuthorizationDenialMessagesTests extends ESTestCase {
}

public void testActionDeniedWithFailuresAndCorrectActionIncludesFailuresMessage() {
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());

Authentication authentication = AuthenticationTestHelper.builder().build();

final String action = "indices:data/read/" + randomAlphaOfLengthBetween(0, 8);

@ -262,8 +259,6 @@ public class AuthorizationDenialMessagesTests extends ESTestCase {
}

public void testActionDeniedWithNonMatchingActionFailuresOmitsFailuresMessage() {
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());

Authentication authentication = AuthenticationTestHelper.builder().build();

// granted only by all, so selector message is omitted

@ -284,8 +279,6 @@ public class AuthorizationDenialMessagesTests extends ESTestCase {
}

public void testActionDeniedWithoutFailuresOmitsFailuresMessage() {
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());

Authentication authentication = AuthenticationTestHelper.builder().build();

final String action = "indices:data/read/" + randomAlphaOfLengthBetween(0, 8);

@ -305,8 +298,6 @@ public class AuthorizationDenialMessagesTests extends ESTestCase {
}

public void testActionDeniedWithoutIndicesOmitsFailuresMessage() {
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());

Authentication authentication = AuthenticationTestHelper.builder().build();

final String action = "indices:data/read/" + randomAlphaOfLengthBetween(0, 8);

@ -312,7 +312,6 @@ public class AuthorizedIndicesTests extends ESTestCase {
}

public void testDataStreamsAreNotIncludedInAuthorizedIndicesWithFailuresSelectorAndAllPrivilege() {
assumeTrue("requires failure store", DataStream.isFailureStoreFeatureFlagEnabled());
RoleDescriptor aStarRole = new RoleDescriptor(
"a_star",
null,

@ -400,7 +399,6 @@ public class AuthorizedIndicesTests extends ESTestCase {
}

public void testDataStreamsAreIncludedInAuthorizedIndicesWithFailuresSelectorAndAllPrivilege() {
assumeTrue("requires failure store", DataStream.isFailureStoreFeatureFlagEnabled());
RoleDescriptor aStarRole = new RoleDescriptor(
"a_star",
null,

@ -494,7 +492,6 @@ public class AuthorizedIndicesTests extends ESTestCase {
}

public void testDataStreamsAreIncludedInAuthorizedIndicesWithFailuresSelector() {
assumeTrue("requires failure store", DataStream.isFailureStoreFeatureFlagEnabled());
RoleDescriptor aReadFailuresStarRole = new RoleDescriptor(
"a_read_failure_store",
null,

@ -577,7 +574,6 @@ public class AuthorizedIndicesTests extends ESTestCase {
}

public void testDataStreamsAreNotIncludedInAuthorizedIndicesWithFailuresSelector() {
assumeTrue("requires failure store", DataStream.isFailureStoreFeatureFlagEnabled());
RoleDescriptor aReadFailuresStarRole = new RoleDescriptor(
"a_read_failure_store",
null,

@ -226,16 +226,12 @@ public class IndicesAndAliasesResolverTests extends ESTestCase {
.put(indexBuilder(securityIndexName).settings(settings));

// Only add the failure indices if the failure store flag is enabled
if (DataStream.isFailureStoreFeatureFlagEnabled()) {
projectBuilder.put(dataStreamFailureStore1, true).put(dataStreamFailureStore2, true);
}
projectBuilder.put(dataStreamFailureStore1, true).put(dataStreamFailureStore2, true);
projectBuilder.put(
newInstance(
dataStreamName,
List.of(dataStreamIndex1.getIndex(), dataStreamIndex2.getIndex()),
DataStream.isFailureStoreFeatureFlagEnabled()
? List.of(dataStreamFailureStore1.getIndex(), dataStreamFailureStore2.getIndex())
: List.of()
List.of(dataStreamFailureStore1.getIndex(), dataStreamFailureStore2.getIndex())
)
);
if (withAlias) {

@ -2681,6 +2677,6 @@ public class IndicesAndAliasesResolverTests extends ESTestCase {
}

private boolean runFailureStore() {
return DataStream.isFailureStoreFeatureFlagEnabled() && randomBoolean();
return randomBoolean();
}
}

@ -1416,7 +1416,6 @@ public class RBACEngineTests extends ESTestCase {
}

public void testBuildUserPrivilegeResponseCombinesIndexPrivileges() {
assumeTrue("failure store feature must be enabled", DataStream.isFailureStoreFeatureFlagEnabled());
final BytesArray query = new BytesArray("""
{"term":{"public":true}}""");
final Role role = Role.builder(RESTRICTED_INDICES, "test", "role")

@ -187,8 +187,6 @@ public class IndicesPermissionTests extends ESTestCase {
}

public void testAuthorizeDataStreamAccessWithFailuresSelector() {
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());

Metadata.Builder builder = Metadata.builder();
String dataStreamName = randomAlphaOfLength(6);
int numBackingIndices = randomIntBetween(1, 3);

@ -367,7 +365,6 @@ public class IndicesPermissionTests extends ESTestCase {
}

public void testAuthorizeDataStreamFailureIndices() {
assumeTrue("failure store required", DataStream.isFailureStoreFeatureFlagEnabled());
Metadata.Builder builder = Metadata.builder();
String dataStreamName = randomAlphaOfLength(6);
int numBackingIndices = randomIntBetween(1, 3);

@ -23,7 +23,6 @@ import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.IndexAbstraction;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;

@ -1548,7 +1547,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithReadFailureStorePrivilegeOnly() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
boolean allowRestrictedIndices = randomBoolean();
final Role role = buildRole(

@ -1566,7 +1564,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithReadFailureStorePrivilegeDuplicatesMerged() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
boolean allowRestrictedIndices = randomBoolean();
final List<Role> roles = List.of(

@ -1614,7 +1611,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithReadFailureStoreAndReadPrivilegeSplit() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
boolean allowRestrictedIndices = randomBoolean();
final Role role = buildRole(

@ -1636,7 +1632,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithReadFailureStoreAndReadPrivilegeAndMultipleIndexPatternsSplit() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
String otherIndexPattern = randomValueOtherThan(indexPattern, () -> randomAlphanumericOfLength(10));
boolean allowRestrictedIndices = randomBoolean();

@ -1694,7 +1689,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithReadOnRestrictedAndNonRestrictedIndices() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
List<Role> roles = List.of(
buildRole(

@ -1739,7 +1733,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithReadFailureStoreOnRestrictedAndNonRestrictedIndices() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
List<Role> roles = List.of(
buildRole(

@ -1792,7 +1785,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithMultipleReadFailureStoreAndReadPrivilegeSplit() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
boolean allowRestrictedIndices = randomBoolean();
final List<Role> roles = List.of(

@ -1844,7 +1836,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithAllPrivilegeIsNeverSplit() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
boolean allowRestrictedIndices = randomBoolean();
final List<Role> roles = List.of(

@ -1899,7 +1890,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithFailureStorePrivilegeCollatesToRemoveDlsFlsFromAnotherGroup() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
boolean allowRestrictedIndices = randomBoolean();
final List<Role> roles = List.of(

@ -1977,7 +1967,6 @@ public class CompositeRolesStoreTests extends ESTestCase {
}

public void testBuildRoleWithFailureStorePrivilegeCollatesToKeepDlsFlsFromAnotherGroup() {
assumeTrue("requires failure store feature", DataStream.isFailureStoreFeatureFlagEnabled());
String indexPattern = randomAlphanumericOfLength(10);
boolean allowRestrictedIndices = randomBoolean();
final Role role = buildRole(

@ -2025,18 +2014,12 @@ public class CompositeRolesStoreTests extends ESTestCase {

public void testBuildRoleDoesNotSplitIfAllPrivilegesHaveTheSameSelector() {
String indexPattern = randomAlphanumericOfLength(10);
IndexComponentSelectorPredicate predicate = (DataStream.isFailureStoreFeatureFlagEnabled())
? randomFrom(
IndexComponentSelectorPredicate.ALL,
IndexComponentSelectorPredicate.DATA,
IndexComponentSelectorPredicate.FAILURES,
IndexComponentSelectorPredicate.DATA_AND_FAILURES
)
: randomFrom(
IndexComponentSelectorPredicate.ALL,
IndexComponentSelectorPredicate.DATA,
IndexComponentSelectorPredicate.DATA_AND_FAILURES
);
IndexComponentSelectorPredicate predicate = randomFrom(
IndexComponentSelectorPredicate.ALL,
IndexComponentSelectorPredicate.DATA,
IndexComponentSelectorPredicate.FAILURES,
IndexComponentSelectorPredicate.DATA_AND_FAILURES
);

List<String> privilegesWithSelector = IndexPrivilege.names()
.stream()

@ -62,7 +62,6 @@ public class FullClusterRestartIT extends ParameterizedFullClusterRestartTestCas
.keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key"))
.keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode")
.feature(FeatureFlag.TIME_SERIES_MODE)
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
.build();

public FullClusterRestartIT(@Name("cluster") FullClusterRestartUpgradeStatus upgradeStatus) {

@ -37,7 +37,6 @@ public abstract class AbstractXpackFullClusterRestartTestCase extends Parameteri
.keystore("xpack.watcher.encryption_key", Resource.fromClasspath("system_key"))
.keystore("xpack.security.transport.ssl.secure_key_passphrase", "testnode")
.feature(FeatureFlag.TIME_SERIES_MODE)
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
.build();

public AbstractXpackFullClusterRestartTestCase(FullClusterRestartUpgradeStatus upgradeStatus) {

@ -46,7 +46,6 @@ public class CoreWithMultipleProjectsClientYamlTestSuiteIT extends MultipleProje
.user(USER, PASS)
.systemProperty("es.queryable_built_in_roles_enabled", "false")
.feature(FeatureFlag.TIME_SERIES_MODE)
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
.nodes(2)
.build();

@ -44,7 +44,6 @@ public class XpackWithMultipleProjectsClientYamlTestSuiteIT extends MultipleProj
.configFile("service_tokens", Resource.fromClasspath("service_tokens"))
.user(USER, PASS)
.feature(FeatureFlag.TIME_SERIES_MODE)
.feature(FeatureFlag.FAILURE_STORE_ENABLED)
.build();

public XpackWithMultipleProjectsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) {