[7.17] Backport DLS changes (#108330)

This commit introduces stricter DLS rules and is a manual backport of #105709 and #105714, with additional node-level settings to optionally disable the stricter DLS rules. Since these settings are not present in 8.x, the corresponding deprecation info API entries have also been added to help inform any users who may have set these values to remove them before upgrading.
This commit is contained in:
parent 7fe654b539
commit 4e08df5bda

23 changed files with 1279 additions and 400 deletions
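As a hedged illustration of the node-level escape hatch described above (this snippet is not part of the diff): both settings default to `true`, and setting them to `false` in the node configuration restores the pre-backport behavior.

```java
// Illustrative only: opting a 7.17 node out of the stricter DLS rules.
// Both settings are deprecated and must be removed before upgrading to 8.x.
Settings nodeSettings = Settings.builder()
    .put("xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled", false)
    .put("xpack.security.dls.error_when_validate_query_with_rewrite.enabled", false)
    .build();
```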
@@ -80,6 +80,23 @@ To avoid deprecation warnings, remove any SSL truststores that do not
contain any trusted entries.
====

[[deprecation_for_dls_settings]]
.Deprecation for DLS settings
[%collapsible]
====
*Details* +
Two settings available in the latest versions of 7.17 are not available in the next major version.
Newer versions of 7.17 default to stricter Document Level Security (DLS) rules, and the following
settings can be used to disable those stricter DLS rules:
`xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled` and
`xpack.security.dls.error_when_validate_query_with_rewrite.enabled`.
The next major version of {es} also defaults to the stricter DLS rules, but does not allow
use of the less strict rules.

*Impact* +
To avoid deprecation warnings, remove these settings from elasticsearch.yml.
====

[discrete]
[[deprecations_717_mapping]]
==== Mapping deprecations
@@ -5,6 +5,36 @@ coming[7.17.22]

Also see <<breaking-changes-7.17,Breaking changes in 7.17>>.

[[breaking-7.17.22]]
[float]
=== Breaking changes

[discrete]
[[breaking_7_17_22_dls_changes]]
==== Stricter Document Level Security (DLS)

[[stricter_dls_7_17_22]]
.Document Level Security (DLS) applies stricter checks for the validate query API and for terms aggregations when `min_doc_count` is set to 0.

[%collapsible]
====
*Details* +
When Document Level Security (DLS) is applied to terms aggregations and `min_doc_count` is set to 0, stricter security rules apply.
When Document Level Security (DLS) is applied to the validate query API with the rewrite parameter, stricter security rules apply.

*Impact* +
If needed, test workflows with DLS enabled to ensure that the stricter security rules do not impact your application.

*Remediation* +
Set `min_doc_count` to a value greater than 0 in terms aggregations, or use an account not constrained by DLS for validate query API calls.

Set `xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled` to `false` in the Elasticsearch configuration
to revert to the previous behavior.

Set `xpack.security.dls.error_when_validate_query_with_rewrite.enabled` to `false` in the Elasticsearch configuration
to revert to the previous behavior.
====

[[bug-7.17.22]]
[float]
=== Bug fixes
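As a sketch of the first remediation option, using the same client-side builders the tests in this diff use: any `min_doc_count` greater than 0 keeps a terms aggregation off the stricter code path, because empty buckets are never produced in the first place.

```java
// Remediation sketch: min_doc_count >= 1 avoids the stricter DLS handling,
// since buckets are only emitted for terms that match live, visible documents.
TermsAggregationBuilder colors = AggregationBuilders.terms("colors")
    .field("color")
    .minDocCount(1);
```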
@@ -41,6 +41,7 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Queue;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

@@ -330,6 +331,48 @@ public class AggregatorFactories {
            return false;
        }

        /**
         * Return true if any of the builders is a terms aggregation with min_doc_count=0
         */
        public boolean hasZeroMinDocTermsAggregation() {
            final Queue<AggregationBuilder> queue = new LinkedList<>(aggregationBuilders);
            while (queue.isEmpty() == false) {
                final AggregationBuilder current = queue.poll();
                if (current == null) {
                    continue;
                }
                if (current instanceof TermsAggregationBuilder) {
                    TermsAggregationBuilder termsBuilder = (TermsAggregationBuilder) current;
                    if (termsBuilder.minDocCount() == 0) {
                        return true;
                    }
                }
                queue.addAll(current.getSubAggregations());
            }
            return false;
        }

        /**
         * Force all min_doc_count=0 terms aggregations to exclude deleted docs.
         */
        public void forceTermsAggsToExcludeDeletedDocs() {
            assert hasZeroMinDocTermsAggregation();
            final Queue<AggregationBuilder> queue = new LinkedList<>(aggregationBuilders);
            while (queue.isEmpty() == false) {
                final AggregationBuilder current = queue.poll();
                if (current == null) {
                    continue;
                }
                if (current instanceof TermsAggregationBuilder) {
                    TermsAggregationBuilder termsBuilder = (TermsAggregationBuilder) current;
                    if (termsBuilder.minDocCount() == 0) {
                        termsBuilder.excludeDeletedDocs(true);
                    }
                }
                queue.addAll(current.getSubAggregations());
            }
        }

        public Builder addAggregator(AggregationBuilder factory) {
            if (names.add(factory.name) == false) {
                throw new IllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + "]");
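For orientation, here is a hedged sketch of how these two Builder methods are meant to be used together; it mirrors the SearchRequestInterceptor change that appears later in this diff, so only the surrounding variable name is an assumption:

```java
// Sketch mirroring the SearchRequestInterceptor logic later in this diff.
// `request` is assumed to be a SearchRequest with an optional aggregation tree.
if (request.source() != null
    && request.source().aggregations() != null
    && request.source().aggregations().hasZeroMinDocTermsAggregation()) {
    // Rewrite every min_doc_count=0 terms aggregation (including nested
    // sub-aggregations) so buckets seen only in deleted docs are not leaked.
    request.source().aggregations().forceTermsAggsToExcludeDeletedDocs();
}
```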
@@ -9,15 +9,19 @@
package org.elasticsearch.search.aggregations.bucket.terms;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.PriorityQueue;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.core.Nullable;
import org.elasticsearch.core.Releasable;
import org.elasticsearch.core.Releasables;
import org.elasticsearch.search.DocValueFormat;

@@ -82,7 +86,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
        SubAggCollectionMode collectionMode,
        boolean showTermDocCountError,
        CardinalityUpperBound cardinality,
        Map<String, Object> metadata
        Map<String, Object> metadata,
        boolean excludeDeletedDocs
    ) throws IOException {
        super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode, showTermDocCountError, metadata);
        this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.

@@ -91,13 +96,13 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
        this.lookupGlobalOrd = values::lookupOrd;
        this.acceptedGlobalOrdinals = acceptedOrds;
        if (remapGlobalOrds) {
            this.collectionStrategy = new RemapGlobalOrds(cardinality);
            this.collectionStrategy = new RemapGlobalOrds(cardinality, excludeDeletedDocs);
        } else {
            this.collectionStrategy = cardinality.map(estimate -> {
                if (estimate > 1) {
                    throw new AggregationExecutionException("Dense ords don't know how to collect from many buckets");
                }
                return new DenseGlobalOrds();
                return new DenseGlobalOrds(excludeDeletedDocs);
            });
        }
    }

@@ -274,7 +279,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
        boolean remapGlobalOrds,
        SubAggCollectionMode collectionMode,
        boolean showTermDocCountError,
        Map<String, Object> metadata
        Map<String, Object> metadata,
        boolean excludeDeletedDocs
    ) throws IOException {
        super(
            name,

@@ -292,7 +298,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
            collectionMode,
            showTermDocCountError,
            CardinalityUpperBound.ONE,
            metadata
            metadata,
            excludeDeletedDocs
        );
        assert factories == null || factories.countAggregators() == 0;
        this.segmentDocCounts = context.bigArrays().newLongArray(1, true);

@@ -441,6 +448,13 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
     * bucket ordinal.
     */
    class DenseGlobalOrds extends CollectionStrategy {

        private final boolean excludeDeletedDocs;

        DenseGlobalOrds(boolean excludeDeletedDocs) {
            this.excludeDeletedDocs = excludeDeletedDocs;
        }

        @Override
        String describe() {
            return "dense";

@@ -471,6 +485,14 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
        @Override
        void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
            assert owningBucketOrd == 0;
            if (excludeDeletedDocs) {
                forEachExcludeDeletedDocs(consumer);
            } else {
                forEachAllowDeletedDocs(consumer);
            }
        }

        private void forEachAllowDeletedDocs(BucketInfoConsumer consumer) throws IOException {
            for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) {
                if (false == acceptedGlobalOrdinals.test(globalOrd)) {
                    continue;

@@ -482,6 +504,39 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
            }
        }

        /**
         * Excludes deleted docs in the results by cross-checking with liveDocs.
         */
        private void forEachExcludeDeletedDocs(BucketInfoConsumer consumer) throws IOException {
            try (LongHash accepted = new LongHash(20, new BigArrays(null, null, ""))) {
                for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) {
                    LeafReader reader = ctx.reader();
                    Bits liveDocs = reader.getLiveDocs();
                    SortedSetDocValues globalOrds = null;
                    for (int docId = 0; docId < reader.maxDoc(); ++docId) {
                        if (liveDocs == null || liveDocs.get(docId)) { // document is not deleted
                            globalOrds = globalOrds == null ? valuesSource.globalOrdinalsValues(ctx) : globalOrds;
                            if (globalOrds.advanceExact(docId)) {
                                for (long globalOrd = globalOrds.nextOrd(); globalOrd != NO_MORE_ORDS; globalOrd = globalOrds.nextOrd()) {
                                    if (accepted.find(globalOrd) >= 0) {
                                        continue;
                                    }
                                    if (false == acceptedGlobalOrdinals.test(globalOrd)) {
                                        continue;
                                    }
                                    long docCount = bucketDocCount(globalOrd);
                                    if (bucketCountThresholds.getMinDocCount() == 0 || docCount > 0) {
                                        consumer.accept(globalOrd, globalOrd, docCount);
                                        accepted.add(globalOrd);
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        @Override
        public void close() {}
    }

@@ -494,9 +549,11 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
     */
    private class RemapGlobalOrds extends CollectionStrategy {
        private final LongKeyedBucketOrds bucketOrds;
        private final boolean excludeDeletedDocs;

        private RemapGlobalOrds(CardinalityUpperBound cardinality) {
        private RemapGlobalOrds(CardinalityUpperBound cardinality, boolean excludeDeletedDocs) {
            bucketOrds = LongKeyedBucketOrds.buildForValueRange(bigArrays(), cardinality, 0, valueCount - 1);
            this.excludeDeletedDocs = excludeDeletedDocs;
        }

        @Override

@@ -530,27 +587,20 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr

        @Override
        void forEach(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
            if (excludeDeletedDocs) {
                forEachExcludeDeletedDocs(owningBucketOrd, consumer);
            } else {
                forEachAllowDeletedDocs(owningBucketOrd, consumer);
            }
        }

        void forEachAllowDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
            if (bucketCountThresholds.getMinDocCount() == 0) {
                for (long globalOrd = 0; globalOrd < valueCount; globalOrd++) {
                    if (false == acceptedGlobalOrdinals.test(globalOrd)) {
                        continue;
                    }
                    /*
                     * Use `add` instead of `find` here to assign an ordinal
                     * even if the global ord wasn't found so we can build
                     * sub-aggregations without trouble even though we haven't
                     * hit any documents for them. This is wasteful, but
                     * settings minDocCount == 0 is wasteful in general.....
                     */
                    long bucketOrd = bucketOrds.add(owningBucketOrd, globalOrd);
                    long docCount;
                    if (bucketOrd < 0) {
                        bucketOrd = -1 - bucketOrd;
                        docCount = bucketDocCount(bucketOrd);
                    } else {
                        docCount = 0;
                    }
                    consumer.accept(globalOrd, bucketOrd, docCount);
                    addBucketForMinDocCountZero(owningBucketOrd, globalOrd, consumer, null);
                }
            } else {
                LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrd);

@@ -563,6 +613,64 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
            }
        }

        /**
         * Excludes deleted docs in the results by cross-checking with liveDocs.
         */
        void forEachExcludeDeletedDocs(long owningBucketOrd, BucketInfoConsumer consumer) throws IOException {
            assert bucketCountThresholds.getMinDocCount() == 0;
            try (LongHash accepted = new LongHash(20, new BigArrays(null, null, ""))) {
                for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) {
                    LeafReader reader = ctx.reader();
                    Bits liveDocs = reader.getLiveDocs();
                    SortedSetDocValues globalOrds = null;
                    for (int docId = 0; docId < reader.maxDoc(); ++docId) {
                        if (liveDocs == null || liveDocs.get(docId)) { // document is not deleted
                            globalOrds = globalOrds == null ? valuesSource.globalOrdinalsValues(ctx) : globalOrds;
                            if (globalOrds.advanceExact(docId)) {
                                for (long globalOrd = globalOrds.nextOrd(); globalOrd != NO_MORE_ORDS; globalOrd = globalOrds.nextOrd()) {
                                    if (accepted.find(globalOrd) >= 0) {
                                        continue;
                                    }
                                    if (false == acceptedGlobalOrdinals.test(globalOrd)) {
                                        continue;
                                    }
                                    addBucketForMinDocCountZero(owningBucketOrd, globalOrd, consumer, accepted);
                                }
                            }
                        }
                    }
                }
            }
        }

        private void addBucketForMinDocCountZero(
            long owningBucketOrd,
            long globalOrd,
            BucketInfoConsumer consumer,
            @Nullable LongHash accepted
        ) throws IOException {
            /*
             * Use `add` instead of `find` here to assign an ordinal
             * even if the global ord wasn't found so we can build
             * sub-aggregations without trouble even though we haven't
             * hit any documents for them. This is wasteful, but
             * settings minDocCount == 0 is wasteful in general.....
             */
            long bucketOrd = bucketOrds.add(owningBucketOrd, globalOrd);
            long docCount;
            if (bucketOrd < 0) {
                bucketOrd = -1 - bucketOrd;
                docCount = bucketDocCount(bucketOrd);
            } else {
                docCount = 0;
            }
            assert globalOrd >= 0;
            consumer.accept(globalOrd, bucketOrd, docCount);
            if (accepted != null) {
                accepted.add(globalOrd);
            }
        }

        @Override
        public void close() {
            bucketOrds.close();
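Both forEachExcludeDeletedDocs variants above rely on the same Lucene idiom: a brute-force per-segment scan that consults the live-docs bitset before a document's terms are allowed to contribute. A minimal standalone sketch of just that idiom (the `reader` variable is a stand-in for one segment's LeafReader):

```java
// Live-docs check (standard Lucene API): a null bitset means no deletions.
Bits liveDocs = reader.getLiveDocs();
for (int docId = 0; docId < reader.maxDoc(); ++docId) {
    if (liveDocs != null && liveDocs.get(docId) == false) {
        continue; // deleted doc: skip it so its terms cannot surface as buckets
    }
    // ... advance doc values to docId and record its ordinals ...
}
```

The `accepted` LongHash in the real code additionally deduplicates global ordinals seen in more than one segment, so each bucket is emitted at most once.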
@@ -51,6 +51,7 @@ public class MapStringTermsAggregator extends AbstractStringTermsAggregator {
    private final ResultStrategy<?, ?> resultStrategy;
    private final BytesKeyedBucketOrds bucketOrds;
    private final IncludeExclude.StringFilter includeExclude;
    private final boolean excludeDeletedDocs;

    public MapStringTermsAggregator(
        String name,

@@ -66,7 +67,8 @@ public class MapStringTermsAggregator extends AbstractStringTermsAggregator {
        SubAggCollectionMode collectionMode,
        boolean showTermDocCountError,
        CardinalityUpperBound cardinality,
        Map<String, Object> metadata
        Map<String, Object> metadata,
        boolean excludeDeletedDocs
    ) throws IOException {
        super(name, factories, context, parent, order, format, bucketCountThresholds, collectionMode, showTermDocCountError, metadata);
        this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.

@@ -74,6 +76,7 @@ public class MapStringTermsAggregator extends AbstractStringTermsAggregator {
        bucketOrds = BytesKeyedBucketOrds.build(context.bigArrays(), cardinality);
        // set last because if there is an error during construction the collector gets release outside the constructor.
        this.collectorSource = collectorSource;
        this.excludeDeletedDocs = excludeDeletedDocs;
    }

    @Override

@@ -243,7 +246,7 @@ public class MapStringTermsAggregator extends AbstractStringTermsAggregator {
            B[][] topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.length);
            long[] otherDocCounts = new long[owningBucketOrds.length];
            for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
                collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]);
                collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx], excludeDeletedDocs);
                int size = (int) Math.min(bucketOrds.size(), bucketCountThresholds.getShardSize());

                PriorityQueue<B> ordered = buildPriorityQueue(size);

@@ -295,7 +298,7 @@ public class MapStringTermsAggregator extends AbstractStringTermsAggregator {
         * Collect extra entries for "zero" hit documents if they were requested
         * and required.
         */
        abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException;
        abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException;

        /**
         * Build an empty temporary bucket.

@@ -370,7 +373,7 @@ public class MapStringTermsAggregator extends AbstractStringTermsAggregator {
        }

        @Override
        void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {
        void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException {
            if (bucketCountThresholds.getMinDocCount() != 0) {
                return;
            }

@@ -382,6 +385,9 @@ public class MapStringTermsAggregator extends AbstractStringTermsAggregator {
                SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
                // brute force
                for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
                    if (excludeDeletedDocs && ctx.reader().getLiveDocs() != null && ctx.reader().getLiveDocs().get(docId) == false) {
                        continue;
                    }
                    if (values.advanceExact(docId)) {
                        int valueCount = values.docValueCount();
                        for (int i = 0; i < valueCount; ++i) {

@@ -518,7 +524,7 @@ public class MapStringTermsAggregator extends AbstractStringTermsAggregator {
        }

        @Override
        void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {}
        void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException {}

        @Override
        Supplier<SignificantStringTerms.Bucket> emptyBucketBuilder(long owningBucketOrd) {
@@ -49,6 +49,7 @@ public class NumericTermsAggregator extends TermsAggregator {
    private final ValuesSource.Numeric valuesSource;
    private final LongKeyedBucketOrds bucketOrds;
    private final LongFilter longFilter;
    private final boolean excludeDeletedDocs;

    public NumericTermsAggregator(
        String name,

@@ -63,13 +64,15 @@ public class NumericTermsAggregator extends TermsAggregator {
        SubAggCollectionMode subAggCollectMode,
        IncludeExclude.LongFilter longFilter,
        CardinalityUpperBound cardinality,
        Map<String, Object> metadata
        Map<String, Object> metadata,
        boolean excludeDeletedDocs
    ) throws IOException {
        super(name, factories, context, parent, bucketCountThresholds, order, format, subAggCollectMode, metadata);
        this.resultStrategy = resultStrategy.apply(this); // ResultStrategy needs a reference to the Aggregator to do its job.
        this.valuesSource = valuesSource;
        this.longFilter = longFilter;
        bucketOrds = LongKeyedBucketOrds.build(bigArrays(), cardinality);
        this.excludeDeletedDocs = excludeDeletedDocs;
    }

    @Override

@@ -143,7 +146,7 @@ public class NumericTermsAggregator extends TermsAggregator {
            B[][] topBucketsPerOrd = buildTopBucketsPerOrd(owningBucketOrds.length);
            long[] otherDocCounts = new long[owningBucketOrds.length];
            for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) {
                collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx]);
                collectZeroDocEntriesIfNeeded(owningBucketOrds[ordIdx], excludeDeletedDocs);
                long bucketsInOrd = bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]);

                int size = (int) Math.min(bucketsInOrd, bucketCountThresholds.getShardSize());

@@ -239,7 +242,7 @@ public class NumericTermsAggregator extends TermsAggregator {
         * Collect extra entries for "zero" hit documents if they were requested
         * and required.
         */
        abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException;
        abstract void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException;

        /**
         * Turn the buckets into an aggregation result.

@@ -284,7 +287,7 @@ public class NumericTermsAggregator extends TermsAggregator {
        abstract B buildEmptyBucket();

        @Override
        final void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {
        final void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException {
            if (bucketCountThresholds.getMinDocCount() != 0) {
                return;
            }

@@ -295,6 +298,9 @@ public class NumericTermsAggregator extends TermsAggregator {
            for (LeafReaderContext ctx : searcher().getTopReaderContext().leaves()) {
                SortedNumericDocValues values = getValues(ctx);
                for (int docId = 0; docId < ctx.reader().maxDoc(); ++docId) {
                    if (excludeDeletedDocs && ctx.reader().getLiveDocs() != null && ctx.reader().getLiveDocs().get(docId) == false) {
                        continue;
                    }
                    if (values.advanceExact(docId)) {
                        int valueCount = values.docValueCount();
                        for (int v = 0; v < valueCount; ++v) {

@@ -560,7 +566,7 @@ public class NumericTermsAggregator extends TermsAggregator {
        }

        @Override
        void collectZeroDocEntriesIfNeeded(long owningBucketOrd) throws IOException {}
        void collectZeroDocEntriesIfNeeded(long owningBucketOrd, boolean excludeDeletedDocs) throws IOException {}

        @Override
        SignificantLongTerms buildResult(long owningBucketOrd, long otherDocCoun, SignificantLongTerms.Bucket[] topBuckets) {
@@ -172,7 +172,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
                SubAggCollectionMode.BREADTH_FIRST,
                longFilter,
                cardinality,
                metadata
                metadata,
                false
            );
        }
    };

@@ -309,7 +310,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
                SubAggCollectionMode.BREADTH_FIRST,
                false,
                cardinality,
                metadata
                metadata,
                false
            );

        }

@@ -363,7 +365,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFac
                SubAggCollectionMode.BREADTH_FIRST,
                false,
                cardinality,
                metadata
                metadata,
                false
            );
        }
    };
@@ -162,7 +162,8 @@ public class SignificantTextAggregatorFactory extends AggregatorFactory {
            SubAggCollectionMode.BREADTH_FIRST,
            false,
            cardinality,
            metadata
            metadata,
            false
        );
        success = true;
        return mapStringTermsAggregator;
@@ -35,6 +35,8 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.Version.V_7_17_22;

public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<TermsAggregationBuilder> {
    public static final String NAME = "terms";
    public static final ValuesSourceRegistry.RegistryKey<TermsAggregatorSupplier> REGISTRY_KEY = new ValuesSourceRegistry.RegistryKey<>(

@@ -113,6 +115,7 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<Term
        DEFAULT_BUCKET_COUNT_THRESHOLDS
    );
    private boolean showTermDocCountError = false;
    private boolean excludeDeletedDocs = false;

    public TermsAggregationBuilder(String name) {
        super(name);

@@ -153,6 +156,9 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<Term
        includeExclude = in.readOptionalWriteable(IncludeExclude::new);
        order = InternalOrder.Streams.readOrder(in);
        showTermDocCountError = in.readBoolean();
        if (in.getVersion().onOrAfter(V_7_17_22)) {
            excludeDeletedDocs = in.readBoolean();
        }
    }

    @Override

@@ -168,6 +174,9 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<Term
        out.writeOptionalWriteable(includeExclude);
        order.writeTo(out);
        out.writeBoolean(showTermDocCountError);
        if (out.getVersion().onOrAfter(V_7_17_22)) {
            out.writeBoolean(excludeDeletedDocs);
        }
    }

    /**

@@ -349,6 +358,18 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<Term
        return this;
    }

    /**
     * Set whether deleted documents should be explicitly excluded from the aggregation results
     */
    public TermsAggregationBuilder excludeDeletedDocs(boolean excludeDeletedDocs) {
        this.excludeDeletedDocs = excludeDeletedDocs;
        return this;
    }

    public boolean excludeDeletedDocs() {
        return excludeDeletedDocs;
    }

    @Override
    public BucketCardinality bucketCardinality() {
        return BucketCardinality.MANY;

@@ -375,7 +396,8 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<Term
            parent,
            subFactoriesBuilder,
            metadata,
            aggregatorSupplier
            aggregatorSupplier,
            excludeDeletedDocs
        );
    }

@@ -406,7 +428,8 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<Term
            executionHint,
            includeExclude,
            order,
            showTermDocCountError
            showTermDocCountError,
            excludeDeletedDocs
        );
    }

@@ -421,7 +444,8 @@ public class TermsAggregationBuilder extends ValuesSourceAggregationBuilder<Term
            && Objects.equals(executionHint, other.executionHint)
            && Objects.equals(includeExclude, other.includeExclude)
            && Objects.equals(order, other.order)
            && Objects.equals(showTermDocCountError, other.showTermDocCountError);
            && Objects.equals(showTermDocCountError, other.showTermDocCountError)
            && Objects.equals(excludeDeletedDocs, other.excludeDeletedDocs);
    }

    @Override
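The stream-constructor and write changes above use the standard Elasticsearch wire-compatibility gate: the new boolean is only read or written when the peer node is on 7.17.22 or later, so mixed-version clusters keep working during a rolling upgrade. In isolation the pattern looks like this (fragment taken from the hunks above, not a complete method):

```java
// Write side: older peers never receive the new field.
if (out.getVersion().onOrAfter(V_7_17_22)) {
    out.writeBoolean(excludeDeletedDocs);
}
// Read side must mirror the gate exactly, or the stream desynchronizes.
if (in.getVersion().onOrAfter(V_7_17_22)) {
    excludeDeletedDocs = in.readBoolean();
}
```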
@@ -100,7 +100,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
            SubAggCollectionMode subAggCollectMode,
            boolean showTermDocCountError,
            CardinalityUpperBound cardinality,
            Map<String, Object> metadata
            Map<String, Object> metadata,
            boolean excludeDeletedDocs
        ) throws IOException {
            ValuesSource valuesSource = valuesSourceConfig.getValuesSource();
            ExecutionMode execution = null;

@@ -144,7 +145,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
                subAggCollectMode,
                showTermDocCountError,
                cardinality,
                metadata
                metadata,
                excludeDeletedDocs
            );

        }

@@ -171,7 +173,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
            SubAggCollectionMode subAggCollectMode,
            boolean showTermDocCountError,
            CardinalityUpperBound cardinality,
            Map<String, Object> metadata
            Map<String, Object> metadata,
            boolean excludeDeletedDocs
        ) throws IOException {

            if ((includeExclude != null) && (includeExclude.isRegexBased())) {

@@ -215,7 +218,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
                subAggCollectMode,
                longFilter,
                cardinality,
                metadata
                metadata,
                excludeDeletedDocs
            );
        }
    };

@@ -228,6 +232,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
    private final SubAggCollectionMode collectMode;
    private final TermsAggregator.BucketCountThresholds bucketCountThresholds;
    private final boolean showTermDocCountError;
    private final boolean excludeDeletedDocs;

    TermsAggregatorFactory(
        String name,

@@ -242,7 +247,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
        AggregatorFactory parent,
        AggregatorFactories.Builder subFactoriesBuilder,
        Map<String, Object> metadata,
        TermsAggregatorSupplier aggregatorSupplier
        TermsAggregatorSupplier aggregatorSupplier,
        boolean excludeDeletedDocs
    ) throws IOException {
        super(name, config, context, parent, subFactoriesBuilder, metadata);
        this.aggregatorSupplier = aggregatorSupplier;

@@ -252,6 +258,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
        this.collectMode = collectMode;
        this.bucketCountThresholds = bucketCountThresholds;
        this.showTermDocCountError = showTermDocCountError;
        this.excludeDeletedDocs = excludeDeletedDocs;
    }

    @Override

@@ -311,7 +318,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
            collectMode,
            showTermDocCountError,
            cardinality,
            metadata
            metadata,
            excludeDeletedDocs
        );
    }

@@ -371,7 +379,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
            SubAggCollectionMode subAggCollectMode,
            boolean showTermDocCountError,
            CardinalityUpperBound cardinality,
            Map<String, Object> metadata
            Map<String, Object> metadata,
            boolean excludeDeletedDocs
        ) throws IOException {
            IncludeExclude.StringFilter filter = includeExclude == null
                ? null

@@ -390,7 +399,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
                subAggCollectMode,
                showTermDocCountError,
                cardinality,
                metadata
                metadata,
                excludeDeletedDocs
            );
        }
    },

@@ -409,7 +419,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
            SubAggCollectionMode subAggCollectMode,
            boolean showTermDocCountError,
            CardinalityUpperBound cardinality,
            Map<String, Object> metadata
            Map<String, Object> metadata,
            boolean excludeDeletedDocs
        ) throws IOException {

            assert valuesSourceConfig.getValuesSource() instanceof ValuesSource.Bytes.WithOrdinals;

@@ -417,7 +428,10 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
                .getValuesSource();
            SortedSetDocValues values = globalOrdsValues(context, ordinalsValuesSource);
            long maxOrd = values.getValueCount();
            if (maxOrd > 0 && maxOrd <= MAX_ORDS_TO_TRY_FILTERS && context.enableRewriteToFilterByFilter()) {
            if (maxOrd > 0
                && maxOrd <= MAX_ORDS_TO_TRY_FILTERS
                && context.enableRewriteToFilterByFilter()
                && false == excludeDeletedDocs) {
                StringTermsAggregatorFromFilters adapted = StringTermsAggregatorFromFilters.adaptIntoFiltersOrNull(
                    name,
                    factories,

@@ -485,7 +499,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
                false,
                subAggCollectMode,
                showTermDocCountError,
                metadata
                metadata,
                excludeDeletedDocs
            );

        }

@@ -531,7 +546,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
                subAggCollectMode,
                showTermDocCountError,
                cardinality,
                metadata
                metadata,
                excludeDeletedDocs
            );
        }
    };

@@ -565,7 +581,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory {
            SubAggCollectionMode subAggCollectMode,
            boolean showTermDocCountError,
            CardinalityUpperBound cardinality,
            Map<String, Object> metadata
            Map<String, Object> metadata,
            boolean excludeDeletedDocs
        ) throws IOException;

        @Override
@@ -31,6 +31,7 @@ interface TermsAggregatorSupplier {
        Aggregator.SubAggCollectionMode subAggCollectMode,
        boolean showTermDocCountError,
        CardinalityUpperBound cardinality,
        Map<String, Object> metadata
        Map<String, Object> metadata,
        boolean excludeDeletedDocs
    ) throws IOException;
}
@@ -13,6 +13,7 @@ import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregatorFactories.Builder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.CumulativeSumPipelineAggregationBuilder;
import org.elasticsearch.test.AbstractSerializingTestCase;
import org.elasticsearch.xcontent.NamedXContentRegistry;

@@ -123,6 +124,42 @@ public class AggregatorFactoriesBuilderTests extends AbstractSerializingTestCase
        assertNotEquals(builder1.hashCode(), builder2.hashCode());
    }

    public void testForceExcludedDocs() {
        // simple
        AggregatorFactories.Builder builder = new AggregatorFactories.Builder();
        TermsAggregationBuilder termsAggregationBuilder = AggregationBuilders.terms("myterms");
        builder.addAggregator(termsAggregationBuilder);
        assertFalse(termsAggregationBuilder.excludeDeletedDocs());
        assertFalse(builder.hasZeroMinDocTermsAggregation());
        termsAggregationBuilder.minDocCount(0);
        assertTrue(builder.hasZeroMinDocTermsAggregation());
        builder.forceTermsAggsToExcludeDeletedDocs();
        assertTrue(termsAggregationBuilder.excludeDeletedDocs());

        // nested
        AggregatorFactories.Builder nested = new AggregatorFactories.Builder();
        boolean hasZeroMinDocTermsAggregation = false;
        for (int i = 0; i <= randomIntBetween(1, 10); i++) {
            AggregationBuilder agg = getRandomAggregation();
            nested.addAggregator(agg);
            if (randomBoolean()) {
                hasZeroMinDocTermsAggregation = true;
                agg.subAggregation(termsAggregationBuilder);
            }
        }
        if (hasZeroMinDocTermsAggregation) {
            assertTrue(nested.hasZeroMinDocTermsAggregation());
            nested.forceTermsAggsToExcludeDeletedDocs();
            for (AggregationBuilder agg : nested.getAggregatorFactories()) {
                if (agg instanceof TermsAggregationBuilder) {
                    assertTrue(((TermsAggregationBuilder) agg).excludeDeletedDocs());
                }
            }
        } else {
            assertFalse(nested.hasZeroMinDocTermsAggregation());
        }
    }

    private static AggregationBuilder getRandomAggregation() {
        // just a couple of aggregations, sufficient for the purpose of this test
        final int randomAggregatorPoolSize = 4;
@@ -242,7 +242,9 @@ public class DeprecationChecks {
            NodeDeprecationChecks::checkLifecyleStepMasterTimeoutSetting,
            NodeDeprecationChecks::checkEqlEnabledSetting,
            NodeDeprecationChecks::checkNodeAttrData,
            NodeDeprecationChecks::checkPollIntervalTooLow
            NodeDeprecationChecks::checkPollIntervalTooLow,
            NodeDeprecationChecks::checkDLSForceTermsAggsToExcludeDeleteDocsEnabledSetting,
            NodeDeprecationChecks::checkDLSErrorWhenValidateQueryWithRewrite
        )
    ).collect(Collectors.toList());
}
@@ -2502,4 +2502,49 @@ class NodeDeprecationChecks {
        }
        return null;
    }

    static DeprecationIssue checkDLSForceTermsAggsToExcludeDeleteDocsEnabledSetting(
        final Settings settings,
        final PluginsAndModules pluginsAndModules,
        final ClusterState clusterState,
        final XPackLicenseState licenseState
    ) {
        Setting<Boolean> deprecatedSetting = Setting.boolSetting(
            "xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled",
            true,
            Setting.Property.NodeScope,
            Setting.Property.Deprecated
        );
        String url = "https://www.elastic.co/guide/en/elasticsearch/reference/7.17/migrating-7.17.html#deprecation_for_dls_settings";
        return checkRemovedSetting(
            clusterState.metadata().settings(),
            settings,
            deprecatedSetting,
            url,
            "Stricter DLS rules are the default and are not configurable in newer versions."
        );
    }

    static DeprecationIssue checkDLSErrorWhenValidateQueryWithRewrite(
        final Settings settings,
        final PluginsAndModules pluginsAndModules,
        final ClusterState clusterState,
        final XPackLicenseState licenseState
    ) {
        Setting<Boolean> deprecatedSetting = Setting.boolSetting(
            "xpack.security.dls.error_when_validate_query_with_rewrite.enabled",
            true,
            Setting.Property.NodeScope,
            Setting.Property.Deprecated
        );
        String url = "https://www.elastic.co/guide/en/elasticsearch/reference/7.17/migrating-7.17.html#deprecation_for_dls_settings";
        return checkRemovedSetting(
            clusterState.metadata().settings(),
            settings,
            deprecatedSetting,
            url,
            "Stricter DLS rules are the default and are not configurable in newer versions."
        );
    }

}
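The sketch below, mirroring the test in the next file, shows what trips these checks: any node that has either deprecated key explicitly set (to either value) gets a critical deprecation issue pointing at the migration docs.

```java
// Sketch: explicit presence of the key is what matters, not its value.
Settings nodeSettings = Settings.builder()
    .put("xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled", false)
    .build();
// checkRemovedSetting(...) then reports:
//   "Setting [xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled] is deprecated"
```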
@@ -2569,4 +2569,40 @@ public class NodeDeprecationChecksTests extends ESTestCase {
            assertThat(noIssues, hasSize(0));
        }
    }

    public void testDLSSetting() {
        boolean settingValue = randomBoolean();
        List<String> settingsToCheck = Arrays.asList(
            "xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled",
            "xpack.security.dls.error_when_validate_query_with_rewrite.enabled"
        );
        settingsToCheck.forEach(settingName -> {
            Settings settings = Settings.builder().put(settingName, settingValue).build();
            final PluginsAndModules pluginsAndModules = new PluginsAndModules(Collections.emptyList(), Collections.emptyList());
            final XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY, () -> 0);
            final List<DeprecationIssue> issues = getDeprecationIssues(settings, pluginsAndModules, licenseState);
            final DeprecationIssue expected = new DeprecationIssue(
                DeprecationIssue.Level.CRITICAL,
                String.format(Locale.ROOT, "Setting [%s] is deprecated", settingName),
                "https://www.elastic.co/guide/en/elasticsearch/reference/7.17/migrating-7.17.html#deprecation_for_dls_settings",
                String.format(
                    Locale.ROOT,
                    "Remove the [%s] setting. " + "Stricter DLS rules are the default and are not configurable in newer versions.",
                    settingName
                ),
                false,
                null
            );
            assertThat(issues, hasItem(expected));
            assertWarnings(
                String.format(
                    Locale.ROOT,
                    "[%s] setting was deprecated in Elasticsearch and will be "
                        + "removed in a future release! See the breaking changes documentation for the next major version.",
                    settingName
                )
            );
        });
    }

}
@@ -10,6 +10,8 @@ import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.get.GetResponse;

@@ -45,6 +47,9 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.global.Global;
import org.elasticsearch.search.aggregations.bucket.terms.IncludeExclude;
import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.builder.PointInTimeBuilder;
import org.elasticsearch.search.profile.ProfileResult;

@@ -69,6 +74,7 @@ import org.elasticsearch.xpack.core.XPackSettings;
import org.elasticsearch.xpack.security.LocalStateSecurity;
import org.elasticsearch.xpack.spatial.SpatialPlugin;
import org.elasticsearch.xpack.spatial.index.query.ShapeQueryBuilder;
import org.hamcrest.Matchers;

import java.util.ArrayList;
import java.util.Arrays;

@@ -93,6 +99,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSear
import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER;
import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;

@@ -148,6 +156,9 @@ public class DocumentLevelSecurityTests extends SecurityIntegTestCase {
            + "\n"
            + "user5:"
            + usersPasswdHashed
            + "\n"
            + "user6:"
            + usersPasswdHashed
            + "\n";
    }

@@ -158,7 +169,8 @@ public class DocumentLevelSecurityTests extends SecurityIntegTestCase {
            + "role2:user1,user3\n"
            + "role3:user2,user3\n"
            + "role4:user4\n"
            + "role5:user5\n";
            + "role5:user5\n"
            + "role6:user6\n";
    }

    @Override

@@ -203,7 +215,13 @@ public class DocumentLevelSecurityTests extends SecurityIntegTestCase {
            + "  - names: [ 'fls-index' ]\n"
            + "    privileges: [ read ]\n"
            + "    field_security:\n"
            + "       grant: [ 'field1', 'other_field', 'suggest_field2' ]\n";
            + "       grant: [ 'field1', 'other_field', 'suggest_field2' ]\n"
            + "role6:\n"
            + "  cluster: [ all ]\n"
            + "  indices:\n"
            + "    - names: '*'\n"
            + "      privileges: [ ALL ]\n"
            + "      query: '{\"term\" : {\"color\" : \"red\"}}'\n";
    }

    @Override

@@ -953,6 +971,131 @@ public class DocumentLevelSecurityTests extends SecurityIntegTestCase {
        assertThat(termsAgg.getBuckets().size(), equalTo(1));
    }

    public void testZeroMinDocAggregation() throws Exception {
        assertAcked(
            client().admin()
                .indices()
                .prepareCreate("test")
                .addMapping("_doc", "color", "type=keyword", "fruit", "type=keyword", "count", "type=integer")
                .setSettings(Collections.singletonMap("index.number_of_shards", 1))
        );
        client().prepareIndex("test", "_doc")
            .setId("1")
            .setSource("color", "red", "fruit", "apple", "count", -1)
            .setRefreshPolicy(IMMEDIATE)
            .get();
        client().prepareIndex("test", "_doc")
            .setId("2")
            .setSource("color", "yellow", "fruit", "banana", "count", -2)
            .setRefreshPolicy(IMMEDIATE)
            .get();
        client().prepareIndex("test", "_doc")
            .setId("3")
            .setSource("color", "green", "fruit", "grape", "count", -3)
            .setRefreshPolicy(IMMEDIATE)
            .get();
        client().prepareIndex("test", "_doc")
            .setId("4")
            .setSource("color", "red", "fruit", "grape", "count", -4)
            .setRefreshPolicy(IMMEDIATE)
            .get();
        new ForceMergeRequestBuilder(client(), ForceMergeAction.INSTANCE).setIndices("test").get();

        SearchResponse response = client().filterWithHeader(
            Collections.singletonMap(BASIC_AUTH_HEADER, basicAuthHeaderValue("user6", USERS_PASSWD))
        )
            .prepareSearch("test")
            .setQuery(termQuery("fruit", "apple"))
            // global ordinal
            .addAggregation(AggregationBuilders.terms("colors1").field("color").minDocCount(0))
            .addAggregation(AggregationBuilders.terms("fruits").field("fruit").minDocCount(0))
            // global ordinal remapped
            .addAggregation(
                AggregationBuilders.terms("colors2")
                    .field("color")
                    .minDocCount(0)
                    .includeExclude(new IncludeExclude(".*", null, null, null))
            )
            // mapped
            .addAggregation(AggregationBuilders.terms("colors3").field("color").minDocCount(0).executionHint("map"))
            // numeric
            .addAggregation(AggregationBuilders.terms("counts").field("count").minDocCount(0))
            // nested
            .addAggregation(
                AggregationBuilders.terms("nested")
                    .field("color")
                    .minDocCount(0)
                    .subAggregation(
                        AggregationBuilders.terms("fruits")
                            .field("fruit")
                            .minDocCount(0)
                            .executionHint("map")
                            .subAggregation(AggregationBuilders.terms("counts").field("count").minDocCount(0))
                    )
                    .minDocCount(0)
            )
            .get();

        assertThat(
            response.toString(),
            allOf(containsString("apple"), containsString("grape"), containsString("red"), containsString("-1"), containsString("-4"))
        );
        assertThat(
            response.toString(),
            allOf(
                Matchers.not(containsString("banana")),
                Matchers.not(containsString("yellow")),
                Matchers.not(containsString("green")),
                Matchers.not(containsString("-2")),
                Matchers.not(containsString("-3"))
            )
        );
        assertHitCount(response, 1);
        assertSearchHits(response, "1");
        // fruits
        StringTerms fruits = response.getAggregations().get("fruits");
        assertThat(fruits.getBuckets().size(), equalTo(2));
        List<StringTerms.Bucket> fruitBuckets = fruits.getBuckets();
        assertTrue(fruitBuckets.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("apple") && bucket.getDocCount() == 1));
        assertTrue(fruitBuckets.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("grape") && bucket.getDocCount() == 0));
        // counts
        LongTerms counts = response.getAggregations().get("counts");
        assertThat(counts.getBuckets().size(), equalTo(2));
        List<LongTerms.Bucket> countsBuckets = counts.getBuckets();
        assertTrue(countsBuckets.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("-1") && bucket.getDocCount() == 1));
        assertTrue(countsBuckets.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("-4") && bucket.getDocCount() == 0));
        // colors
        for (int i = 1; i <= 3; i++) {
            StringTerms colors = response.getAggregations().get("colors" + i);
            assertThat(colors.getBuckets().size(), equalTo(1));
            assertThat(colors.getBuckets().get(0).getKeyAsString(), equalTo("red"));
            assertThat(colors.getBuckets().get(0).getDocCount(), equalTo(1L));
        }
        // nested
        StringTerms nested = response.getAggregations().get("nested");
        assertThat(nested.getBuckets().size(), equalTo(1));
        assertThat(nested.getBuckets().get(0).getKeyAsString(), equalTo("red"));
        assertThat(nested.getBuckets().get(0).getDocCount(), equalTo(1L));
        StringTerms innerFruits = nested.getBuckets().get(0).getAggregations().get("fruits");
        List<StringTerms.Bucket> innerFruitsBuckets = innerFruits.getBuckets();
        assertTrue(innerFruitsBuckets.stream().anyMatch(b -> b.getKeyAsString().equals("apple") && b.getDocCount() == 1));
        assertTrue(innerFruitsBuckets.stream().anyMatch(b -> b.getKeyAsString().equals("grape") && b.getDocCount() == 0));
        assertThat(innerFruitsBuckets.size(), equalTo(2));

        for (int i = 0; i <= 1; i++) {
            String parentBucketKey = innerFruitsBuckets.get(i).getKeyAsString();
            LongTerms innerCounts = innerFruitsBuckets.get(i).getAggregations().get("counts");
            assertThat(innerCounts.getBuckets().size(), equalTo(2));
            List<LongTerms.Bucket> icb = innerCounts.getBuckets();
            if ("apple".equals(parentBucketKey)) {
                assertTrue(icb.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("-1") && bucket.getDocCount() == 1));
            } else {
                assertTrue(icb.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("-1") && bucket.getDocCount() == 0));
            }
            assertTrue(icb.stream().anyMatch(bucket -> bucket.getKeyAsString().equals("-4") && bucket.getDocCount() == 0));
        }
    }

    public void testParentChild() throws Exception {
        XContentBuilder mapping = jsonBuilder().startObject()
            .startObject("properties")
@@ -248,6 +248,7 @@ import org.elasticsearch.xpack.security.authz.interceptor.ResizeRequestIntercept
import org.elasticsearch.xpack.security.authz.interceptor.SearchRequestInterceptor;
import org.elasticsearch.xpack.security.authz.interceptor.ShardSearchRequestInterceptor;
import org.elasticsearch.xpack.security.authz.interceptor.UpdateRequestInterceptor;
import org.elasticsearch.xpack.security.authz.interceptor.ValidateRequestInterceptor;
import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore;
import org.elasticsearch.xpack.security.authz.store.DeprecationRoleDescriptorConsumer;
import org.elasticsearch.xpack.security.authz.store.FileRolesStore;

@@ -453,6 +454,28 @@ public class Security extends Plugin
        License.OperationMode.ENTERPRISE
    );

    /**
     * 7.17.x only setting to help mitigate any potential issues for how DLS applies to terms aggs + min_doc_count=0.
     * New versions default to stricter DLS rules and setting this to false will allow to revert to the less strict DLS behavior.
     */
    public static final Setting<Boolean> DLS_FORCE_TERMS_AGGS_TO_EXCLUDE_DELETED_DOCS = Setting.boolSetting(
        "xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled",
        true,
        Property.NodeScope,
        Property.Deprecated
    );

    /**
     * 7.17.x only setting to help mitigate any potential issues for how DLS applies to the validate API + rewrite=true.
     * New versions default to stricter DLS rules and setting this to false will allow to revert to the less strict DLS behavior.
     */
    public static final Setting<Boolean> DLS_ERROR_WHEN_VALIDATE_QUERY_WITH_REWRITE = Setting.boolSetting(
        "xpack.security.dls.error_when_validate_query_with_rewrite.enabled",
        true,
        Property.NodeScope,
        Property.Deprecated
    );

    private static final Logger logger = LogManager.getLogger(Security.class);

    public static final SystemIndexDescriptor SECURITY_MAIN_INDEX_DESCRIPTOR = getSecurityMainIndexDescriptor();

@@ -842,7 +865,8 @@ public class Security extends Plugin
                    new ShardSearchRequestInterceptor(threadPool, getLicenseState(), clusterService),
                    new UpdateRequestInterceptor(threadPool, getLicenseState()),
                    new BulkShardRequestInterceptor(threadPool, getLicenseState()),
                    new DlsFlsLicenseRequestInterceptor(threadPool.getThreadContext(), getLicenseState())
                    new DlsFlsLicenseRequestInterceptor(threadPool.getThreadContext(), getLicenseState()),
                    new ValidateRequestInterceptor(threadPool, getLicenseState(), settings)
                )
            );
        }

@@ -1088,6 +1112,8 @@ public class Security extends Plugin
        settingsList.add(CachingServiceAccountTokenStore.CACHE_TTL_SETTING);
        settingsList.add(CachingServiceAccountTokenStore.CACHE_HASH_ALGO_SETTING);
        settingsList.add(CachingServiceAccountTokenStore.CACHE_MAX_TOKENS_SETTING);
        settingsList.add(DLS_FORCE_TERMS_AGGS_TO_EXCLUDE_DELETED_DOCS);
        settingsList.add(DLS_ERROR_WHEN_VALIDATE_QUERY_WITH_REWRITE);

        // hide settings
        settingsList.add(
@@ -22,15 +22,18 @@ import java.util.Arrays;
import java.util.Map;

import static org.elasticsearch.transport.RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR;
import static org.elasticsearch.xpack.security.Security.DLS_FORCE_TERMS_AGGS_TO_EXCLUDE_DELETED_DOCS;

public class SearchRequestInterceptor extends FieldAndDocumentLevelSecurityRequestInterceptor {

    public static final Version VERSION_SHARD_SEARCH_INTERCEPTOR = Version.V_7_11_2;
    private final ClusterService clusterService;
    private final boolean forceTermsAggsToExcludeDeleteDocsEnabled;

    public SearchRequestInterceptor(ThreadPool threadPool, XPackLicenseState licenseState, ClusterService clusterService) {
        super(threadPool.getThreadContext(), licenseState);
        this.clusterService = clusterService;
        forceTermsAggsToExcludeDeleteDocsEnabled = DLS_FORCE_TERMS_AGGS_TO_EXCLUDE_DELETED_DOCS.get(clusterService.getSettings());
    }

    @Override

@@ -65,6 +68,11 @@ public class SearchRequestInterceptor extends FieldAndDocumentLevelSecurityReque
                    )
                );
            } else {
                if (forceTermsAggsToExcludeDeleteDocsEnabled && hasZeroMinDocTermsAggregation(request)) {
                    assert request.source() != null && request.source().aggregations() != null;
                    request.source().aggregations().forceTermsAggsToExcludeDeletedDocs();
                }

                listener.onResponse(null);
            }
        } else {

@@ -81,4 +89,11 @@ public class SearchRequestInterceptor extends FieldAndDocumentLevelSecurityReque
    boolean hasRemoteIndices(SearchRequest request) {
        return Arrays.stream(request.indices()).anyMatch(name -> name.indexOf(REMOTE_CLUSTER_INDEX_SEPARATOR) >= 0);
    }

    private static boolean hasZeroMinDocTermsAggregation(SearchRequest searchRequest) {
        return searchRequest.source() != null
            && searchRequest.source().aggregations() != null
            && searchRequest.source().aggregations().hasZeroMinDocTermsAggregation();
    }

}
@ -0,0 +1,69 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */
package org.elasticsearch.xpack.security.authz.interceptor;

import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.license.XPackLicenseState;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl;

import java.util.Map;

import static org.elasticsearch.xpack.security.Security.DLS_ERROR_WHEN_VALIDATE_QUERY_WITH_REWRITE;

public class ValidateRequestInterceptor extends FieldAndDocumentLevelSecurityRequestInterceptor {

    private final boolean enabled;

    public ValidateRequestInterceptor(ThreadPool threadPool, XPackLicenseState licenseState, Settings settings) {
        super(threadPool.getThreadContext(), licenseState);
        enabled = DLS_ERROR_WHEN_VALIDATE_QUERY_WITH_REWRITE.get(settings);
    }

    @Override
    void disableFeatures(
        IndicesRequest indicesRequest,
        Map<String, IndicesAccessControl.IndexAccessControl> indexAccessControlByIndex,
        ActionListener<Void> listener
    ) {
        final ValidateQueryRequest request = (ValidateQueryRequest) indicesRequest;
        if (indexAccessControlByIndex.values().stream().anyMatch(iac -> iac.getDocumentPermissions().hasDocumentLevelPermissions())) {
            if (hasRewrite(request)) {
                listener.onFailure(
                    new ElasticsearchSecurityException(
                        "Validate with rewrite isn't supported if document level security is enabled",
                        RestStatus.BAD_REQUEST
                    )
                );
            } else {
                listener.onResponse(null);
            }
        } else {
            listener.onResponse(null);
        }
    }

    @Override
    public boolean supports(IndicesRequest request) {
        if (enabled && request instanceof ValidateQueryRequest) {
            ValidateQueryRequest validateQueryRequest = (ValidateQueryRequest) request;
            return hasRewrite(validateQueryRequest);
        } else {
            return false;
        }
    }

    private static boolean hasRewrite(ValidateQueryRequest validateQueryRequest) {
        return validateQueryRequest.rewrite();
    }
}
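Editor's note: the request shape this new interceptor rejects, sketched with the same builder the test file below uses; any ElasticsearchClient works as the client here:

    // Sketch only: validate-query with rewrite under DLS.
    ValidateQueryRequestBuilder builder = new ValidateQueryRequestBuilder(client, ValidateQueryAction.INSTANCE);
    ValidateQueryRequest request = builder.setRewrite(true).request();
    // For a caller constrained by document level security, the interceptor fails the
    // request with ElasticsearchSecurityException (HTTP 400):
    // "Validate with rewrite isn't supported if document level security is enabled".
    // With setRewrite(false), or without DLS, the request passes through unchanged.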
@ -132,6 +132,7 @@ public class SecurityTests extends ESTestCase {
        Client client = mock(Client.class);
        when(client.threadPool()).thenReturn(threadPool);
        when(client.settings()).thenReturn(settings);
        when(clusterService.getSettings()).thenReturn(settings);
        return security.createComponents(
            client,
            threadPool,
@ -13,13 +13,21 @@ import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ArrayUtils;
import org.elasticsearch.core.Set;
import org.elasticsearch.license.MockLicenseState;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl;
import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions;
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
import org.junit.After;
import org.junit.Before;

@ -29,6 +37,7 @@ import java.util.Collections;
import java.util.stream.Collectors;

import static org.elasticsearch.xpack.core.security.SecurityField.DOCUMENT_LEVEL_SECURITY_FEATURE;
import static org.elasticsearch.xpack.security.Security.DLS_FORCE_TERMS_AGGS_TO_EXCLUDE_DELETED_DOCS;
import static org.hamcrest.Matchers.is;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.Mockito.mock;

@ -50,6 +59,7 @@ public class SearchRequestInterceptorTests extends ESTestCase {
        when(licenseState.isSecurityEnabled()).thenReturn(true);
        when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(true);
        clusterService = mock(ClusterService.class);
        when(clusterService.getSettings()).thenReturn(Settings.EMPTY);
        interceptor = new SearchRequestInterceptor(threadPool, licenseState, clusterService);
    }

@ -124,4 +134,109 @@ public class SearchRequestInterceptorTests extends ESTestCase {
        assertThat(interceptor.hasRemoteIndices(searchRequest), is(false));
    }

    public void testForceExcludeDeletedDocs() {
        innerTestForceExcludeDeletedDocs(true);
    }

    public void testNoForceExcludeDeletedDocs() {
        configureMinMondeVersion(VersionUtils.randomVersionBetween(random(), Version.V_7_11_2, Version.CURRENT));
        SearchRequest searchRequest = new SearchRequest();
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        TermsAggregationBuilder termsAggregationBuilder = new TermsAggregationBuilder("myterms");
        termsAggregationBuilder.minDocCount(1);
        searchSourceBuilder.aggregation(termsAggregationBuilder);
        searchRequest.source(searchSourceBuilder);

        final DocumentPermissions documentPermissions = DocumentPermissions.filteredBy(
            Set.of(new BytesArray("{\"term\":{\"username\":\"foo\"}}"))
        );
        final String index = randomAlphaOfLengthBetween(3, 8);
        final PlainActionFuture<Void> listener = new PlainActionFuture<>();
        assertFalse(termsAggregationBuilder.excludeDeletedDocs());
        interceptor.disableFeatures(
            searchRequest,
            Collections.singletonMap(
                index,
                new IndicesAccessControl.IndexAccessControl(false, FieldPermissions.DEFAULT, documentPermissions)
            ),
            listener
        );
        assertFalse(termsAggregationBuilder.excludeDeletedDocs()); // did not change value

        termsAggregationBuilder.minDocCount(0);
        interceptor.disableFeatures(
            searchRequest,
            Collections.emptyMap(), // no DLS
            listener
        );
        assertFalse(termsAggregationBuilder.excludeDeletedDocs()); // did not change value
    }

    public void testDisableFeaturesWithDLSConfig() {
        // default
        innerTestForceExcludeDeletedDocs(true);

        // explicit configuration - same as default
        when(clusterService.getSettings()).thenReturn(
            Settings.builder()
                .put(
                    DLS_FORCE_TERMS_AGGS_TO_EXCLUDE_DELETED_DOCS.getKey(),
                    DLS_FORCE_TERMS_AGGS_TO_EXCLUDE_DELETED_DOCS.getDefault(Settings.EMPTY)
                )
                .build()
        );
        interceptor = new SearchRequestInterceptor(threadPool, licenseState, clusterService);
        innerTestForceExcludeDeletedDocs(true);
        assertWarnings(
            "[xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled] setting was deprecated in Elasticsearch and will be "
                + "removed in a future release! See the breaking changes documentation for the next major version."
        );

        // explicit configuration - opposite of default
        when(clusterService.getSettings()).thenReturn(
            Settings.builder()
                .put(
                    DLS_FORCE_TERMS_AGGS_TO_EXCLUDE_DELETED_DOCS.getKey(),
                    DLS_FORCE_TERMS_AGGS_TO_EXCLUDE_DELETED_DOCS.getDefault(Settings.EMPTY) == false
                )
                .build()
        );
        interceptor = new SearchRequestInterceptor(threadPool, licenseState, clusterService);
        innerTestForceExcludeDeletedDocs(false);
        assertWarnings(
            "[xpack.security.dls.force_terms_aggs_to_exclude_deleted_docs.enabled] setting was deprecated in Elasticsearch and will be "
                + "removed in a future release! See the breaking changes documentation for the next major version."
        );
    }

    private void innerTestForceExcludeDeletedDocs(boolean expectedToExcludeDeletedDocs) {
        configureMinMondeVersion(VersionUtils.randomVersionBetween(random(), Version.V_7_11_2, Version.CURRENT));
        SearchRequest searchRequest = new SearchRequest();
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
        TermsAggregationBuilder termsAggregationBuilder = AggregationBuilders.terms("myterms");
        termsAggregationBuilder.minDocCount(0);
        searchSourceBuilder.aggregation(termsAggregationBuilder);
        searchRequest.source(searchSourceBuilder);

        final DocumentPermissions documentPermissions = DocumentPermissions.filteredBy(
            Set.of(new BytesArray("{\"term\":{\"username\":\"foo\"}}"))
        );
        final String index = randomAlphaOfLengthBetween(3, 8);
        final PlainActionFuture<Void> listener = new PlainActionFuture<>();
        assertFalse(termsAggregationBuilder.excludeDeletedDocs());
        interceptor.disableFeatures(
            searchRequest,
            Collections.singletonMap(
                index,
                new IndicesAccessControl.IndexAccessControl(false, FieldPermissions.DEFAULT, documentPermissions)
            ),
            listener
        );
        if (expectedToExcludeDeletedDocs) {
            assertTrue(termsAggregationBuilder.excludeDeletedDocs()); // changed value
        } else {
            assertFalse(termsAggregationBuilder.excludeDeletedDocs()); // did not change value
        }
    }
}
@ -0,0 +1,134 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.security.authz.interceptor;

import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequest;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Set;
import org.elasticsearch.license.MockLicenseState;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl;
import org.elasticsearch.xpack.core.security.authz.permission.DocumentPermissions;
import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
import org.junit.After;
import org.junit.Before;

import java.util.Collections;
import java.util.Map;

import static org.elasticsearch.xpack.core.security.SecurityField.DOCUMENT_LEVEL_SECURITY_FEATURE;
import static org.elasticsearch.xpack.security.Security.DLS_ERROR_WHEN_VALIDATE_QUERY_WITH_REWRITE;
import static org.hamcrest.Matchers.containsString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class ValidateRequestInterceptorTests extends ESTestCase {

    private ThreadPool threadPool;
    private MockLicenseState licenseState;
    private ValidateRequestInterceptor interceptor;
    private Settings settings;

    @Before
    public void init() {
        threadPool = new TestThreadPool("validate request interceptor tests");
        licenseState = mock(MockLicenseState.class);
        when(licenseState.isAllowed(DOCUMENT_LEVEL_SECURITY_FEATURE)).thenReturn(true);
        settings = Settings.EMPTY;
        interceptor = new ValidateRequestInterceptor(threadPool, licenseState, settings);
    }

    @After
    public void stopThreadPool() {
        terminate(threadPool);
    }

    public void testValidateRequestWithDLS() {
        final DocumentPermissions documentPermissions = DocumentPermissions.filteredBy(
            Set.of(new BytesArray("{\"term\":{\"username\":\"foo\"}}"))
        ); // value does not matter
        ElasticsearchClient client = mock(ElasticsearchClient.class);
        ValidateQueryRequestBuilder builder = new ValidateQueryRequestBuilder(client, ValidateQueryAction.INSTANCE);
        final String index = randomAlphaOfLengthBetween(3, 8);
        final PlainActionFuture<Void> listener1 = new PlainActionFuture<>();
        Map<String, IndicesAccessControl.IndexAccessControl> accessControlMap = Collections.singletonMap(
            index,
            new IndicesAccessControl.IndexAccessControl(false, FieldPermissions.DEFAULT, documentPermissions)
        );
        // with DLS and rewrite enabled
        interceptor.disableFeatures(builder.setRewrite(true).request(), accessControlMap, listener1);
        ElasticsearchSecurityException exception = expectThrows(ElasticsearchSecurityException.class, () -> listener1.actionGet());
        assertThat(exception.getMessage(), containsString("Validate with rewrite isn't supported if document level security is enabled"));

        // with DLS and rewrite disabled
        final PlainActionFuture<Void> listener2 = new PlainActionFuture<>();
        interceptor.disableFeatures(builder.setRewrite(false).request(), accessControlMap, listener2);
        assertNull(listener2.actionGet());
    }

    public void testValidateRequestWithOutDLS() {
        final DocumentPermissions documentPermissions = null; // no DLS
        ElasticsearchClient client = mock(ElasticsearchClient.class);
        ValidateQueryRequestBuilder builder = new ValidateQueryRequestBuilder(client, ValidateQueryAction.INSTANCE);
        final String index = randomAlphaOfLengthBetween(3, 8);
        final PlainActionFuture<Void> listener1 = new PlainActionFuture<>();
        Map<String, IndicesAccessControl.IndexAccessControl> accessControlMap = Collections.singletonMap(
            index,
            new IndicesAccessControl.IndexAccessControl(false, FieldPermissions.DEFAULT, documentPermissions)
        );
        // without DLS and rewrite enabled
        interceptor.disableFeatures(builder.setRewrite(true).request(), accessControlMap, listener1);
        assertNull(listener1.actionGet());

        // without DLS and rewrite disabled
        final PlainActionFuture<Void> listener2 = new PlainActionFuture<>();
        interceptor.disableFeatures(builder.setRewrite(false).request(), accessControlMap, listener2);
        assertNull(listener2.actionGet());
    }

    public void testValidateRequestWithDLSConfig() {
        ValidateQueryRequest request = mock(ValidateQueryRequest.class);
        when(request.rewrite()).thenReturn(true);
        // default
        assertTrue(interceptor.supports(request));

        // explicit configuration - same as default
        settings = Settings.builder()
            .put(DLS_ERROR_WHEN_VALIDATE_QUERY_WITH_REWRITE.getKey(), DLS_ERROR_WHEN_VALIDATE_QUERY_WITH_REWRITE.getDefault(Settings.EMPTY))
            .build();
        interceptor = new ValidateRequestInterceptor(threadPool, licenseState, settings);
        assertTrue(interceptor.supports(request));
        assertWarnings(
            "[xpack.security.dls.error_when_validate_query_with_rewrite.enabled] setting was deprecated in Elasticsearch and will be "
                + "removed in a future release! See the breaking changes documentation for the next major version."
        );

        // explicit configuration - opposite of default
        settings = Settings.builder()
            .put(
                DLS_ERROR_WHEN_VALIDATE_QUERY_WITH_REWRITE.getKey(),
                DLS_ERROR_WHEN_VALIDATE_QUERY_WITH_REWRITE.getDefault(Settings.EMPTY) == false
            )
            .build();
        interceptor = new ValidateRequestInterceptor(threadPool, licenseState, settings);
        assertFalse(interceptor.supports(request));
        assertWarnings(
            "[xpack.security.dls.error_when_validate_query_with_rewrite.enabled] setting was deprecated in Elasticsearch and will be "
                + "removed in a future release! See the breaking changes documentation for the next major version."
        );
    }
}
File diff suppressed because it is too large