mirror of https://github.com/elastic/elasticsearch.git (synced 2025-06-28 09:28:55 -04:00)

check parent circuit breaker when allocating empty bucket (#89568)

Closes https://github.com/elastic/elasticsearch/issues/80789

This commit is contained in:
parent 773aeabf3d
commit 061e6432bf

6 changed files with 91 additions and 33 deletions
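This commit batches the parent circuit breaker check while empty histogram buckets are materialized during reduce: instead of adding each empty bucket through a plain lambda, the new consumer counts the buckets it creates and reports them to the reduce context every REPORT_EMPTY_EVERY buckets via consumeBucketsAndMaybeBreak, so the breaker can trip before an over-wide range allocates millions of empty buckets. Below is a minimal standalone sketch of that pattern; FakeReduceContext, batched, and the REPORT_EMPTY_EVERY value used here are illustrative stand-ins, not the Elasticsearch classes this commit touches.

    import java.util.function.LongConsumer;

    // Hypothetical stand-in for AggregationReduceContext: it only counts buckets
    // and trips once a limit is exceeded, the way a circuit breaker check would.
    class FakeReduceContext {
        private final long limit;
        private long total = 0;

        FakeReduceContext(long limit) {
            this.limit = limit;
        }

        void consumeBucketsAndMaybeBreak(int count) {
            total += count;
            if (total > limit) {
                throw new IllegalStateException("too many buckets: " + total);
            }
        }
    }

    public class EmptyBucketBatchingSketch {
        // Report in batches rather than per bucket; the real constant's value may differ.
        static final int REPORT_EMPTY_EVERY = 1024;

        // Wraps a per-bucket action so that every REPORT_EMPTY_EVERY empty buckets
        // are reported to the reduce context, giving the breaker a chance to trip.
        static LongConsumer batched(FakeReduceContext ctx, LongConsumer addBucket) {
            return new LongConsumer() {
                private int size = 0;

                @Override
                public void accept(long key) {
                    size++;
                    if (size >= REPORT_EMPTY_EVERY) {
                        ctx.consumeBucketsAndMaybeBreak(size); // periodic breaker check
                        size = 0;
                    }
                    addBucket.accept(key);
                }
            };
        }

        public static void main(String[] args) {
            FakeReduceContext ctx = new FakeReduceContext(100_000);
            LongConsumer onEmptyBucket = batched(ctx, key -> { /* allocate the empty bucket */ });
            try {
                for (long key = 0; key < 1_000_000; key++) {
                    onEmptyBucket.accept(key);
                }
            } catch (IllegalStateException e) {
                // Trips after roughly 100k buckets instead of silently allocating all 1M.
                System.out.println("breaker tripped: " + e.getMessage());
            }
        }
    }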
docs/changelog/89568.yaml (new file, +5)
@@ -0,0 +1,5 @@
+pr: 89568
+summary: check parent circuit breaker when allocating empty bucket
+area: Aggregations
+type: bug
+issues: [80789]
InternalDateHistogram.java
@@ -408,7 +408,19 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
 
         InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(List.of(emptyBucketInfo.subAggregations), reduceContext);
         ListIterator<Bucket> iter = list.listIterator();
-        iterateEmptyBuckets(list, iter, key -> iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)));
+        iterateEmptyBuckets(list, iter, new LongConsumer() {
+            private int size = 0;
+
+            @Override
+            public void accept(long key) {
+                size++;
+                if (size >= REPORT_EMPTY_EVERY) {
+                    reduceContext.consumeBucketsAndMaybeBreak(size);
+                    size = 0;
+                }
+                iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
+            }
+        });
     }
 
     private void iterateEmptyBuckets(List<Bucket> list, ListIterator<Bucket> iter, LongConsumer onBucket) {
InternalHistogram.java
@@ -400,7 +400,19 @@ public final class InternalHistogram extends InternalMultiBucketAggregation<Inte
             reduceContext
         );
         ListIterator<Bucket> iter = list.listIterator();
-        iterateEmptyBuckets(list, iter, key -> iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs)));
+        iterateEmptyBuckets(list, iter, new DoubleConsumer() {
+            private int size;
+
+            @Override
+            public void accept(double key) {
+                size++;
+                if (size >= REPORT_EMPTY_EVERY) {
+                    reduceContext.consumeBucketsAndMaybeBreak(size);
+                    size = 0;
+                }
+                iter.add(new Bucket(key, 0, keyed, format, reducedEmptySubAggs));
+            }
+        });
     }
 
     private void iterateEmptyBuckets(List<Bucket> list, ListIterator<Bucket> iter, DoubleConsumer onBucket) {
InternalDateHistogramTests.java
@@ -214,8 +214,7 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe
     }
 
     public void testLargeReduce() {
-        expectReduceUsesTooManyBuckets(
-            new InternalDateHistogram(
+        InternalDateHistogram largeHisto = new InternalDateHistogram(
             "h",
             List.of(),
             BucketOrder.key(true),
@@ -225,15 +224,15 @@ public class InternalDateHistogramTests extends InternalMultiBucketAggregationTe
             Rounding.builder(DateTimeUnit.SECOND_OF_MINUTE).build(),
             InternalAggregations.EMPTY,
             new LongBounds(
-                DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2018-01-01T00:00:00Z"),
-                DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2021-01-01T00:00:00Z")
+                DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-01-01T00:00:00Z"),
+                DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z")
             )
             ),
             DocValueFormat.RAW,
             false,
             null
-            ),
-            100000
         );
+        expectReduceUsesTooManyBuckets(largeHisto, 100000);
+        expectReduceThrowsRealMemoryBreaker(largeHisto);
     }
 }
InternalHistogramTests.java
@@ -103,19 +103,18 @@ public class InternalHistogramTests extends InternalMultiBucketAggregationTestCa
     }
 
     public void testLargeReduce() {
-        expectReduceUsesTooManyBuckets(
-            new InternalHistogram(
+        InternalHistogram largeHisto = new InternalHistogram(
             "h",
             List.of(),
             BucketOrder.key(true),
             0,
-            new InternalHistogram.EmptyBucketInfo(5e-10, 0, 0, 100, InternalAggregations.EMPTY),
+            new InternalHistogram.EmptyBucketInfo(5e-8, 0, 0, 100, InternalAggregations.EMPTY),
             DocValueFormat.RAW,
             false,
             null
-            ),
-            100000
         );
+        expectReduceUsesTooManyBuckets(largeHisto, 100000);
+        expectReduceThrowsRealMemoryBreaker(largeHisto);
     }
 
     @Override
InternalMultiBucketAggregationTestCase.java
@@ -8,7 +8,11 @@
 
 package org.elasticsearch.test;
 
+import org.elasticsearch.common.breaker.CircuitBreakingException;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.elasticsearch.search.aggregations.Aggregation;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.AggregationReduceContext;
@@ -33,6 +37,7 @@ import java.util.function.Supplier;
 
 import static java.util.Collections.emptyMap;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.startsWith;
 import static org.mockito.Mockito.mock;
 
 public abstract class InternalMultiBucketAggregationTestCase<T extends InternalAggregation & MultiBucketsAggregation> extends
@@ -248,4 +253,30 @@ public abstract class InternalMultiBucketAggregationTestCase<T extends InternalA
         Exception e = expectThrows(IllegalArgumentException.class, () -> agg.reduce(List.of(agg), reduceContext));
         assertThat(e.getMessage(), equalTo("too big!"));
     }
+
+    /**
+     * Expect that reducing this aggregation will break the real memory breaker.
+     */
+    protected static void expectReduceThrowsRealMemoryBreaker(InternalAggregation agg) {
+        HierarchyCircuitBreakerService breaker = new HierarchyCircuitBreakerService(
+            Settings.builder().put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "50%").build(),
+            List.of(),
+            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+        ) {
+            @Override
+            public void checkParentLimit(long newBytesReserved, String label) throws CircuitBreakingException {
+                super.checkParentLimit(newBytesReserved, label);
+            }
+        };
+        AggregationReduceContext reduceContext = new AggregationReduceContext.ForFinal(
+            BigArrays.NON_RECYCLING_INSTANCE,
+            null,
+            () -> false,
+            mock(AggregationBuilder.class),
+            v -> breaker.getBreaker("request").addEstimateBytesAndMaybeBreak(0, "test"),
+            PipelineTree.EMPTY
+        );
+        Exception e = expectThrows(CircuitBreakingException.class, () -> agg.reduce(List.of(agg), reduceContext));
+        assertThat(e.getMessage(), startsWith("[parent] Data too large, data for [test] "));
+    }
 }
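The new test helper exercises that path against a real breaker: the reduce context's multi-bucket consumer is wired to addEstimateBytesAndMaybeBreak(0, "test") on a HierarchyCircuitBreakerService capped at a 50% total limit, so each batched consumeBucketsAndMaybeBreak call from the histogram reduce triggers a parent breaker check without reserving any bytes itself. Reducing the enlarged test histograms is then expected to fail with a CircuitBreakingException whose message starts with "[parent] Data too large", as the final assertion shows.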