[Uptime] Fix too many buckets error on overview page (#66832)

This was caused by an unoptimized query. It became apparent when we
increased the max number of items per page.
This commit is contained in:
Andrew Cholakian 2020-05-27 13:54:35 -05:00 committed by GitHub
parent 778b01b11a
commit b52c1a075f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 47 additions and 45 deletions

View file

@@ -81,14 +81,13 @@ export const StateType = t.intersection([
export const HistogramPointType = t.type({
timestamp: t.number,
up: t.number,
down: t.number,
up: t.union([t.number, t.undefined]),
down: t.union([t.number, t.undefined]),
});
export type HistogramPoint = t.TypeOf<typeof HistogramPointType>;
export const HistogramType = t.type({
count: t.number,
points: t.array(HistogramPointType),
});

View file

@@ -6,10 +6,10 @@
import { get, sortBy } from 'lodash';
import { QueryContext } from './query_context';
import { QUERY } from '../../../../common/constants';
import {
Check,
Histogram,
HistogramPoint,
MonitorSummary,
CursorDirection,
SortOrder,
@@ -294,6 +294,11 @@ const getHistogramForMonitors = async (
query: {
bool: {
filter: [
{
range: {
'summary.down': { gt: 0 },
},
},
{
terms: {
'monitor.id': monitorIds,
@@ -311,25 +316,23 @@
},
},
aggs: {
by_id: {
terms: {
field: 'monitor.id',
size: queryContext.size,
histogram: {
auto_date_histogram: {
field: '@timestamp',
// 12 seems to be a good size for performance given
// long monitor lists of up to 100 on the overview page
buckets: 12,
missing: 0,
},
aggs: {
histogram: {
auto_date_histogram: {
field: '@timestamp',
buckets: QUERY.DEFAULT_BUCKET_COUNT,
missing: 0,
by_id: {
terms: {
field: 'monitor.id',
size: Math.max(monitorIds.length, 1),
},
aggs: {
status: {
terms: {
field: 'monitor.status',
size: 2,
shard_size: 2,
},
totalDown: {
sum: { field: 'summary.down' },
},
},
},
@@ -340,32 +343,32 @@ const getHistogramForMonitors = async (
};
const result = await queryContext.search(params);
const buckets: any[] = get(result, 'aggregations.by_id.buckets', []);
return buckets.reduce((map: { [key: string]: any }, item: any) => {
const points = get(item, 'histogram.buckets', []).map((histogram: any) => {
const status = get(histogram, 'status.buckets', []).reduce(
(statuses: { up: number; down: number }, bucket: any) => {
if (bucket.key === 'up') {
statuses.up = bucket.doc_count;
} else if (bucket.key === 'down') {
statuses.down = bucket.doc_count;
}
return statuses;
},
{ up: 0, down: 0 }
);
return {
timestamp: histogram.key,
...status,
};
const histoBuckets: any[] = result.aggregations.histogram.buckets;
const simplified = histoBuckets.map((histoBucket: any): { timestamp: number; byId: any } => {
const byId: { [key: string]: number } = {};
histoBucket.by_id.buckets.forEach((idBucket: any) => {
byId[idBucket.key] = idBucket.totalDown.value;
});
map[item.key] = {
count: item.doc_count,
points,
return {
timestamp: parseInt(histoBucket.key, 10),
byId,
};
return map;
}, {});
});
const histosById: { [key: string]: Histogram } = {};
monitorIds.forEach((id: string) => {
const points: HistogramPoint[] = [];
simplified.forEach((simpleHisto) => {
points.push({
timestamp: simpleHisto.timestamp,
up: undefined,
down: simpleHisto.byId[id],
});
});
histosById[id] = { points };
});
return histosById;
};
const cursorDirectionToOrder = (cd: CursorDirection): 'asc' | 'desc' => {

View file

@@ -90,7 +90,7 @@ export class MonitorGroupIterator {
}
while (true) {
const result = await this.attemptBufferMore(CHUNK_SIZE);
const result = await this.attemptBufferMore(size);
if (result.gotHit || !result.hasMore) {
return;
}

View file

@@ -49,7 +49,7 @@ const checkMonitorStatesResponse = ({
).to.eql(statuses);
(summaries ?? []).forEach((s) => {
expect(s.state.url.full).to.be.ok();
expect(s.histogram?.count).to.be(20);
expect(Array.isArray(s.histogram?.points)).to.be(true);
(s.histogram?.points ?? []).forEach((point) => {
expect(point.timestamp).to.be.greaterThan(absFrom);
expect(point.timestamp).to.be.lessThan(absTo);