[Profiling] Remove deprecated paths not using the Elasticsearch plugin (#155955)

This PR removes the two read paths (flamegraph and TopN functions) that
connect to Elasticsearch without the plugin.

This is the first PR out of three that will remove the toggle for the
Elasticsearch plugin.
This commit is contained in:
Joseph Crail 2023-05-09 20:20:40 -07:00 committed by GitHub
parent 47c4830256
commit 7a46e9764f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 1 additions and 202 deletions

View file

@ -6,8 +6,6 @@
*/
import type { Logger } from '@kbn/core/server';
import seedrandom from 'seedrandom';
import { StackTraceID } from '../../common/profiling';
import { ProfilingESClient } from '../utils/create_profiling_es_client';
import { ProjectTimeQuery } from './query';
@ -95,31 +93,3 @@ export async function findDownsampledIndex({
logger.info('sampleCountFromPow6 ' + sampleCountFromInitialExp);
return getSampledTraceEventsIndex(index, sampleSize, sampleCountFromInitialExp, initialExp);
}
/**
 * Randomly downsamples the per-stacktrace event counts in place, keeping each
 * individual event with probability `p`. Entries whose sampled count drops to
 * zero are removed from the map.
 *
 * @param stackTraceEvents - map of stacktrace ID to event count (mutated in place)
 * @param p - probability of keeping any single event
 * @param seed - RNG seed, so results are reproducible for a given query
 * @returns the total number of events kept across all stacktraces
 */
export function downsampleEventsRandomly(
  stackTraceEvents: Map<StackTraceID, number>,
  p: number,
  seed: string
): number {
  // Seed the RNG so repeated runs over the same data yield the same sample.
  const rng = seedrandom(seed);
  let keptTotal = 0;
  for (const [traceID, eventCount] of stackTraceEvents) {
    let kept = 0;
    for (let remaining = eventCount; remaining > 0; remaining--) {
      if (rng() < p) {
        kept++;
      }
    }
    if (kept > 0) {
      stackTraceEvents.set(traceID, kept);
      keptTotal += kept;
    } else {
      stackTraceEvents.delete(traceID);
    }
  }
  return keptTotal;
}

View file

@ -1,94 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { Logger } from '@kbn/logging';
import { INDEX_EVENTS } from '../../common';
import { ProfilingESClient } from '../utils/create_profiling_es_client';
import { withProfilingSpan } from '../utils/with_profiling_span';
import { downsampleEventsRandomly, findDownsampledIndex } from './downsampling';
import { ProjectTimeQuery } from './query';
import {
mgetExecutables,
mgetStackFrames,
mgetStackTraces,
searchEventsGroupByStackTrace,
} from './stacktrace';
/**
 * Fetches the stack traces matching a profiling query, downsampling when the
 * matched event count greatly exceeds the requested sample size, then resolves
 * the associated stack frames and executables in parallel.
 */
export async function getExecutablesAndStackTraces({
  logger,
  client,
  filter,
  sampleSize,
}: {
  logger: Logger;
  client: ProfilingESClient;
  filter: ProjectTimeQuery;
  sampleSize: number;
}) {
  return withProfilingSpan('get_executables_and_stack_traces', async () => {
    const eventsIndex = await findDownsampledIndex({
      logger,
      client,
      index: INDEX_EVENTS,
      filter,
      sampleSize,
    });

    const { totalCount, stackTraceEvents } = await searchEventsGroupByStackTrace({
      logger,
      client,
      index: eventsIndex,
      filter,
    });

    // Downsample manually when totalCount exceeds sampleSize by more than 10%.
    let p = 1.0;
    if (totalCount > sampleSize * 1.1) {
      p = sampleSize / totalCount;
      logger.info('downsampling events with p=' + p);
      const start = Date.now();
      const downsampledTotalCount = downsampleEventsRandomly(
        stackTraceEvents,
        p,
        filter.toString()
      );
      logger.info(`downsampling events took ${Date.now() - start} ms`);
      logger.info('downsampled total count: ' + downsampledTotalCount);
      logger.info('unique downsampled stacktraces: ' + stackTraceEvents.size);
    }

    // Scale the down-sampled counts back up to fully-sampled values.
    // Downsampling may drop entries from stackTraceEvents, so the sum of the
    // upscaled count values can be less than totalCount.
    for (const [id, count] of stackTraceEvents) {
      stackTraceEvents.set(id, Math.floor(count / (eventsIndex.sampleRate * p)));
    }

    const { stackTraces, totalFrames, stackFrameDocIDs, executableDocIDs } = await mgetStackTraces({
      logger,
      client,
      events: stackTraceEvents,
    });

    // Frames and executables are independent lookups — fetch them concurrently.
    const [stackFrames, executables] = await withProfilingSpan(
      'get_stackframes_and_executables',
      () =>
        Promise.all([
          mgetStackFrames({ logger, client, stackFrameIDs: stackFrameDocIDs }),
          mgetExecutables({ logger, client, executableIDs: executableDocIDs }),
        ])
    );

    return {
      stackTraces,
      executables,
      stackFrames,
      stackTraceEvents,
      totalCount,
      totalFrames,
      eventsIndex,
    };
  });
}

View file

@ -7,9 +7,7 @@
import { RequestHandlerContext } from '@kbn/core/server';
import { Logger } from '@kbn/logging';
import { profilingElasticsearchPlugin } from '@kbn/observability-plugin/common';
import { ProfilingESClient } from '../utils/create_profiling_es_client';
import { getExecutablesAndStackTraces } from './get_executables_and_stacktraces';
import { ProjectTimeQuery } from './query';
import { searchStackTraces } from './search_stacktraces';
@ -26,21 +24,7 @@ export async function getStackTraces({
filter: ProjectTimeQuery;
sampleSize: number;
}) {
const core = await context.core;
const useElasticsearchPlugin = await core.uiSettings.client.get<boolean>(
profilingElasticsearchPlugin
);
if (useElasticsearchPlugin) {
return await searchStackTraces({
client,
filter,
sampleSize,
});
}
return await getExecutablesAndStackTraces({
logger,
return await searchStackTraces({
client,
filter,
sampleSize,

View file

@ -31,8 +31,6 @@ import {
import { runLengthDecodeBase64Url } from '../../common/run_length_encoding';
import { ProfilingESClient } from '../utils/create_profiling_es_client';
import { withProfilingSpan } from '../utils/with_profiling_span';
import { DownsampledEventsIndex } from './downsampling';
import { ProjectTimeQuery } from './query';
const BASE64_FRAME_ID_LENGTH = 32;
@ -95,65 +93,6 @@ export function decodeStackTrace(input: EncodedStackTrace): StackTrace {
} as StackTrace;
}
/**
 * Queries the (possibly downsampled) events index, grouping events by
 * stacktrace ID, and returns the overall event count together with a map of
 * stacktrace ID to its summed event count.
 */
export async function searchEventsGroupByStackTrace({
  logger,
  client,
  index,
  filter,
}: {
  logger: Logger;
  client: ProfilingESClient;
  index: DownsampledEventsIndex;
  filter: ProjectTimeQuery;
}) {
  const response = await client.search('get_events_group_by_stack_trace', {
    index: index.name,
    track_total_hits: false,
    query: filter,
    aggs: {
      group_by: {
        terms: {
          // 'size' should be max 100k, but might be slightly more. Better be on the safe side.
          size: 150000,
          field: ProfilingESField.StacktraceID,
          // 'execution_hint: map' skips the slow building of ordinals that we don't need.
          // Especially with high cardinality fields, this makes aggregations really slow.
          // E.g. it reduces the latency from 70s to 0.7s on our 8.1. MVP cluster (as of 28.04.2022).
          execution_hint: 'map',
        },
        aggs: {
          count: {
            sum: {
              field: ProfilingESField.StacktraceCount,
            },
          },
        },
      },
      total_count: {
        sum: {
          field: ProfilingESField.StacktraceCount,
        },
      },
    },
    pre_filter_shard_size: 1,
    filter_path:
      'aggregations.group_by.buckets.key,aggregations.group_by.buckets.count,aggregations.total_count,_shards.failures',
  });

  const totalCount = response.aggregations?.total_count.value ?? 0;

  // Build the stacktrace ID -> summed event count map from the bucket results.
  const stackTraceEvents = new Map<StackTraceID, number>();
  for (const bucket of response.aggregations?.group_by?.buckets ?? []) {
    stackTraceEvents.set(String(bucket.key), bucket.count.value ?? 0);
  }

  logger.info('events total count: ' + totalCount);
  logger.info('unique stacktraces: ' + stackTraceEvents.size);

  return { totalCount, stackTraceEvents };
}
function summarizeCacheAndQuery(
logger: Logger,
name: string,