[APM] Enforce span creation/naming for ES searches (#101856)
parent 571524005e
commit 666bce3923

105 changed files with 3448 additions and 3489 deletions
@@ -14,6 +14,7 @@ export interface SpanOptions {
   type?: string;
   subtype?: string;
   labels?: Record<string, string>;
+  intercept?: boolean;
 }
 
 type Span = Exclude<typeof agent.currentSpan, undefined | null>;
@@ -36,23 +37,27 @@ export async function withSpan<T>(
 ): Promise<T> {
   const options = parseSpanOptions(optionsOrName);
 
-  const { name, type, subtype, labels } = options;
+  const { name, type, subtype, labels, intercept } = options;
 
   if (!agent.isStarted()) {
     return cb();
   }
 
+  let createdSpan: Span | undefined;
+
   // When a span starts, it's marked as the active span in its context.
   // When it ends, it's not untracked, which means that if a span
   // starts directly after this one ends, the newly started span is a
   // child of this span, even though it should be a sibling.
   // To mitigate this, we queue a microtask by awaiting a promise.
-  await Promise.resolve();
-
-  const span = agent.startSpan(name);
-
-  if (!span) {
-    return cb();
+  if (!intercept) {
+    await Promise.resolve();
+
+    createdSpan = agent.startSpan(name) ?? undefined;
+
+    if (!createdSpan) {
+      return cb();
+    }
   }
 
   // If a span is created in the same context as the span that we just
@@ -61,33 +66,51 @@ export async function withSpan<T>(
   // mitigate this we create a new context.
 
   return runInNewContext(() => {
+    const promise = cb(createdSpan);
+
+    let span: Span | undefined = createdSpan;
+
+    if (intercept) {
+      span = agent.currentSpan ?? undefined;
+    }
+
+    if (!span) {
+      return promise;
+    }
+
+    const targetedSpan = span;
+
+    if (name) {
+      targetedSpan.name = name;
+    }
+
     // @ts-ignore
     if (type) {
-      span.type = type;
+      targetedSpan.type = type;
     }
     if (subtype) {
-      span.subtype = subtype;
+      targetedSpan.subtype = subtype;
     }
 
     if (labels) {
-      span.addLabels(labels);
+      targetedSpan.addLabels(labels);
     }
 
-    return cb(span)
+    return promise
       .then((res) => {
-        if (!span.outcome || span.outcome === 'unknown') {
-          span.outcome = 'success';
+        if (!targetedSpan.outcome || targetedSpan.outcome === 'unknown') {
+          targetedSpan.outcome = 'success';
         }
         return res;
       })
       .catch((err) => {
-        if (!span.outcome || span.outcome === 'unknown') {
-          span.outcome = 'failure';
+        if (!targetedSpan.outcome || targetedSpan.outcome === 'unknown') {
+          targetedSpan.outcome = 'failure';
         }
         throw err;
       })
       .finally(() => {
-        span.end();
+        targetedSpan.end();
       });
   });
 }
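Taken together, the two hunks above change withSpan so that, with intercept: true, it no longer starts a span of its own but renames and labels the span that the callback itself starts. A minimal sketch of a caller, assuming an agent-instrumented search call (doSearch and the import path are illustrative, not from this commit):

import { withSpan } from './with_span'; // assumed path

// Stand-in for a call that the APM agent instruments with its own span.
async function doSearch(): Promise<unknown> {
  return Promise.resolve({});
}

// With intercept: true, withSpan awaits the span started inside doSearch()
// and applies this name/type/subtype to it instead of creating a sibling.
export function exampleQuery() {
  return withSpan(
    { name: 'get_error_groups', type: 'db', subtype: 'elasticsearch', intercept: true },
    doSearch
  );
}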
@@ -15,82 +15,82 @@ import {
 import { ProcessorEvent } from '../../../../common/processor_event';
 import { environmentQuery, rangeQuery } from '../../../../server/utils/queries';
 import { AlertParams } from '../../../routes/alerts/chart_preview';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { getBucketSize } from '../../helpers/get_bucket_size';
 import { Setup, SetupTimeRange } from '../../helpers/setup_request';
 
-export function getTransactionDurationChartPreview({
+export async function getTransactionDurationChartPreview({
   alertParams,
   setup,
 }: {
   alertParams: AlertParams;
   setup: Setup & SetupTimeRange;
 }) {
-  return withApmSpan('get_transaction_duration_chart_preview', async () => {
   const { apmEventClient, start, end } = setup;
   const {
     aggregationType,
     environment,
     serviceName,
     transactionType,
   } = alertParams;
 
   const query = {
     bool: {
       filter: [
         { term: { [PROCESSOR_EVENT]: ProcessorEvent.transaction } },
         ...(serviceName ? [{ term: { [SERVICE_NAME]: serviceName } }] : []),
         ...(transactionType
           ? [{ term: { [TRANSACTION_TYPE]: transactionType } }]
           : []),
         ...rangeQuery(start, end),
         ...environmentQuery(environment),
       ] as QueryDslQueryContainer[],
     },
   };
 
   const { intervalString } = getBucketSize({ start, end, numBuckets: 20 });
 
   const aggs = {
     timeseries: {
       date_histogram: {
         field: '@timestamp',
         fixed_interval: intervalString,
       },
       aggs: {
         agg:
           aggregationType === 'avg'
             ? { avg: { field: TRANSACTION_DURATION } }
             : {
                 percentiles: {
                   field: TRANSACTION_DURATION,
                   percents: [aggregationType === '95th' ? 95 : 99],
                 },
               },
       },
     },
   };
   const params = {
     apm: { events: [ProcessorEvent.transaction] },
     body: { size: 0, query, aggs },
   };
-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search(
+    'get_transaction_duration_chart_preview',
+    params
+  );
 
   if (!resp.aggregations) {
     return [];
   }
 
   return resp.aggregations.timeseries.buckets.map((bucket) => {
     const percentilesKey = aggregationType === '95th' ? '95.0' : '99.0';
     const x = bucket.key;
     const y =
       aggregationType === 'avg'
         ? (bucket.agg as { value: number | null }).value
         : (bucket.agg as { values: Record<string, number | null> }).values[
             percentilesKey
           ];
 
     return { x, y };
   });
-  });
 }
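The same mechanical change repeats through the query functions below: the withApmSpan wrapper (and its import) is dropped, the body is dedented, and the span name moves into the search call. A hedged before/after sketch of the pattern, shown as comments (params stands for any APM search request):

// Before: span creation was a separate, easy-to-forget wrapper.
// const resp = await withApmSpan('get_transaction_duration_chart_preview', () =>
//   apmEventClient.search(params)
// );

// After: the operation name is a required first argument, so every
// search gets a named span whether or not the caller thinks about it.
// const resp = await apmEventClient.search(
//   'get_transaction_duration_chart_preview',
//   params
// );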
@@ -9,58 +9,58 @@ import { SERVICE_NAME } from '../../../../common/elasticsearch_fieldnames';
 import { ProcessorEvent } from '../../../../common/processor_event';
 import { AlertParams } from '../../../routes/alerts/chart_preview';
 import { environmentQuery, rangeQuery } from '../../../../server/utils/queries';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { getBucketSize } from '../../helpers/get_bucket_size';
 import { Setup, SetupTimeRange } from '../../helpers/setup_request';
 
-export function getTransactionErrorCountChartPreview({
+export async function getTransactionErrorCountChartPreview({
   setup,
   alertParams,
 }: {
   setup: Setup & SetupTimeRange;
   alertParams: AlertParams;
 }) {
-  return withApmSpan('get_transaction_error_count_chart_preview', async () => {
   const { apmEventClient, start, end } = setup;
   const { serviceName, environment } = alertParams;
 
   const query = {
     bool: {
       filter: [
         ...(serviceName ? [{ term: { [SERVICE_NAME]: serviceName } }] : []),
         ...rangeQuery(start, end),
         ...environmentQuery(environment),
       ],
     },
   };
 
   const { intervalString } = getBucketSize({ start, end, numBuckets: 20 });
 
   const aggs = {
     timeseries: {
       date_histogram: {
         field: '@timestamp',
         fixed_interval: intervalString,
       },
     },
   };
 
   const params = {
     apm: { events: [ProcessorEvent.error] },
     body: { size: 0, query, aggs },
   };
 
-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search(
+    'get_transaction_error_count_chart_preview',
+    params
+  );
 
   if (!resp.aggregations) {
     return [];
   }
 
   return resp.aggregations.timeseries.buckets.map((bucket) => {
     return {
       x: bucket.key,
       y: bucket.doc_count,
     };
   });
-  });
 }
@@ -64,7 +64,10 @@ export async function getTransactionErrorRateChartPreview({
     body: { size: 0, query, aggs },
   };
 
-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search(
+    'get_transaction_error_rate_chart_preview',
+    params
+  );
 
   if (!resp.aggregations) {
     return [];
@@ -21,68 +21,68 @@ import {
   getTimeseriesAggregation,
   getTransactionErrorRateTimeSeries,
 } from '../../helpers/transaction_error_rate';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { CorrelationsOptions, getCorrelationsFilters } from '../get_filters';
 
 interface Options extends CorrelationsOptions {
   fieldNames: string[];
 }
 export async function getCorrelationsForFailedTransactions(options: Options) {
-  return withApmSpan('get_correlations_for_failed_transactions', async () => {
   const { fieldNames, setup } = options;
   const { apmEventClient } = setup;
   const filters = getCorrelationsFilters(options);
 
   const params = {
     apm: { events: [ProcessorEvent.transaction] },
     track_total_hits: true,
     body: {
       size: 0,
       query: {
         bool: { filter: filters },
       },
       aggs: {
         failed_transactions: {
           filter: { term: { [EVENT_OUTCOME]: EventOutcome.failure } },
 
           // significant term aggs
           aggs: fieldNames.reduce((acc, fieldName) => {
             return {
               ...acc,
               [fieldName]: {
                 significant_terms: {
                   size: 10,
                   field: fieldName,
                   background_filter: {
                     bool: {
                       filter: filters,
                       must_not: {
                         term: { [EVENT_OUTCOME]: EventOutcome.failure },
                       },
                     },
                   },
                 },
               },
             };
           }, {} as Record<string, { significant_terms: AggregationOptionsByType['significant_terms'] }>),
         },
       },
     },
   };
 
-  const response = await apmEventClient.search(params);
+  const response = await apmEventClient.search(
+    'get_correlations_for_failed_transactions',
+    params
+  );
   if (!response.aggregations) {
     return { significantTerms: [] };
   }
 
   const sigTermAggs = omit(
     response.aggregations?.failed_transactions,
     'doc_count'
   );
 
   const topSigTerms = processSignificantTermAggs({ sigTermAggs });
   return getErrorRateTimeSeries({ setup, filters, topSigTerms });
-  });
 }
 
 export async function getErrorRateTimeSeries({
@@ -94,58 +94,59 @@ export async function getErrorRateTimeSeries({
   filters: ESFilter[];
   topSigTerms: TopSigTerm[];
 }) {
-  return withApmSpan('get_error_rate_timeseries', async () => {
   const { start, end, apmEventClient } = setup;
   const { intervalString } = getBucketSize({ start, end, numBuckets: 15 });
 
   if (isEmpty(topSigTerms)) {
     return { significantTerms: [] };
   }
 
   const timeseriesAgg = getTimeseriesAggregation(start, end, intervalString);
 
   const perTermAggs = topSigTerms.reduce(
     (acc, term, index) => {
       acc[`term_${index}`] = {
         filter: { term: { [term.fieldName]: term.fieldValue } },
         aggs: { timeseries: timeseriesAgg },
       };
       return acc;
     },
     {} as {
       [key: string]: {
         filter: AggregationOptionsByType['filter'];
         aggs: { timeseries: typeof timeseriesAgg };
       };
     }
   );
 
   const params = {
     // TODO: add support for metrics
     apm: { events: [ProcessorEvent.transaction] },
     body: {
       size: 0,
       query: { bool: { filter: filters } },
       aggs: perTermAggs,
     },
   };
 
-  const response = await apmEventClient.search(params);
+  const response = await apmEventClient.search(
+    'get_error_rate_timeseries',
+    params
+  );
   const { aggregations } = response;
 
   if (!aggregations) {
     return { significantTerms: [] };
   }
 
   return {
     significantTerms: topSigTerms.map((topSig, index) => {
       const agg = aggregations[`term_${index}`]!;
 
       return {
         ...topSig,
         timeseries: getTransactionErrorRateTimeSeries(agg.timeseries.buckets),
       };
     }),
   };
-  });
 }
@@ -11,41 +11,41 @@ import {
   getTimeseriesAggregation,
   getTransactionErrorRateTimeSeries,
 } from '../../helpers/transaction_error_rate';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { CorrelationsOptions, getCorrelationsFilters } from '../get_filters';
 
 export async function getOverallErrorTimeseries(options: CorrelationsOptions) {
-  return withApmSpan('get_error_rate_timeseries', async () => {
   const { setup } = options;
   const filters = getCorrelationsFilters(options);
   const { start, end, apmEventClient } = setup;
   const { intervalString } = getBucketSize({ start, end, numBuckets: 15 });
 
   const params = {
     // TODO: add support for metrics
     apm: { events: [ProcessorEvent.transaction] },
     body: {
       size: 0,
       query: { bool: { filter: filters } },
       aggs: {
         timeseries: getTimeseriesAggregation(start, end, intervalString),
       },
     },
   };
 
-  const response = await apmEventClient.search(params);
+  const response = await apmEventClient.search(
+    'get_error_rate_timeseries',
+    params
+  );
   const { aggregations } = response;
 
   if (!aggregations) {
     return { overall: null };
   }
 
   return {
     overall: {
       timeseries: getTransactionErrorRateTimeSeries(
         aggregations.timeseries.buckets
       ),
     },
   };
-  });
 }
@@ -41,60 +41,63 @@ export async function getCorrelationsForSlowTransactions(options: Options) {
     return { significantTerms: [] };
   }
 
-  const response = await withApmSpan('get_significant_terms', () => {
   const params = {
     apm: { events: [ProcessorEvent.transaction] },
     body: {
       size: 0,
       query: {
         bool: {
           // foreground filters
           filter: filters,
           must: {
             function_score: {
               query: {
                 range: {
                   [TRANSACTION_DURATION]: { gte: durationForPercentile },
                 },
               },
               script_score: {
                 script: {
                   source: `Math.log(2 + doc['${TRANSACTION_DURATION}'].value)`,
                 },
               },
             },
           },
         },
       },
       aggs: fieldNames.reduce((acc, fieldName) => {
         return {
           ...acc,
           [fieldName]: {
             significant_terms: {
               size: 10,
               field: fieldName,
               background_filter: {
                 bool: {
                   filter: [
                     ...filters,
                     {
                       range: {
                         [TRANSACTION_DURATION]: {
                           lt: durationForPercentile,
                         },
                       },
                     },
                   ],
                 },
               },
             },
           },
         };
       }, {} as Record<string, { significant_terms: AggregationOptionsByType['significant_terms'] }>),
     },
   };
-    return apmEventClient.search(params);
-  });
+
+  const response = await apmEventClient.search(
+    'get_significant_terms',
+    params
+  );
 
   if (!response.aggregations) {
     return { significantTerms: [] };
   }
@@ -8,7 +8,6 @@
 import { ESFilter } from '../../../../../../../typings/elasticsearch';
 import { TRANSACTION_DURATION } from '../../../../common/elasticsearch_fieldnames';
 import { ProcessorEvent } from '../../../../common/processor_event';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { Setup, SetupTimeRange } from '../../helpers/setup_request';
 
 export async function getDurationForPercentile({
@@ -20,31 +19,27 @@ export async function getDurationForPercentile({
   filters: ESFilter[];
   setup: Setup & SetupTimeRange;
 }) {
-  return withApmSpan('get_duration_for_percentiles', async () => {
   const { apmEventClient } = setup;
-  const res = await apmEventClient.search({
+  const res = await apmEventClient.search('get_duration_for_percentiles', {
     apm: {
       events: [ProcessorEvent.transaction],
     },
     body: {
       size: 0,
       query: {
         bool: { filter: filters },
       },
       aggs: {
         percentile: {
           percentiles: {
             field: TRANSACTION_DURATION,
             percents: [durationPercentile],
           },
         },
       },
     },
   });
 
-    const duration = Object.values(
-      res.aggregations?.percentile.values || {}
-    )[0];
-    return duration || 0;
-  });
+  const duration = Object.values(res.aggregations?.percentile.values || {})[0];
+  return duration || 0;
 }
@@ -10,7 +10,7 @@ import { ESFilter } from '../../../../../../../typings/elasticsearch';
 import { ProcessorEvent } from '../../../../common/processor_event';
 import { Setup, SetupTimeRange } from '../../helpers/setup_request';
 import { TopSigTerm } from '../process_significant_term_aggs';
-import { withApmSpan } from '../../../utils/with_apm_span';
 
 import {
   getDistributionAggregation,
   trimBuckets,
@@ -29,70 +29,70 @@ export async function getLatencyDistribution({
   maxLatency: number;
   distributionInterval: number;
 }) {
-  return withApmSpan('get_latency_distribution', async () => {
   const { apmEventClient } = setup;
 
   const distributionAgg = getDistributionAggregation(
     maxLatency,
     distributionInterval
   );
 
   const perTermAggs = topSigTerms.reduce(
     (acc, term, index) => {
       acc[`term_${index}`] = {
         filter: { term: { [term.fieldName]: term.fieldValue } },
         aggs: {
           distribution: distributionAgg,
         },
       };
       return acc;
     },
     {} as Record<
       string,
       {
         filter: AggregationOptionsByType['filter'];
         aggs: {
           distribution: typeof distributionAgg;
         };
       }
     >
   );
 
   const params = {
     // TODO: add support for metrics
     apm: { events: [ProcessorEvent.transaction] },
     body: {
       size: 0,
       query: { bool: { filter: filters } },
       aggs: perTermAggs,
     },
   };
 
-  const response = await withApmSpan('get_terms_distribution', () =>
-    apmEventClient.search(params)
-  );
+  const response = await apmEventClient.search(
+    'get_latency_distribution',
+    params
+  );
 
   type Agg = NonNullable<typeof response.aggregations>;
 
   if (!response.aggregations) {
     return [];
   }
 
   return topSigTerms.map((topSig, index) => {
     // ignore the typescript error since existence of response.aggregations is already checked:
     // @ts-expect-error
     const agg = response.aggregations[`term_${index}`] as Agg[string];
     const total = agg.distribution.doc_count;
     const buckets = trimBuckets(
       agg.distribution.dist_filtered_by_latency.buckets
     );
 
     return {
       ...topSig,
       distribution: buckets.map((bucket) => ({
         x: bucket.key,
         y: (bucket.doc_count / total) * 100,
       })),
     };
   });
-  });
 }
@@ -8,7 +8,6 @@
 import { ESFilter } from '../../../../../../../typings/elasticsearch';
 import { TRANSACTION_DURATION } from '../../../../common/elasticsearch_fieldnames';
 import { ProcessorEvent } from '../../../../common/processor_event';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { Setup, SetupTimeRange } from '../../helpers/setup_request';
 import { TopSigTerm } from '../process_significant_term_aggs';
 
@@ -21,41 +20,39 @@ export async function getMaxLatency({
   filters: ESFilter[];
   topSigTerms?: TopSigTerm[];
 }) {
-  return withApmSpan('get_max_latency', async () => {
   const { apmEventClient } = setup;
 
   const params = {
     // TODO: add support for metrics
     apm: { events: [ProcessorEvent.transaction] },
     body: {
       size: 0,
       query: {
         bool: {
           filter: filters,
 
           ...(topSigTerms.length
             ? {
                 // only include docs containing the significant terms
                 should: topSigTerms.map((term) => ({
                   term: { [term.fieldName]: term.fieldValue },
                 })),
                 minimum_should_match: 1,
               }
             : null),
         },
       },
       aggs: {
         // TODO: add support for metrics
         // max_latency: { max: { field: TRANSACTION_DURATION } },
         max_latency: {
           percentiles: { field: TRANSACTION_DURATION, percents: [99] },
         },
       },
     },
   };
 
-  const response = await apmEventClient.search(params);
+  const response = await apmEventClient.search('get_max_latency', params);
   // return response.aggregations?.max_latency.value;
   return Object.values(response.aggregations?.max_latency.values ?? {})[0];
-  });
 }
@@ -71,8 +71,9 @@ export async function getOverallLatencyDistribution(
     },
   };
 
-  const response = await withApmSpan('get_terms_distribution', () =>
-    apmEventClient.search(params)
+  const response = await apmEventClient.search(
+    'get_terms_distribution',
+    params
   );
 
   if (!response.aggregations) {
@@ -13,7 +13,6 @@ import {
 } from '../../../common/elasticsearch_fieldnames';
 import { ENVIRONMENT_NOT_DEFINED } from '../../../common/environment_filter_values';
 import { getProcessorEventForAggregatedTransactions } from '../helpers/aggregated_transactions';
-import { withApmSpan } from '../../utils/with_apm_span';
 
 /**
  * This is used for getting *all* environments, and does not filter by range.
@@ -30,59 +29,56 @@ export async function getAllEnvironments({
   searchAggregatedTransactions: boolean;
   includeMissing?: boolean;
 }) {
-  const spanName = serviceName
+  const operationName = serviceName
     ? 'get_all_environments_for_service'
     : 'get_all_environments_for_all_services';
-  return withApmSpan(spanName, async () => {
   const { apmEventClient, config } = setup;
   const maxServiceEnvironments = config['xpack.apm.maxServiceEnvironments'];
 
   // omit filter for service.name if "All" option is selected
   const serviceNameFilter = serviceName
     ? [{ term: { [SERVICE_NAME]: serviceName } }]
     : [];
 
   const params = {
     apm: {
       events: [
         getProcessorEventForAggregatedTransactions(
           searchAggregatedTransactions
         ),
         ProcessorEvent.error,
         ProcessorEvent.metric,
       ],
     },
     body: {
       // use timeout + min_doc_count to return as early as possible
       // if filter is not defined to prevent timeouts
       ...(!serviceName ? { timeout: '1ms' } : {}),
       size: 0,
       query: {
         bool: {
           filter: [...serviceNameFilter],
         },
       },
       aggs: {
         environments: {
           terms: {
             field: SERVICE_ENVIRONMENT,
             size: maxServiceEnvironments,
             ...(!serviceName ? { min_doc_count: 0 } : {}),
-            missing: includeMissing
-              ? ENVIRONMENT_NOT_DEFINED.value
-              : undefined,
+            missing: includeMissing ? ENVIRONMENT_NOT_DEFINED.value : undefined,
           },
         },
       },
     },
   };
 
-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search(operationName, params);
 
   const environments =
     resp.aggregations?.environments.buckets.map(
       (bucket) => bucket.key as string
     ) || [];
   return environments;
-  });
 }
@@ -12,7 +12,6 @@ import {
 import { ENVIRONMENT_NOT_DEFINED } from '../../../common/environment_filter_values';
 import { ProcessorEvent } from '../../../common/processor_event';
 import { rangeQuery } from '../../../server/utils/queries';
-import { withApmSpan } from '../../utils/with_apm_span';
 import { getProcessorEventForAggregatedTransactions } from '../helpers/aggregated_transactions';
 import { Setup, SetupTimeRange } from '../helpers/setup_request';
 
@@ -29,60 +28,58 @@ export async function getEnvironments({
   serviceName?: string;
   searchAggregatedTransactions: boolean;
 }) {
-  const spanName = serviceName
+  const operationName = serviceName
     ? 'get_environments_for_service'
     : 'get_environments';
 
-  return withApmSpan(spanName, async () => {
   const { start, end, apmEventClient, config } = setup;
 
   const filter = rangeQuery(start, end);
 
   if (serviceName) {
     filter.push({
       term: { [SERVICE_NAME]: serviceName },
     });
   }
 
   const maxServiceEnvironments = config['xpack.apm.maxServiceEnvironments'];
 
   const params = {
     apm: {
       events: [
         getProcessorEventForAggregatedTransactions(
           searchAggregatedTransactions
         ),
         ProcessorEvent.metric,
         ProcessorEvent.error,
       ],
     },
     body: {
       size: 0,
       query: {
         bool: {
           filter,
         },
       },
       aggs: {
         environments: {
           terms: {
             field: SERVICE_ENVIRONMENT,
             missing: ENVIRONMENT_NOT_DEFINED.value,
             size: maxServiceEnvironments,
           },
         },
       },
     },
   };
 
-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search(operationName, params);
   const aggs = resp.aggregations;
   const environmentsBuckets = aggs?.environments.buckets || [];
 
   const environments = environmentsBuckets.map(
     (environmentBucket) => environmentBucket.key as string
   );
 
   return environments;
-  });
 }
@@ -3,6 +3,7 @@
 exports[`get buckets should make the correct query 1`] = `
 Array [
   Array [
+    "get_error_distribution_buckets",
     Object {
       "apm": Object {
         "events": Array [
@@ -65,7 +65,7 @@ describe('get buckets', () => {
   });
 
   it('should limit query results to error documents', () => {
-    const query = clientSpy.mock.calls[0][0];
+    const query = clientSpy.mock.calls[0][1];
     expect(query.apm.events).toEqual([ProcessorEvent.error]);
   });
 });
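Because the operation name is now the first positional argument, test spies read the request object from index 1 of the recorded call arguments, as the hunk above shows. An illustrative jest assertion along the same lines (the spy name follows the test above; the destructuring style is an assumption):

const [operationName, request] = clientSpy.mock.calls[0];
expect(operationName).toBe('get_error_distribution_buckets');
expect(request.apm.events).toEqual([ProcessorEvent.error]);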
@@ -16,7 +16,6 @@ import {
   rangeQuery,
   kqlQuery,
 } from '../../../../server/utils/queries';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { Setup, SetupTimeRange } from '../../helpers/setup_request';
 
 export async function getBuckets({
@@ -34,58 +33,59 @@ export async function getBuckets({
   bucketSize: number;
   setup: Setup & SetupTimeRange;
 }) {
-  return withApmSpan('get_error_distribution_buckets', async () => {
   const { start, end, apmEventClient } = setup;
   const filter: ESFilter[] = [
     { term: { [SERVICE_NAME]: serviceName } },
     ...rangeQuery(start, end),
     ...environmentQuery(environment),
     ...kqlQuery(kuery),
   ];
 
   if (groupId) {
     filter.push({ term: { [ERROR_GROUP_ID]: groupId } });
   }
 
   const params = {
     apm: {
       events: [ProcessorEvent.error],
     },
     body: {
       size: 0,
       query: {
         bool: {
           filter,
         },
       },
       aggs: {
         distribution: {
           histogram: {
             field: '@timestamp',
             min_doc_count: 0,
             interval: bucketSize,
             extended_bounds: {
               min: start,
               max: end,
             },
           },
         },
       },
     },
   };
 
-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search(
+    'get_error_distribution_buckets',
+    params
+  );
 
   const buckets = (resp.aggregations?.distribution.buckets || []).map(
     (bucket) => ({
       key: bucket.key,
       count: bucket.doc_count,
     })
   );
 
   return {
     noHits: resp.hits.total.value === 0,
     buckets: resp.hits.total.value > 0 ? buckets : [],
   };
-  });
 }
@@ -17,11 +17,10 @@ import {
   rangeQuery,
   kqlQuery,
 } from '../../../server/utils/queries';
-import { withApmSpan } from '../../utils/with_apm_span';
 import { Setup, SetupTimeRange } from '../helpers/setup_request';
 import { getTransaction } from '../transactions/get_transaction';
 
-export function getErrorGroupSample({
+export async function getErrorGroupSample({
   environment,
   kuery,
   serviceName,
@@ -34,48 +33,46 @@ export function getErrorGroupSample({
   groupId: string;
   setup: Setup & SetupTimeRange;
 }) {
-  return withApmSpan('get_error_group_sample', async () => {
   const { start, end, apmEventClient } = setup;
 
   const params = {
     apm: {
       events: [ProcessorEvent.error as const],
     },
     body: {
       size: 1,
       query: {
         bool: {
           filter: [
             { term: { [SERVICE_NAME]: serviceName } },
             { term: { [ERROR_GROUP_ID]: groupId } },
             ...rangeQuery(start, end),
             ...environmentQuery(environment),
             ...kqlQuery(kuery),
           ],
           should: [{ term: { [TRANSACTION_SAMPLED]: true } }],
         },
       },
       sort: asMutableArray([
         { _score: 'desc' }, // sort by _score first to ensure that errors with transaction.sampled:true ends up on top
         { '@timestamp': { order: 'desc' } }, // sort by timestamp to get the most recent error
       ] as const),
     },
   };
 
-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search('get_error_group_sample', params);
   const error = resp.hits.hits[0]?._source;
   const transactionId = error?.transaction?.id;
   const traceId = error?.trace?.id;
 
   let transaction;
   if (transactionId && traceId) {
     transaction = await getTransaction({ transactionId, traceId, setup });
   }
 
   return {
     transaction,
     error,
     occurrencesCount: resp.hits.total.value,
   };
-  });
 }
@@ -15,11 +15,10 @@ import {
 } from '../../../common/elasticsearch_fieldnames';
 import { getErrorGroupsProjection } from '../../projections/errors';
 import { mergeProjection } from '../../projections/util/merge_projection';
-import { withApmSpan } from '../../utils/with_apm_span';
 import { getErrorName } from '../helpers/get_error_name';
 import { Setup, SetupTimeRange } from '../helpers/setup_request';
 
-export function getErrorGroups({
+export async function getErrorGroups({
   environment,
   kuery,
   serviceName,
@@ -34,87 +33,83 @@ export function getErrorGroups({
   sortDirection?: 'asc' | 'desc';
   setup: Setup & SetupTimeRange;
 }) {
-  return withApmSpan('get_error_groups', async () => {
   const { apmEventClient } = setup;
 
   // sort buckets by last occurrence of error
   const sortByLatestOccurrence = sortField === 'latestOccurrenceAt';
 
   const projection = getErrorGroupsProjection({
     environment,
     kuery,
     setup,
     serviceName,
   });
 
   const order = sortByLatestOccurrence
     ? {
         max_timestamp: sortDirection,
       }
     : { _count: sortDirection };
 
   const params = mergeProjection(projection, {
     body: {
       size: 0,
       aggs: {
         error_groups: {
           terms: {
             ...projection.body.aggs.error_groups.terms,
             size: 500,
             order,
           },
           aggs: {
             sample: {
               top_hits: {
                 _source: [
                   ERROR_LOG_MESSAGE,
                   ERROR_EXC_MESSAGE,
                   ERROR_EXC_HANDLED,
                   ERROR_EXC_TYPE,
                   ERROR_CULPRIT,
                   ERROR_GROUP_ID,
                   '@timestamp',
                 ],
                 sort: [{ '@timestamp': 'desc' as const }],
                 size: 1,
               },
             },
             ...(sortByLatestOccurrence
               ? {
                   max_timestamp: {
                     max: {
                       field: '@timestamp',
                     },
                   },
                 }
              : {}),
           },
         },
       },
     },
   });
 
-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search('get_error_groups', params);
 
   // aggregations can be undefined when no matching indices are found.
   // this is an exception rather than the rule so the ES type does not account for this.
-  const hits = (resp.aggregations?.error_groups.buckets || []).map(
-    (bucket) => {
-      const source = bucket.sample.hits.hits[0]._source;
-      const message = getErrorName(source);
-
-      return {
-        message,
-        occurrenceCount: bucket.doc_count,
-        culprit: source.error.culprit,
-        groupId: source.error.grouping_key,
-        latestOccurrenceAt: source['@timestamp'],
-        handled: source.error.exception?.[0].handled,
-        type: source.error.exception?.[0].type,
-      };
-    }
-  );
+  const hits = (resp.aggregations?.error_groups.buckets || []).map((bucket) => {
+    const source = bucket.sample.hits.hits[0]._source;
+    const message = getErrorName(source);
+
+    return {
+      message,
+      occurrenceCount: bucket.doc_count,
+      culprit: source.error.culprit,
+      groupId: source.error.grouping_key,
+      latestOccurrenceAt: source['@timestamp'],
+      handled: source.error.exception?.[0].handled,
+      type: source.error.exception?.[0].type,
+    };
+  });
 
   return hits;
 }
@@ -14,7 +14,6 @@ import {
 } from '../../../../common/elasticsearch_fieldnames';
 import { APMConfig } from '../../..';
 import { APMEventClient } from '../create_es_client/create_apm_event_client';
-import { withApmSpan } from '../../../utils/with_apm_span';
 
 export async function getHasAggregatedTransactions({
   start,
@@ -25,8 +24,9 @@ export async function getHasAggregatedTransactions({
   end?: number;
   apmEventClient: APMEventClient;
 }) {
-  return withApmSpan('get_has_aggregated_transactions', async () => {
-    const response = await apmEventClient.search({
+  const response = await apmEventClient.search(
+    'get_has_aggregated_transactions',
+    {
       apm: {
         events: [ProcessorEvent.metric],
       },
@@ -41,14 +41,14 @@ export async function getHasAggregatedTransactions({
         },
       },
       terminateAfter: 1,
-    });
-
-    if (response.hits.total.value > 0) {
-      return true;
     }
+  );
 
-    return false;
-  });
+  if (response.hits.total.value > 0) {
+    return true;
+  }
+
+  return false;
 }
 
 export async function getSearchAggregatedTransactions({
|
@ -81,17 +81,26 @@ export async function callAsyncWithDebug<T>({
|
|||
return res;
|
||||
}
|
||||
|
||||
export const getDebugBody = (
|
||||
params: Record<string, any>,
|
||||
requestType: string
|
||||
) => {
|
||||
export const getDebugBody = ({
|
||||
params,
|
||||
requestType,
|
||||
operationName,
|
||||
}: {
|
||||
params: Record<string, any>;
|
||||
requestType: string;
|
||||
operationName: string;
|
||||
}) => {
|
||||
const operationLine = `${operationName}\n`;
|
||||
|
||||
if (requestType === 'search') {
|
||||
return `GET ${params.index}/_search\n${formatObj(params.body)}`;
|
||||
return `${operationLine}GET ${params.index}/_search\n${formatObj(
|
||||
params.body
|
||||
)}`;
|
||||
}
|
||||
|
||||
return `${chalk.bold('ES operation:')} ${requestType}\n${chalk.bold(
|
||||
'ES query:'
|
||||
)}\n${formatObj(params)}`;
|
||||
)}\n${operationLine}${formatObj(params)}`;
|
||||
};
|
||||
|
||||
export const getDebugTitle = (request: KibanaRequest) =>
|
||||
|
|
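With operationName threaded into getDebugBody, a debug log entry for a search now leads with the span name before the reproducible query. Roughly, for an assumed index and body, the output looks like:

get_error_distribution_buckets
GET apm-*/_search
{
  "size": 0,
  "query": { "bool": { "filter": [] } }
}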
|
@ -47,7 +47,7 @@ describe('createApmEventClient', () => {
|
|||
},
|
||||
});
|
||||
|
||||
await eventClient.search({
|
||||
await eventClient.search('foo', {
|
||||
apm: {
|
||||
events: [],
|
||||
},
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
*/
|
||||
|
||||
import { ValuesType } from 'utility-types';
|
||||
import { withApmSpan } from '../../../../utils/with_apm_span';
|
||||
import { Profile } from '../../../../../typings/es_schemas/ui/profile';
|
||||
import {
|
||||
ElasticsearchClient,
|
||||
|
@@ -34,6 +35,7 @@ import { unpackProcessorEvents } from './unpack_processor_events';
 export type APMEventESSearchRequest = Omit<ESSearchRequest, 'index'> & {
   apm: {
     events: ProcessorEvent[];
+    includeLegacyData?: boolean;
   };
 };
 
@@ -78,11 +80,13 @@ export function createApmEventClient({
 }) {
   return {
     async search<TParams extends APMEventESSearchRequest>(
-      params: TParams,
-      { includeLegacyData = false } = {}
+      operationName: string,
+      params: TParams
     ): Promise<TypedSearchResponse<TParams>> {
       const withProcessorEventFilter = unpackProcessorEvents(params, indices);
 
+      const { includeLegacyData = false } = params.apm;
+
       const withPossibleLegacyDataFilter = !includeLegacyData
         ? addFilterToExcludeLegacyData(withProcessorEventFilter)
         : withProcessorEventFilter;
@@ -98,15 +102,18 @@ export function createApmEventClient({
 
       return callAsyncWithDebug({
         cb: () => {
-          const searchPromise = cancelEsRequestOnAbort(
-            esClient.search(searchParams),
-            request
+          const searchPromise = withApmSpan(operationName, () =>
+            cancelEsRequestOnAbort(esClient.search(searchParams), request)
           );
 
           return unwrapEsResponse(searchPromise);
         },
         getDebugMessage: () => ({
-          body: getDebugBody(searchParams, requestType),
+          body: getDebugBody({
+            params: searchParams,
+            requestType,
+            operationName,
+          }),
           title: getDebugTitle(request),
         }),
         isCalledWithInternalUser: false,
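This hunk is the enforcement point behind the PR title: createApmEventClient wraps every search in withApmSpan with the caller-supplied name, so no ES search can run without a named span. Stripped to its core, the idea is roughly this (types elided; a sketch, not the full method):

async function search(operationName: string, params: APMEventESSearchRequest) {
  // Every ES search now runs inside a span named by the caller.
  return withApmSpan(operationName, () => esClient.search(params));
}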
|
@ -31,20 +31,23 @@ export function createInternalESClient({
|
|||
}: Pick<APMRouteHandlerResources, 'context' | 'request'> & { debug: boolean }) {
|
||||
const { asInternalUser } = context.core.elasticsearch.client;
|
||||
|
||||
function callEs<T extends { body: any }>({
|
||||
cb,
|
||||
requestType,
|
||||
params,
|
||||
}: {
|
||||
requestType: string;
|
||||
cb: () => TransportRequestPromise<T>;
|
||||
params: Record<string, any>;
|
||||
}) {
|
||||
function callEs<T extends { body: any }>(
|
||||
operationName: string,
|
||||
{
|
||||
cb,
|
||||
requestType,
|
||||
params,
|
||||
}: {
|
||||
requestType: string;
|
||||
cb: () => TransportRequestPromise<T>;
|
||||
params: Record<string, any>;
|
||||
}
|
||||
) {
|
||||
return callAsyncWithDebug({
|
||||
cb: () => unwrapEsResponse(cancelEsRequestOnAbort(cb(), request)),
|
||||
getDebugMessage: () => ({
|
||||
title: getDebugTitle(request),
|
||||
body: getDebugBody(params, requestType),
|
||||
body: getDebugBody({ params, requestType, operationName }),
|
||||
}),
|
||||
debug,
|
||||
isCalledWithInternalUser: true,
|
||||
|
@@ -59,30 +62,37 @@ export function createInternalESClient({
       TDocument = unknown,
       TSearchRequest extends ESSearchRequest = ESSearchRequest
     >(
+      operationName: string,
       params: TSearchRequest
     ): Promise<ESSearchResponse<TDocument, TSearchRequest>> => {
-      return callEs({
+      return callEs(operationName, {
         requestType: 'search',
         cb: () => asInternalUser.search(params),
         params,
       });
     },
-    index: <T>(params: APMIndexDocumentParams<T>) => {
-      return callEs({
+    index: <T>(operationName: string, params: APMIndexDocumentParams<T>) => {
+      return callEs(operationName, {
         requestType: 'index',
         cb: () => asInternalUser.index(params),
         params,
       });
     },
-    delete: (params: estypes.DeleteRequest): Promise<{ result: string }> => {
-      return callEs({
+    delete: (
+      operationName: string,
+      params: estypes.DeleteRequest
+    ): Promise<{ result: string }> => {
+      return callEs(operationName, {
         requestType: 'delete',
         cb: () => asInternalUser.delete(params),
         params,
       });
     },
-    indicesCreate: (params: estypes.IndicesCreateRequest) => {
-      return callEs({
+    indicesCreate: (
+      operationName: string,
+      params: estypes.IndicesCreateRequest
+    ) => {
+      return callEs(operationName, {
         requestType: 'indices.create',
         cb: () => asInternalUser.indices.create(params),
         params,
|
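The internal client follows the same convention: each method now takes the operation name first and threads it through callEs into both the debug output and the APM span. A hedged sketch of a caller; the operation name, index, and document are placeholders, and the client type is elided:

// Illustrative sketch; internalClient stands for the object returned by
// createInternalESClient, everything else here is made up.
async function indexExampleDoc(internalClient: any) {
  return internalClient.index('index_example_doc', {
    index: 'example-index',
    body: { title: 'example' },
  });
}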
@@ -109,7 +109,7 @@ describe('setupRequest', () => {
it('calls callWithRequest', async () => {
const mockResources = getMockResources();
const { apmEventClient } = await setupRequest(mockResources);
await apmEventClient.search({
await apmEventClient.search('foo', {
apm: { events: [ProcessorEvent.transaction] },
body: { foo: 'bar' },
});

@@ -137,7 +137,7 @@ describe('setupRequest', () => {
it('calls callWithInternalUser', async () => {
const mockResources = getMockResources();
const { internalClient } = await setupRequest(mockResources);
await internalClient.search({
await internalClient.search('foo', {
index: ['apm-*'],
body: { foo: 'bar' },
} as any);

@@ -156,7 +156,7 @@ describe('setupRequest', () => {
it('adds a range filter for `observer.version_major` to the existing filter', async () => {
const mockResources = getMockResources();
const { apmEventClient } = await setupRequest(mockResources);
await apmEventClient.search({
await apmEventClient.search('foo', {
apm: {
events: [ProcessorEvent.transaction],
},

@@ -183,19 +183,15 @@ describe('setupRequest', () => {
it('does not add a range filter for `observer.version_major` if includeLegacyData=true', async () => {
const mockResources = getMockResources();
const { apmEventClient } = await setupRequest(mockResources);
await apmEventClient.search(
{
apm: {
events: [ProcessorEvent.error],
},
body: {
query: { bool: { filter: [{ term: { field: 'someTerm' } }] } },
},
},
{
await apmEventClient.search('foo', {
apm: {
events: [ProcessorEvent.error],
includeLegacyData: true,
}
);
},
body: {
query: { bool: { filter: [{ term: { field: 'someTerm' } }] } },
},
});
const params =
mockResources.context.core.elasticsearch.client.asCurrentUser.search
.mock.calls[0][0];

@@ -221,7 +217,7 @@ describe('without a bool filter', () => {
it('adds a range filter for `observer.version_major`', async () => {
const mockResources = getMockResources();
const { apmEventClient } = await setupRequest(mockResources);
await apmEventClient.search({
await apmEventClient.search('foo', {
apm: {
events: [ProcessorEvent.error],
},

@@ -251,7 +247,7 @@ describe('with includeFrozen=false', () => {

const { apmEventClient } = await setupRequest(mockResources);

await apmEventClient.search({
await apmEventClient.search('foo', {
apm: {
events: [],
},

@@ -273,7 +269,7 @@ describe('with includeFrozen=true', () => {

const { apmEventClient } = await setupRequest(mockResources);

await apmEventClient.search({
await apmEventClient.search('foo', {
apm: { events: [] },
});
@@ -30,6 +30,7 @@ export async function fetchAndTransformGcMetrics({
serviceNodeName,
chartBase,
fieldName,
operationName,
}: {
environment?: string;
kuery?: string;

@@ -38,6 +39,7 @@ export async function fetchAndTransformGcMetrics({
serviceNodeName?: string;
chartBase: ChartBase;
fieldName: typeof METRIC_JAVA_GC_COUNT | typeof METRIC_JAVA_GC_TIME;
operationName: string;
}) {
const { start, end, apmEventClient, config } = setup;

@@ -108,7 +110,7 @@ export async function fetchAndTransformGcMetrics({
},
});

const response = await apmEventClient.search(params);
const response = await apmEventClient.search(operationName, params);

const { aggregations } = response;
@@ -7,7 +7,6 @@
import theme from '@elastic/eui/dist/eui_theme_light.json';
import { i18n } from '@kbn/i18n';
import { withApmSpan } from '../../../../../utils/with_apm_span';
import { METRIC_JAVA_GC_COUNT } from '../../../../../../common/elasticsearch_fieldnames';
import { Setup, SetupTimeRange } from '../../../../helpers/setup_request';
import { fetchAndTransformGcMetrics } from './fetch_and_transform_gc_metrics';

@@ -45,17 +44,16 @@ function getGcRateChart({
serviceName: string;
serviceNodeName?: string;
}) {
return withApmSpan('get_gc_rate_charts', () =>
fetchAndTransformGcMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
fieldName: METRIC_JAVA_GC_COUNT,
})
);
return fetchAndTransformGcMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
fieldName: METRIC_JAVA_GC_COUNT,
operationName: 'get_gc_rate_charts',
});
}

export { getGcRateChart };
@@ -7,7 +7,6 @@
import theme from '@elastic/eui/dist/eui_theme_light.json';
import { i18n } from '@kbn/i18n';
import { withApmSpan } from '../../../../../utils/with_apm_span';
import { METRIC_JAVA_GC_TIME } from '../../../../../../common/elasticsearch_fieldnames';
import { Setup, SetupTimeRange } from '../../../../helpers/setup_request';
import { fetchAndTransformGcMetrics } from './fetch_and_transform_gc_metrics';

@@ -45,17 +44,16 @@ function getGcTimeChart({
serviceName: string;
serviceNodeName?: string;
}) {
return withApmSpan('get_gc_time_charts', () =>
fetchAndTransformGcMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
fieldName: METRIC_JAVA_GC_TIME,
})
);
return fetchAndTransformGcMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
fieldName: METRIC_JAVA_GC_TIME,
operationName: 'get_gc_time_charts',
});
}

export { getGcTimeChart };
@@ -7,7 +7,6 @@
import theme from '@elastic/eui/dist/eui_theme_light.json';
import { i18n } from '@kbn/i18n';
import { withApmSpan } from '../../../../../utils/with_apm_span';
import {
METRIC_JAVA_HEAP_MEMORY_MAX,
METRIC_JAVA_HEAP_MEMORY_COMMITTED,

@@ -65,22 +64,21 @@ export function getHeapMemoryChart({
serviceName: string;
serviceNodeName?: string;
}) {
return withApmSpan('get_heap_memory_charts', () =>
fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
heapMemoryMax: { avg: { field: METRIC_JAVA_HEAP_MEMORY_MAX } },
heapMemoryCommitted: {
avg: { field: METRIC_JAVA_HEAP_MEMORY_COMMITTED },
},
heapMemoryUsed: { avg: { field: METRIC_JAVA_HEAP_MEMORY_USED } },
return fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
heapMemoryMax: { avg: { field: METRIC_JAVA_HEAP_MEMORY_MAX } },
heapMemoryCommitted: {
avg: { field: METRIC_JAVA_HEAP_MEMORY_COMMITTED },
},
additionalFilters: [{ term: { [AGENT_NAME]: 'java' } }],
})
);
heapMemoryUsed: { avg: { field: METRIC_JAVA_HEAP_MEMORY_USED } },
},
additionalFilters: [{ term: { [AGENT_NAME]: 'java' } }],
operationName: 'get_heap_memory_charts',
});
}
@@ -7,7 +7,6 @@
import theme from '@elastic/eui/dist/eui_theme_light.json';
import { i18n } from '@kbn/i18n';
import { withApmSpan } from '../../../../../utils/with_apm_span';
import {
METRIC_JAVA_NON_HEAP_MEMORY_MAX,
METRIC_JAVA_NON_HEAP_MEMORY_COMMITTED,

@@ -62,24 +61,23 @@ export async function getNonHeapMemoryChart({
serviceName: string;
serviceNodeName?: string;
}) {
return withApmSpan('get_non_heap_memory_charts', () =>
fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
nonHeapMemoryMax: { avg: { field: METRIC_JAVA_NON_HEAP_MEMORY_MAX } },
nonHeapMemoryCommitted: {
avg: { field: METRIC_JAVA_NON_HEAP_MEMORY_COMMITTED },
},
nonHeapMemoryUsed: {
avg: { field: METRIC_JAVA_NON_HEAP_MEMORY_USED },
},
return fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
nonHeapMemoryMax: { avg: { field: METRIC_JAVA_NON_HEAP_MEMORY_MAX } },
nonHeapMemoryCommitted: {
avg: { field: METRIC_JAVA_NON_HEAP_MEMORY_COMMITTED },
},
additionalFilters: [{ term: { [AGENT_NAME]: 'java' } }],
})
);
nonHeapMemoryUsed: {
avg: { field: METRIC_JAVA_NON_HEAP_MEMORY_USED },
},
},
additionalFilters: [{ term: { [AGENT_NAME]: 'java' } }],
operationName: 'get_non_heap_memory_charts',
});
}
@@ -7,7 +7,6 @@
import theme from '@elastic/eui/dist/eui_theme_light.json';
import { i18n } from '@kbn/i18n';
import { withApmSpan } from '../../../../../utils/with_apm_span';
import {
METRIC_JAVA_THREAD_COUNT,
AGENT_NAME,

@@ -54,19 +53,18 @@ export async function getThreadCountChart({
serviceName: string;
serviceNodeName?: string;
}) {
return withApmSpan('get_thread_count_charts', () =>
fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
threadCount: { avg: { field: METRIC_JAVA_THREAD_COUNT } },
threadCountMax: { max: { field: METRIC_JAVA_THREAD_COUNT } },
},
additionalFilters: [{ term: { [AGENT_NAME]: 'java' } }],
})
);
return fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
threadCount: { avg: { field: METRIC_JAVA_THREAD_COUNT } },
threadCountMax: { max: { field: METRIC_JAVA_THREAD_COUNT } },
},
additionalFilters: [{ term: { [AGENT_NAME]: 'java' } }],
operationName: 'get_thread_count_charts',
});
}
@@ -7,7 +7,6 @@
import theme from '@elastic/eui/dist/eui_theme_light.json';
import { i18n } from '@kbn/i18n';
import { withApmSpan } from '../../../../../utils/with_apm_span';
import {
METRIC_SYSTEM_CPU_PERCENT,
METRIC_PROCESS_CPU_PERCENT,

@@ -66,20 +65,19 @@ export function getCPUChartData({
serviceName: string;
serviceNodeName?: string;
}) {
return withApmSpan('get_cpu_metric_charts', () =>
fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
systemCPUAverage: { avg: { field: METRIC_SYSTEM_CPU_PERCENT } },
systemCPUMax: { max: { field: METRIC_SYSTEM_CPU_PERCENT } },
processCPUAverage: { avg: { field: METRIC_PROCESS_CPU_PERCENT } },
processCPUMax: { max: { field: METRIC_PROCESS_CPU_PERCENT } },
},
})
);
return fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
systemCPUAverage: { avg: { field: METRIC_SYSTEM_CPU_PERCENT } },
systemCPUMax: { max: { field: METRIC_SYSTEM_CPU_PERCENT } },
processCPUAverage: { avg: { field: METRIC_PROCESS_CPU_PERCENT } },
processCPUMax: { max: { field: METRIC_PROCESS_CPU_PERCENT } },
},
operationName: 'get_cpu_metric_charts',
});
}
@@ -84,45 +84,41 @@ export async function getMemoryChartData({
serviceNodeName?: string;
}) {
return withApmSpan('get_memory_metrics_charts', async () => {
const cgroupResponse = await withApmSpan(
'get_cgroup_memory_metrics_charts',
() =>
fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
memoryUsedAvg: { avg: { script: percentCgroupMemoryUsedScript } },
memoryUsedMax: { max: { script: percentCgroupMemoryUsedScript } },
},
additionalFilters: [
{ exists: { field: METRIC_CGROUP_MEMORY_USAGE_BYTES } },
],
})
);
const cgroupResponse = await fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
memoryUsedAvg: { avg: { script: percentCgroupMemoryUsedScript } },
memoryUsedMax: { max: { script: percentCgroupMemoryUsedScript } },
},
additionalFilters: [
{ exists: { field: METRIC_CGROUP_MEMORY_USAGE_BYTES } },
],
operationName: 'get_cgroup_memory_metrics_charts',
});

if (cgroupResponse.noHits) {
return await withApmSpan('get_system_memory_metrics_charts', () =>
fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
memoryUsedAvg: { avg: { script: percentSystemMemoryUsedScript } },
memoryUsedMax: { max: { script: percentSystemMemoryUsedScript } },
},
additionalFilters: [
{ exists: { field: METRIC_SYSTEM_FREE_MEMORY } },
{ exists: { field: METRIC_SYSTEM_TOTAL_MEMORY } },
],
})
);
return await fetchAndTransformMetrics({
environment,
kuery,
setup,
serviceName,
serviceNodeName,
chartBase,
aggs: {
memoryUsedAvg: { avg: { script: percentSystemMemoryUsedScript } },
memoryUsedMax: { max: { script: percentSystemMemoryUsedScript } },
},
additionalFilters: [
{ exists: { field: METRIC_SYSTEM_FREE_MEMORY } },
{ exists: { field: METRIC_SYSTEM_TOTAL_MEMORY } },
],
operationName: 'get_system_memory_metrics_charts',
});
}

return cgroupResponse;
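The memory chart keeps its two-step behavior after the refactor: query cgroup memory first, fall back to system memory only when the cgroup query returns no hits. A condensed, self-contained sketch of that control flow; fetchMetrics stands in for fetchAndTransformMetrics, and only the two operation names are taken from the diff:

type MetricsResult = { noHits: boolean };

async function getMemoryChart(
  fetchMetrics: (operationName: string) => Promise<MetricsResult>
): Promise<MetricsResult> {
  const cgroupResponse = await fetchMetrics('get_cgroup_memory_metrics_charts');
  if (cgroupResponse.noHits) {
    // No cgroup metrics indexed: fall back to system-level memory metrics.
    return fetchMetrics('get_system_memory_metrics_charts');
  }
  return cgroupResponse;
}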
@@ -56,6 +56,7 @@ export async function fetchAndTransformMetrics<T extends MetricAggs>({
chartBase,
aggs,
additionalFilters = [],
operationName,
}: {
environment?: string;
kuery?: string;

@@ -65,6 +66,7 @@ export async function fetchAndTransformMetrics<T extends MetricAggs>({
chartBase: ChartBase;
aggs: T;
additionalFilters?: Filter[];
operationName: string;
}) {
const { start, end, apmEventClient, config } = setup;

@@ -98,7 +100,7 @@ export async function fetchAndTransformMetrics<T extends MetricAggs>({
},
});

const response = await apmEventClient.search(params);
const response = await apmEventClient.search(operationName, params);

return transformDataToMetricsChart(response, chartBase);
}
@@ -10,40 +10,40 @@ import { rangeQuery } from '../../../server/utils/queries';
import { SERVICE_NAME } from '../../../common/elasticsearch_fieldnames';
import { Setup, SetupTimeRange } from '../helpers/setup_request';
import { getProcessorEventForAggregatedTransactions } from '../helpers/aggregated_transactions';
import { withApmSpan } from '../../utils/with_apm_span';

export function getServiceCount({
export async function getServiceCount({
setup,
searchAggregatedTransactions,
}: {
setup: Setup & SetupTimeRange;
searchAggregatedTransactions: boolean;
}) {
return withApmSpan('observability_overview_get_service_count', async () => {
const { apmEventClient, start, end } = setup;
const { apmEventClient, start, end } = setup;

const params = {
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
ProcessorEvent.error,
ProcessorEvent.metric,
],
},
body: {
size: 0,
query: {
bool: {
filter: rangeQuery(start, end),
},
const params = {
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
ProcessorEvent.error,
ProcessorEvent.metric,
],
},
body: {
size: 0,
query: {
bool: {
filter: rangeQuery(start, end),
},
aggs: { serviceCount: { cardinality: { field: SERVICE_NAME } } },
},
};
aggs: { serviceCount: { cardinality: { field: SERVICE_NAME } } },
},
};

const { aggregations } = await apmEventClient.search(params);
return aggregations?.serviceCount.value || 0;
});
const { aggregations } = await apmEventClient.search(
'observability_overview_get_service_count',
params
);
return aggregations?.serviceCount.value || 0;
}
@@ -14,9 +14,8 @@ import { rangeQuery } from '../../../server/utils/queries';
import { Setup, SetupTimeRange } from '../helpers/setup_request';
import { getProcessorEventForAggregatedTransactions } from '../helpers/aggregated_transactions';
import { calculateThroughput } from '../helpers/calculate_throughput';
import { withApmSpan } from '../../utils/with_apm_span';

export function getTransactionsPerMinute({
export async function getTransactionsPerMinute({
setup,
bucketSize,
searchAggregatedTransactions,

@@ -25,71 +24,69 @@ export function getTransactionsPerMinute({
bucketSize: string;
searchAggregatedTransactions: boolean;
}) {
return withApmSpan(
'observability_overview_get_transactions_per_minute',
async () => {
const { apmEventClient, start, end } = setup;
const { apmEventClient, start, end } = setup;

const { aggregations } = await apmEventClient.search({
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
],
},
body: {
size: 0,
query: {
bool: {
filter: rangeQuery(start, end),
},
const { aggregations } = await apmEventClient.search(
'observability_overview_get_transactions_per_minute',
{
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
],
},
body: {
size: 0,
query: {
bool: {
filter: rangeQuery(start, end),
},
aggs: {
transactionType: {
terms: {
field: TRANSACTION_TYPE,
},
aggs: {
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: bucketSize,
min_doc_count: 0,
},
aggs: {
throughput: { rate: { unit: 'minute' as const } },
},
},
aggs: {
transactionType: {
terms: {
field: TRANSACTION_TYPE,
},
aggs: {
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: bucketSize,
min_doc_count: 0,
},
aggs: {
throughput: { rate: { unit: 'minute' as const } },
},
},
},
},
},
});

if (!aggregations || !aggregations.transactionType.buckets) {
return { value: undefined, timeseries: [] };
}

const topTransactionTypeBucket =
aggregations.transactionType.buckets.find(
({ key: transactionType }) =>
transactionType === TRANSACTION_REQUEST ||
transactionType === TRANSACTION_PAGE_LOAD
) || aggregations.transactionType.buckets[0];

return {
value: calculateThroughput({
start,
end,
value: topTransactionTypeBucket?.doc_count || 0,
}),
timeseries:
topTransactionTypeBucket?.timeseries.buckets.map((bucket) => ({
x: bucket.key,
y: bucket.throughput.value,
})) || [],
};
},
}
);

if (!aggregations || !aggregations.transactionType.buckets) {
return { value: undefined, timeseries: [] };
}

const topTransactionTypeBucket =
aggregations.transactionType.buckets.find(
({ key: transactionType }) =>
transactionType === TRANSACTION_REQUEST ||
transactionType === TRANSACTION_PAGE_LOAD
) || aggregations.transactionType.buckets[0];

return {
value: calculateThroughput({
start,
end,
value: topTransactionTypeBucket?.doc_count || 0,
}),
timeseries:
topTransactionTypeBucket?.timeseries.buckets.map((bucket) => ({
x: bucket.key,
y: bucket.throughput.value,
})) || [],
};
}
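The bucket-selection rule above prefers request or page-load transactions and otherwise falls back to the first bucket. A self-contained sketch of that rule; 'request' and 'page-load' are assumed to be the values of TRANSACTION_REQUEST and TRANSACTION_PAGE_LOAD, which this diff does not show:

type TransactionTypeBucket = { key: string; doc_count: number };

// Mirrors the find-or-first fallback used in getTransactionsPerMinute.
function pickTopTransactionTypeBucket(
  buckets: TransactionTypeBucket[]
): TransactionTypeBucket | undefined {
  return (
    buckets.find(({ key }) => key === 'request' || key === 'page-load') ??
    buckets[0]
  );
}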
@@ -6,31 +6,31 @@
*/

import { ProcessorEvent } from '../../../common/processor_event';
import { withApmSpan } from '../../utils/with_apm_span';
import { Setup } from '../helpers/setup_request';

export function getHasData({ setup }: { setup: Setup }) {
return withApmSpan('observability_overview_has_apm_data', async () => {
const { apmEventClient } = setup;
try {
const params = {
apm: {
events: [
ProcessorEvent.transaction,
ProcessorEvent.error,
ProcessorEvent.metric,
],
},
terminateAfter: 1,
body: {
size: 0,
},
};
export async function getHasData({ setup }: { setup: Setup }) {
const { apmEventClient } = setup;
try {
const params = {
apm: {
events: [
ProcessorEvent.transaction,
ProcessorEvent.error,
ProcessorEvent.metric,
],
},
terminateAfter: 1,
body: {
size: 0,
},
};

const response = await apmEventClient.search(params);
return response.hits.total.value > 0;
} catch (e) {
return false;
}
});
const response = await apmEventClient.search(
'observability_overview_has_apm_data',
params
);
return response.hits.total.value > 0;
} catch (e) {
return false;
}
}
@@ -63,7 +63,7 @@ export async function getClientMetrics({
});

const { apmEventClient } = setup;
const response = await apmEventClient.search(params);
const response = await apmEventClient.search('get_client_metrics', params);
const {
hasFetchStartField: { backEnd, totalPageLoadDuration },
} = response.aggregations!;

@@ -94,7 +94,7 @@ export async function getJSErrors({

const { apmEventClient } = setup;

const response = await apmEventClient.search(params);
const response = await apmEventClient.search('get_js_errors', params);

const { totalErrorGroups, totalErrorPages, errors } =
response.aggregations ?? {};

@@ -64,7 +64,7 @@ export async function getLongTaskMetrics({

const { apmEventClient } = setup;

const response = await apmEventClient.search(params);
const response = await apmEventClient.search('get_long_task_metrics', params);

const pkey = percentile.toFixed(1);

@@ -117,7 +117,7 @@ export async function getPageLoadDistribution({
const {
aggregations,
hits: { total },
} = await apmEventClient.search(params);
} = await apmEventClient.search('get_page_load_distribution', params);

if (total.value === 0) {
return null;

@@ -210,7 +210,10 @@ const getPercentilesDistribution = async ({

const { apmEventClient } = setup;

const { aggregations } = await apmEventClient.search(params);
const { aggregations } = await apmEventClient.search(
'get_page_load_distribution',
params
);

return aggregations?.loadDistribution.values ?? [];
};

@@ -69,7 +69,7 @@ export async function getPageViewTrends({

const { apmEventClient } = setup;

const response = await apmEventClient.search(params);
const response = await apmEventClient.search('get_page_view_trends', params);

const { topBreakdowns } = response.aggregations ?? {};

@@ -92,7 +92,10 @@ export const getPageLoadDistBreakdown = async ({

const { apmEventClient } = setup;

const { aggregations } = await apmEventClient.search(params);
const { aggregations } = await apmEventClient.search(
'get_page_load_dist_breakdown',
params
);

const pageDistBreakdowns = aggregations?.breakdowns.buckets;

@@ -38,7 +38,7 @@ export async function getRumServices({

const { apmEventClient } = setup;

const response = await apmEventClient.search(params);
const response = await apmEventClient.search('get_rum_services', params);

const result = response.aggregations?.services.buckets ?? [];

@@ -56,7 +56,7 @@ export async function getUrlSearch({

const { apmEventClient } = setup;

const response = await apmEventClient.search(params);
const response = await apmEventClient.search('get_url_search', params);
const { urls, totalUrls } = response.aggregations ?? {};

const pkey = percentile.toFixed(1);

@@ -51,7 +51,7 @@ export async function getVisitorBreakdown({

const { apmEventClient } = setup;

const response = await apmEventClient.search(params);
const response = await apmEventClient.search('get_visitor_breakdown', params);
const { browsers, os } = response.aggregations!;

const totalItems = response.hits.total.value;

@@ -103,7 +103,7 @@ export async function getWebCoreVitals({

const { apmEventClient } = setup;

const response = await apmEventClient.search(params);
const response = await apmEventClient.search('get_web_core_vitals', params);
const {
lcp,
cls,

@@ -51,7 +51,7 @@ export async function hasRumData({

const { apmEventClient } = setup;

const response = await apmEventClient.search(params);
const response = await apmEventClient.search('has_rum_data', params);
return {
indices: setup.indices['apm_oss.transactionIndices']!,
hasData: response.hits.total.value > 0,
@@ -38,37 +38,38 @@ export function getLocalUIFilters({
delete projectionWithoutAggs.body.aggs;

return Promise.all(
localFilterNames.map(async (name) =>
withApmSpan('get_ui_filter_options_for_field', async () => {
const query = getLocalFilterQuery({
uiFilters,
projection,
localUIFilterName: name,
});
localFilterNames.map(async (name) => {
const query = getLocalFilterQuery({
uiFilters,
projection,
localUIFilterName: name,
});

const response = await apmEventClient.search(query);
const response = await apmEventClient.search(
'get_ui_filter_options_for_field',
query
);

const filter = localUIFilters[name];
const filter = localUIFilters[name];

const buckets = response?.aggregations?.by_terms?.buckets ?? [];
const buckets = response?.aggregations?.by_terms?.buckets ?? [];

return {
...filter,
options: orderBy(
buckets.map((bucket) => {
return {
name: bucket.key as string,
count: bucket.bucket_count
? bucket.bucket_count.value
: bucket.doc_count,
};
}),
'count',
'desc'
),
};
})
)
return {
...filter,
options: orderBy(
buckets.map((bucket) => {
return {
name: bucket.key as string,
count: bucket.bucket_count
? bucket.bucket_count.value
: bucket.doc_count,
};
}),
'count',
'desc'
),
};
})
);
});
}
@@ -14,44 +14,42 @@ import {
ServiceConnectionNode,
} from '../../../common/service_map';
import { Setup, SetupTimeRange } from '../helpers/setup_request';
import { withApmSpan } from '../../utils/with_apm_span';

export async function fetchServicePathsFromTraceIds(
setup: Setup & SetupTimeRange,
traceIds: string[]
) {
return withApmSpan('get_service_paths_from_trace_ids', async () => {
const { apmEventClient } = setup;
const { apmEventClient } = setup;

// make sure there's a range so ES can skip shards
const dayInMs = 24 * 60 * 60 * 1000;
const start = setup.start - dayInMs;
const end = setup.end + dayInMs;
// make sure there's a range so ES can skip shards
const dayInMs = 24 * 60 * 60 * 1000;
const start = setup.start - dayInMs;
const end = setup.end + dayInMs;

const serviceMapParams = {
apm: {
events: [ProcessorEvent.span, ProcessorEvent.transaction],
},
body: {
size: 0,
query: {
bool: {
filter: [
{
terms: {
[TRACE_ID]: traceIds,
},
const serviceMapParams = {
apm: {
events: [ProcessorEvent.span, ProcessorEvent.transaction],
},
body: {
size: 0,
query: {
bool: {
filter: [
{
terms: {
[TRACE_ID]: traceIds,
},
...rangeQuery(start, end),
],
},
},
...rangeQuery(start, end),
],
},
aggs: {
service_map: {
scripted_metric: {
init_script: {
lang: 'painless',
source: `state.eventsById = new HashMap();
},
aggs: {
service_map: {
scripted_metric: {
init_script: {
lang: 'painless',
source: `state.eventsById = new HashMap();

String[] fieldsToCopy = new String[] {
'parent.id',

@@ -65,10 +63,10 @@ export async function fetchServicePathsFromTraceIds(
'agent.name'
};
state.fieldsToCopy = fieldsToCopy;`,
},
map_script: {
lang: 'painless',
source: `def id;
},
map_script: {
lang: 'painless',
source: `def id;
if (!doc['span.id'].empty) {
id = doc['span.id'].value;
} else {

@@ -85,14 +83,14 @@ export async function fetchServicePathsFromTraceIds(
}

state.eventsById[id] = copy`,
},
combine_script: {
lang: 'painless',
source: `return state.eventsById;`,
},
reduce_script: {
lang: 'painless',
source: `
},
combine_script: {
lang: 'painless',
source: `return state.eventsById;`,
},
reduce_script: {
lang: 'painless',
source: `
def getDestination ( def event ) {
def destination = new HashMap();
destination['span.destination.service.resource'] = event['span.destination.service.resource'];

@@ -208,29 +206,29 @@ export async function fetchServicePathsFromTraceIds(
response.discoveredServices = discoveredServices;

return response;`,
},
},
},
} as const,
},
};
},
};

const serviceMapFromTraceIdsScriptResponse = await apmEventClient.search(
serviceMapParams
);
const serviceMapFromTraceIdsScriptResponse = await apmEventClient.search(
'get_service_paths_from_trace_ids',
serviceMapParams
);

return serviceMapFromTraceIdsScriptResponse as {
aggregations?: {
service_map: {
value: {
paths: ConnectionNode[][];
discoveredServices: Array<{
from: ExternalConnectionNode;
to: ServiceConnectionNode;
}>;
};
return serviceMapFromTraceIdsScriptResponse as {
aggregations?: {
service_map: {
value: {
paths: ConnectionNode[][];
discoveredServices: Array<{
from: ExternalConnectionNode;
to: ServiceConnectionNode;
}>;
};
};
};
});
};
}
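For orientation, the scripted_metric aggregation above follows Elasticsearch's four-phase shape. A skeleton of that shape; the init and combine sources appear verbatim in the diff, while the map and reduce sources here are placeholders standing in for the much longer painless scripts:

// Hedged skeleton only; not the real map/reduce scripts.
const serviceMapAgg = {
  scripted_metric: {
    init_script: { lang: 'painless', source: 'state.eventsById = new HashMap();' },
    map_script: { lang: 'painless', source: '/* copy fieldsToCopy into state.eventsById */' },
    combine_script: { lang: 'painless', source: 'return state.eventsById;' },
    reduce_script: { lang: 'painless', source: '/* walk parent ids to build paths */' },
  },
} as const;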
@@ -87,69 +87,70 @@ async function getConnectionData({
}

async function getServicesData(options: IEnvOptions) {
return withApmSpan('get_service_stats_for_service_map', async () => {
const { environment, setup, searchAggregatedTransactions } = options;
const { environment, setup, searchAggregatedTransactions } = options;

const projection = getServicesProjection({
setup,
searchAggregatedTransactions,
const projection = getServicesProjection({
setup,
searchAggregatedTransactions,
});

let filter = [
...projection.body.query.bool.filter,
...environmentQuery(environment),
];

if (options.serviceName) {
filter = filter.concat({
term: {
[SERVICE_NAME]: options.serviceName,
},
});
}

let filter = [
...projection.body.query.bool.filter,
...environmentQuery(environment),
];

if (options.serviceName) {
filter = filter.concat({
term: {
[SERVICE_NAME]: options.serviceName,
const params = mergeProjection(projection, {
body: {
size: 0,
query: {
bool: {
...projection.body.query.bool,
filter,
},
});
}

const params = mergeProjection(projection, {
body: {
size: 0,
query: {
bool: {
...projection.body.query.bool,
filter,
},
aggs: {
services: {
terms: {
field: projection.body.aggs.services.terms.field,
size: 500,
},
},
aggs: {
services: {
terms: {
field: projection.body.aggs.services.terms.field,
size: 500,
},
aggs: {
agent_name: {
terms: {
field: AGENT_NAME,
},
aggs: {
agent_name: {
terms: {
field: AGENT_NAME,
},
},
},
},
},
});

const { apmEventClient } = setup;

const response = await apmEventClient.search(params);

return (
response.aggregations?.services.buckets.map((bucket) => {
return {
[SERVICE_NAME]: bucket.key as string,
[AGENT_NAME]:
(bucket.agent_name.buckets[0]?.key as string | undefined) || '',
[SERVICE_ENVIRONMENT]: options.environment || null,
};
}) || []
);
},
});

const { apmEventClient } = setup;

const response = await apmEventClient.search(
'get_service_stats_for_service_map',
params
);

return (
response.aggregations?.services.buckets.map((bucket) => {
return {
[SERVICE_NAME]: bucket.key as string,
[AGENT_NAME]:
(bucket.agent_name.buckets[0]?.key as string | undefined) || '',
[SERVICE_ENVIRONMENT]: options.environment || null,
};
}) || []
);
}

export type ConnectionsResponse = PromiseReturnType<typeof getConnectionData>;
@@ -120,7 +120,7 @@ async function getErrorStats({
});
}

function getTransactionStats({
async function getTransactionStats({
setup,
filter,
minutes,

@@ -129,68 +129,70 @@ function getTransactionStats({
avgTransactionDuration: number | null;
avgRequestsPerMinute: number | null;
}> {
return withApmSpan('get_transaction_stats_for_service_map_node', async () => {
const { apmEventClient } = setup;
const { apmEventClient } = setup;

const params = {
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
],
},
body: {
size: 0,
query: {
bool: {
filter: [
...filter,
...getDocumentTypeFilterForAggregatedTransactions(
searchAggregatedTransactions
),
{
terms: {
[TRANSACTION_TYPE]: [
TRANSACTION_REQUEST,
TRANSACTION_PAGE_LOAD,
],
},
const params = {
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
],
},
body: {
size: 0,
query: {
bool: {
filter: [
...filter,
...getDocumentTypeFilterForAggregatedTransactions(
searchAggregatedTransactions
),
{
terms: {
[TRANSACTION_TYPE]: [
TRANSACTION_REQUEST,
TRANSACTION_PAGE_LOAD,
],
},
],
},
},
track_total_hits: true,
aggs: {
duration: {
avg: {
field: getTransactionDurationFieldForAggregatedTransactions(
searchAggregatedTransactions
),
},
],
},
},
track_total_hits: true,
aggs: {
duration: {
avg: {
field: getTransactionDurationFieldForAggregatedTransactions(
searchAggregatedTransactions
),
},
},
},
};
const response = await apmEventClient.search(params);
},
};
const response = await apmEventClient.search(
'get_transaction_stats_for_service_map_node',
params
);

const totalRequests = response.hits.total.value;
const totalRequests = response.hits.total.value;

return {
avgTransactionDuration: response.aggregations?.duration.value ?? null,
avgRequestsPerMinute: totalRequests > 0 ? totalRequests / minutes : null,
};
});
return {
avgTransactionDuration: response.aggregations?.duration.value ?? null,
avgRequestsPerMinute: totalRequests > 0 ? totalRequests / minutes : null,
};
}

function getCpuStats({
async function getCpuStats({
setup,
filter,
}: TaskParameters): Promise<{ avgCpuUsage: number | null }> {
return withApmSpan('get_avg_cpu_usage_for_service_map_node', async () => {
const { apmEventClient } = setup;
const { apmEventClient } = setup;

const response = await apmEventClient.search({
const response = await apmEventClient.search(
'get_avg_cpu_usage_for_service_map_node',
{
apm: {
events: [ProcessorEvent.metric],
},

@@ -206,10 +208,10 @@ function getCpuStats({
},
aggs: { avgCpuUsage: { avg: { field: METRIC_SYSTEM_CPU_PERCENT } } },
},
});
}
);

return { avgCpuUsage: response.aggregations?.avgCpuUsage.value ?? null };
});
return { avgCpuUsage: response.aggregations?.avgCpuUsage.value ?? null };
}

function getMemoryStats({

@@ -219,7 +221,7 @@ function getMemoryStats({
return withApmSpan('get_memory_stats_for_service_map_node', async () => {
const { apmEventClient } = setup;

const getAvgMemoryUsage = ({
const getAvgMemoryUsage = async ({
additionalFilters,
script,
}: {

@@ -228,8 +230,9 @@ function getMemoryStats({
| typeof percentCgroupMemoryUsedScript
| typeof percentSystemMemoryUsedScript;
}) => {
return withApmSpan('get_avg_memory_for_service_map_node', async () => {
const response = await apmEventClient.search({
const response = await apmEventClient.search(
'get_avg_memory_for_service_map_node',
{
apm: {
events: [ProcessorEvent.metric],
},

@@ -244,9 +247,9 @@ function getMemoryStats({
avgMemoryUsage: { avg: { script } },
},
},
});
return response.aggregations?.avgMemoryUsage.value ?? null;
});
}
);
return response.aggregations?.avgMemoryUsage.value ?? null;
};

let avgMemoryUsage = await getAvgMemoryUsage({
@@ -18,12 +18,11 @@ import {
import { ProcessorEvent } from '../../../common/processor_event';
import { SERVICE_MAP_TIMEOUT_ERROR } from '../../../common/service_map';
import { environmentQuery, rangeQuery } from '../../../server/utils/queries';
import { withApmSpan } from '../../utils/with_apm_span';
import { Setup, SetupTimeRange } from '../helpers/setup_request';

const MAX_TRACES_TO_INSPECT = 1000;

export function getTraceSampleIds({
export async function getTraceSampleIds({
serviceName,
environment,
setup,

@@ -32,90 +31,88 @@ export function getTraceSampleIds({
environment?: string;
setup: Setup & SetupTimeRange;
}) {
return withApmSpan('get_trace_sample_ids', async () => {
const { start, end, apmEventClient, config } = setup;
const { start, end, apmEventClient, config } = setup;

const query = {
bool: {
filter: [
{
exists: {
field: SPAN_DESTINATION_SERVICE_RESOURCE,
},
const query = {
bool: {
filter: [
{
exists: {
field: SPAN_DESTINATION_SERVICE_RESOURCE,
},
...rangeQuery(start, end),
] as ESFilter[],
},
} as { bool: { filter: ESFilter[]; must_not?: ESFilter[] | ESFilter } };
},
...rangeQuery(start, end),
] as ESFilter[],
},
} as { bool: { filter: ESFilter[]; must_not?: ESFilter[] | ESFilter } };

if (serviceName) {
query.bool.filter.push({ term: { [SERVICE_NAME]: serviceName } });
}
if (serviceName) {
query.bool.filter.push({ term: { [SERVICE_NAME]: serviceName } });
}

query.bool.filter.push(...environmentQuery(environment));
query.bool.filter.push(...environmentQuery(environment));

const fingerprintBucketSize = serviceName
? config['xpack.apm.serviceMapFingerprintBucketSize']
: config['xpack.apm.serviceMapFingerprintGlobalBucketSize'];
const fingerprintBucketSize = serviceName
? config['xpack.apm.serviceMapFingerprintBucketSize']
: config['xpack.apm.serviceMapFingerprintGlobalBucketSize'];

const traceIdBucketSize = serviceName
? config['xpack.apm.serviceMapTraceIdBucketSize']
: config['xpack.apm.serviceMapTraceIdGlobalBucketSize'];
const traceIdBucketSize = serviceName
? config['xpack.apm.serviceMapTraceIdBucketSize']
: config['xpack.apm.serviceMapTraceIdGlobalBucketSize'];

const samplerShardSize = traceIdBucketSize * 10;
const samplerShardSize = traceIdBucketSize * 10;

const params = {
apm: {
events: [ProcessorEvent.span],
},
body: {
size: 0,
query,
aggs: {
connections: {
composite: {
sources: asMutableArray([
{
[SPAN_DESTINATION_SERVICE_RESOURCE]: {
terms: {
field: SPAN_DESTINATION_SERVICE_RESOURCE,
},
const params = {
apm: {
events: [ProcessorEvent.span],
},
body: {
size: 0,
query,
aggs: {
connections: {
composite: {
sources: asMutableArray([
{
[SPAN_DESTINATION_SERVICE_RESOURCE]: {
terms: {
field: SPAN_DESTINATION_SERVICE_RESOURCE,
},
},
{
[SERVICE_NAME]: {
terms: {
field: SERVICE_NAME,
},
},
{
[SERVICE_NAME]: {
terms: {
field: SERVICE_NAME,
},
},
{
[SERVICE_ENVIRONMENT]: {
terms: {
field: SERVICE_ENVIRONMENT,
missing_bucket: true,
},
},
{
[SERVICE_ENVIRONMENT]: {
terms: {
field: SERVICE_ENVIRONMENT,
missing_bucket: true,
},
},
] as const),
size: fingerprintBucketSize,
},
aggs: {
sample: {
sampler: {
shard_size: samplerShardSize,
},
aggs: {
trace_ids: {
terms: {
field: TRACE_ID,
size: traceIdBucketSize,
execution_hint: 'map' as const,
// remove bias towards large traces by sorting on trace.id
// which will be random-esque
order: {
_key: 'desc' as const,
},
},
] as const),
size: fingerprintBucketSize,
},
aggs: {
sample: {
sampler: {
shard_size: samplerShardSize,
},
aggs: {
trace_ids: {
terms: {
field: TRACE_ID,
size: traceIdBucketSize,
execution_hint: 'map' as const,
// remove bias towards large traces by sorting on trace.id
// which will be random-esque
order: {
_key: 'desc' as const,
},
},
},

@@ -124,34 +121,36 @@ export function getTraceSampleIds({
},
},
},
};
},
};

try {
const tracesSampleResponse = await apmEventClient.search(params);
// make sure at least one trace per composite/connection bucket
// is queried
const traceIdsWithPriority =
tracesSampleResponse.aggregations?.connections.buckets.flatMap(
(bucket) =>
bucket.sample.trace_ids.buckets.map((sampleDocBucket, index) => ({
traceId: sampleDocBucket.key as string,
priority: index,
}))
) || [];
try {
const tracesSampleResponse = await apmEventClient.search(
'get_trace_sample_ids',
params
);
// make sure at least one trace per composite/connection bucket
// is queried
const traceIdsWithPriority =
tracesSampleResponse.aggregations?.connections.buckets.flatMap((bucket) =>
bucket.sample.trace_ids.buckets.map((sampleDocBucket, index) => ({
traceId: sampleDocBucket.key as string,
priority: index,
}))
) || [];

const traceIds = take(
uniq(
sortBy(traceIdsWithPriority, 'priority').map(({ traceId }) => traceId)
),
MAX_TRACES_TO_INSPECT
);
const traceIds = take(
uniq(
sortBy(traceIdsWithPriority, 'priority').map(({ traceId }) => traceId)
),
MAX_TRACES_TO_INSPECT
);

return { traceIds };
} catch (error) {
if ('displayName' in error && error.displayName === 'RequestTimeout') {
throw Boom.internal(SERVICE_MAP_TIMEOUT_ERROR);
}
throw error;
return { traceIds };
} catch (error) {
if ('displayName' in error && error.displayName === 'RequestTimeout') {
throw Boom.internal(SERVICE_MAP_TIMEOUT_ERROR);
}
});
throw error;
}
}
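The trace-id selection at the end of getTraceSampleIds is worth isolating: each connection bucket contributes its trace ids with an index-based priority, so sorting by priority before de-duplicating guarantees at least one trace per bucket survives the cap. A self-contained sketch using the same lodash helpers; the function name is illustrative:

import { sortBy, take, uniq } from 'lodash';

interface TraceIdWithPriority {
  traceId: string;
  priority: number;
}

function selectTraceIds(
  traceIdsWithPriority: TraceIdWithPriority[],
  maxTracesToInspect: number
): string[] {
  // Low priority values (top-ranked per bucket) come first, then de-duplicate
  // across buckets and cap the total number of traces to inspect.
  return take(
    uniq(sortBy(traceIdsWithPriority, 'priority').map(({ traceId }) => traceId)),
    maxTracesToInspect
  );
}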
@@ -14,10 +14,9 @@ import {
import { SERVICE_NODE_NAME_MISSING } from '../../../common/service_nodes';
import { getServiceNodesProjection } from '../../projections/service_nodes';
import { mergeProjection } from '../../projections/util/merge_projection';
import { withApmSpan } from '../../utils/with_apm_span';
import { Setup, SetupTimeRange } from '../helpers/setup_request';

const getServiceNodes = ({
const getServiceNodes = async ({
kuery,
setup,
serviceName,

@@ -26,69 +25,67 @@ const getServiceNodes = ({
setup: Setup & SetupTimeRange;
serviceName: string;
}) => {
return withApmSpan('get_service_nodes', async () => {
const { apmEventClient } = setup;
const { apmEventClient } = setup;

const projection = getServiceNodesProjection({ kuery, setup, serviceName });
const projection = getServiceNodesProjection({ kuery, setup, serviceName });

const params = mergeProjection(projection, {
body: {
aggs: {
nodes: {
terms: {
...projection.body.aggs.nodes.terms,
size: 10000,
missing: SERVICE_NODE_NAME_MISSING,
const params = mergeProjection(projection, {
body: {
aggs: {
nodes: {
terms: {
...projection.body.aggs.nodes.terms,
size: 10000,
missing: SERVICE_NODE_NAME_MISSING,
},
aggs: {
cpu: {
avg: {
field: METRIC_PROCESS_CPU_PERCENT,
},
},
aggs: {
cpu: {
avg: {
field: METRIC_PROCESS_CPU_PERCENT,
},
heapMemory: {
avg: {
field: METRIC_JAVA_HEAP_MEMORY_USED,
},
heapMemory: {
avg: {
field: METRIC_JAVA_HEAP_MEMORY_USED,
},
},
nonHeapMemory: {
avg: {
field: METRIC_JAVA_NON_HEAP_MEMORY_USED,
},
nonHeapMemory: {
avg: {
field: METRIC_JAVA_NON_HEAP_MEMORY_USED,
},
},
threadCount: {
max: {
field: METRIC_JAVA_THREAD_COUNT,
},
},
threadCount: {
max: {
field: METRIC_JAVA_THREAD_COUNT,
},
},
},
},
});

const response = await apmEventClient.search(params);

if (!response.aggregations) {
return [];
}

return response.aggregations.nodes.buckets
.map((bucket) => ({
name: bucket.key as string,
cpu: bucket.cpu.value,
heapMemory: bucket.heapMemory.value,
nonHeapMemory: bucket.nonHeapMemory.value,
threadCount: bucket.threadCount.value,
}))
.filter(
(item) =>
item.cpu !== null ||
item.heapMemory !== null ||
item.nonHeapMemory !== null ||
item.threadCount != null
);
},
});

const response = await apmEventClient.search('get_service_nodes', params);

if (!response.aggregations) {
return [];
}

return response.aggregations.nodes.buckets
.map((bucket) => ({
name: bucket.key as string,
cpu: bucket.cpu.value,
heapMemory: bucket.heapMemory.value,
nonHeapMemory: bucket.nonHeapMemory.value,
threadCount: bucket.threadCount.value,
}))
.filter(
(item) =>
item.cpu !== null ||
item.heapMemory !== null ||
item.nonHeapMemory !== null ||
item.threadCount != null
);
};

export { getServiceNodes };
@@ -22,6 +22,7 @@ Object {
"events": Array [
"transaction",
],
"includeLegacyData": true,
},
"body": Object {
"query": Object {
@ -13,7 +13,6 @@ import {
|
|||
SERVICE_VERSION,
|
||||
} from '../../../../common/elasticsearch_fieldnames';
|
||||
import { environmentQuery, rangeQuery } from '../../../../server/utils/queries';
|
||||
import { withApmSpan } from '../../../utils/with_apm_span';
|
||||
import {
|
||||
getDocumentTypeFilterForAggregatedTransactions,
|
||||
getProcessorEventForAggregatedTransactions,
|
||||
|
@ -31,20 +30,52 @@ export async function getDerivedServiceAnnotations({
|
|||
setup: Setup & SetupTimeRange;
|
||||
searchAggregatedTransactions: boolean;
|
||||
}) {
|
||||
return withApmSpan('get_derived_service_annotations', async () => {
|
||||
const { start, end, apmEventClient } = setup;
|
||||
const { start, end, apmEventClient } = setup;
|
||||
|
||||
const filter: ESFilter[] = [
|
||||
{ term: { [SERVICE_NAME]: serviceName } },
|
||||
...getDocumentTypeFilterForAggregatedTransactions(
|
||||
searchAggregatedTransactions
|
||||
),
|
||||
...environmentQuery(environment),
|
||||
];
|
||||
const filter: ESFilter[] = [
|
||||
{ term: { [SERVICE_NAME]: serviceName } },
|
||||
...getDocumentTypeFilterForAggregatedTransactions(
searchAggregatedTransactions
),
...environmentQuery(environment),
];

const versions =
(
await apmEventClient.search({
const versions =
(
await apmEventClient.search('get_derived_service_annotations', {
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
],
},
body: {
size: 0,
query: {
bool: {
filter: [...filter, ...rangeQuery(start, end)],
},
},
aggs: {
versions: {
terms: {
field: SERVICE_VERSION,
},
},
},
},
})
).aggregations?.versions.buckets.map((bucket) => bucket.key) ?? [];

if (versions.length <= 1) {
return [];
}
const annotations = await Promise.all(
versions.map(async (version) => {
const response = await apmEventClient.search(
'get_first_seen_of_version',
{
apm: {
events: [
getProcessorEventForAggregatedTransactions(

@@ -53,73 +84,40 @@ export async function getDerivedServiceAnnotations({
],
},
body: {
size: 0,
size: 1,
query: {
bool: {
filter: [...filter, ...rangeQuery(start, end)],
filter: [...filter, { term: { [SERVICE_VERSION]: version } }],
},
},
aggs: {
versions: {
terms: {
field: SERVICE_VERSION,
},
},
sort: {
'@timestamp': 'asc',
},
},
})
).aggregations?.versions.buckets.map((bucket) => bucket.key) ?? [];
}
);

if (versions.length <= 1) {
return [];
}
const annotations = await Promise.all(
versions.map(async (version) => {
return withApmSpan('get_first_seen_of_version', async () => {
const response = await apmEventClient.search({
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
],
},
body: {
size: 1,
query: {
bool: {
filter: [...filter, { term: { [SERVICE_VERSION]: version } }],
},
},
sort: {
'@timestamp': 'asc',
},
},
});
const firstSeen = new Date(
response.hits.hits[0]._source['@timestamp']
).getTime();

const firstSeen = new Date(
response.hits.hits[0]._source['@timestamp']
).getTime();
if (!isFiniteNumber(firstSeen)) {
throw new Error(
'First seen for version was unexpectedly undefined or null.'
);
}

if (!isFiniteNumber(firstSeen)) {
throw new Error(
'First seen for version was unexpectedly undefined or null.'
);
}
if (firstSeen < start || firstSeen > end) {
return null;
}

if (firstSeen < start || firstSeen > end) {
return null;
}

return {
type: AnnotationType.VERSION,
id: version,
'@timestamp': firstSeen,
text: version,
};
});
})
);
return annotations.filter(Boolean) as Annotation[];
});
return {
type: AnnotationType.VERSION,
id: version,
'@timestamp': firstSeen,
text: version,
};
})
);
return annotations.filter(Boolean) as Annotation[];
}
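For orientation, the change applied throughout this diff moves the span name out of a withApmSpan() wrapper and into the first argument of apmEventClient.search(), so every Elasticsearch search is named at its call site. A minimal sketch of the new call shape, assuming an illustrative stand-in for the real client type:

interface ApmEventClient {
  // After this change, the first argument names the APM span for the search.
  search<TResponse = unknown>(
    operationName: string,
    params: { apm: { events: string[] }; body: Record<string, unknown> }
  ): Promise<TResponse>;
}

// Usage mirroring the hunk above: the operation name doubles as the span name.
async function fetchVersionBuckets(apmEventClient: ApmEventClient) {
  return apmEventClient.search('get_derived_service_annotations', {
    apm: { events: ['transaction'] },
    body: { size: 0, aggs: { versions: { terms: { field: 'service.version' } } } },
  });
}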
@@ -13,9 +13,8 @@ import {
import { rangeQuery } from '../../../server/utils/queries';
import { Setup, SetupTimeRange } from '../helpers/setup_request';
import { getProcessorEventForAggregatedTransactions } from '../helpers/aggregated_transactions';
import { withApmSpan } from '../../utils/with_apm_span';

export function getServiceAgentName({
export async function getServiceAgentName({
serviceName,
setup,
searchAggregatedTransactions,

@@ -24,42 +23,41 @@ export function getServiceAgentName({
setup: Setup & SetupTimeRange;
searchAggregatedTransactions: boolean;
}) {
return withApmSpan('get_service_agent_name', async () => {
const { start, end, apmEventClient } = setup;
const { start, end, apmEventClient } = setup;

const params = {
terminateAfter: 1,
apm: {
events: [
ProcessorEvent.error,
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
ProcessorEvent.metric,
],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
...rangeQuery(start, end),
],
},
},
aggs: {
agents: {
terms: { field: AGENT_NAME, size: 1 },
},
const params = {
terminateAfter: 1,
apm: {
events: [
ProcessorEvent.error,
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
ProcessorEvent.metric,
],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
...rangeQuery(start, end),
],
},
},
};
aggs: {
agents: {
terms: { field: AGENT_NAME, size: 1 },
},
},
},
};

const { aggregations } = await apmEventClient.search(params);
const agentName = aggregations?.agents.buckets[0]?.key as
| string
| undefined;
return { agentName };
});
const { aggregations } = await apmEventClient.search(
'get_service_agent_name',
params
);
const agentName = aggregations?.agents.buckets[0]?.key as string | undefined;
return { agentName };
}
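The same conversion in its simplest form: the withApmSpan('get_service_agent_name', ...) wrapper around the whole body is dropped, the function becomes plain async, and the name is passed to search() instead. A hedged before/after sketch; the declarations below stand in for the real imports:

declare function withApmSpan<T>(name: string, cb: () => Promise<T>): Promise<T>;
declare const oldClient: { search(params: object): Promise<unknown> };
declare const newClient: { search(name: string, params: object): Promise<unknown> };

// Before: the span is created by wrapping the whole function body.
function getAgentNameBefore(params: object) {
  return withApmSpan('get_service_agent_name', async () => oldClient.search(params));
}

// After: the search call itself carries the span name.
async function getAgentNameAfter(params: object) {
  return newClient.search('get_service_agent_name', params);
}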
@@ -38,56 +38,54 @@ export const getDestinationMap = ({
return withApmSpan('get_service_destination_map', async () => {
const { start, end, apmEventClient } = setup;

const response = await withApmSpan('get_exit_span_samples', async () =>
apmEventClient.search({
apm: {
events: [ProcessorEvent.span],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
{ exists: { field: SPAN_DESTINATION_SERVICE_RESOURCE } },
...rangeQuery(start, end),
...environmentQuery(environment),
],
},
const response = await apmEventClient.search('get_exit_span_samples', {
apm: {
events: [ProcessorEvent.span],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
{ exists: { field: SPAN_DESTINATION_SERVICE_RESOURCE } },
...rangeQuery(start, end),
...environmentQuery(environment),
],
},
aggs: {
connections: {
composite: {
size: 1000,
sources: asMutableArray([
{
[SPAN_DESTINATION_SERVICE_RESOURCE]: {
terms: { field: SPAN_DESTINATION_SERVICE_RESOURCE },
},
aggs: {
connections: {
composite: {
size: 1000,
sources: asMutableArray([
{
[SPAN_DESTINATION_SERVICE_RESOURCE]: {
terms: { field: SPAN_DESTINATION_SERVICE_RESOURCE },
},
},
// make sure we get samples for both successful
// and failed calls
{ [EVENT_OUTCOME]: { terms: { field: EVENT_OUTCOME } } },
] as const),
},
aggs: {
sample: {
top_hits: {
size: 1,
_source: [SPAN_TYPE, SPAN_SUBTYPE, SPAN_ID],
sort: [
{
'@timestamp': 'desc' as const,
},
},
// make sure we get samples for both successful
// and failed calls
{ [EVENT_OUTCOME]: { terms: { field: EVENT_OUTCOME } } },
] as const),
},
aggs: {
sample: {
top_hits: {
size: 1,
_source: [SPAN_TYPE, SPAN_SUBTYPE, SPAN_ID],
sort: [
{
'@timestamp': 'desc' as const,
},
],
},
],
},
},
},
},
},
})
);
},
});

const outgoingConnections =
response.aggregations?.connections.buckets.map((bucket) => {

@@ -103,38 +101,37 @@ export const getDestinationMap = ({
};
}) ?? [];

const transactionResponse = await withApmSpan(
const transactionResponse = await apmEventClient.search(
'get_transactions_for_exit_spans',
() =>
apmEventClient.search({
apm: {
events: [ProcessorEvent.transaction],
},
body: {
query: {
bool: {
filter: [
{
terms: {
[PARENT_ID]: outgoingConnections.map(
(connection) => connection[SPAN_ID]
),
},
{
apm: {
events: [ProcessorEvent.transaction],
},
body: {
query: {
bool: {
filter: [
{
terms: {
[PARENT_ID]: outgoingConnections.map(
(connection) => connection[SPAN_ID]
),
},
...rangeQuery(start, end),
],
},
},
...rangeQuery(start, end),
],
},
size: outgoingConnections.length,
docvalue_fields: asMutableArray([
SERVICE_NAME,
SERVICE_ENVIRONMENT,
AGENT_NAME,
PARENT_ID,
] as const),
_source: false,
},
})
size: outgoingConnections.length,
docvalue_fields: asMutableArray([
SERVICE_NAME,
SERVICE_ENVIRONMENT,
AGENT_NAME,
PARENT_ID,
] as const),
_source: false,
},
}
);

const incomingConnections = transactionResponse.hits.hits.map((hit) => ({
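The exit-span sampling above pairs a composite aggregation (destination resource x event outcome) with a one-hit top_hits, so one sample span comes back per connection and outcome. A trimmed sketch of that aggregation body; the string literals stand in for the imported field-name constants and are assumptions:

const exitSpanSampleAggs = {
  connections: {
    composite: {
      size: 1000,
      sources: [
        { 'span.destination.service.resource': { terms: { field: 'span.destination.service.resource' } } },
        // make sure we get samples for both successful and failed calls
        { 'event.outcome': { terms: { field: 'event.outcome' } } },
      ],
    },
    aggs: {
      sample: { top_hits: { size: 1, sort: [{ '@timestamp': 'desc' }] } },
    },
  },
};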
@@ -18,9 +18,8 @@ import { environmentQuery, rangeQuery } from '../../../../server/utils/queries';
import { getBucketSize } from '../../helpers/get_bucket_size';
import { EventOutcome } from '../../../../common/event_outcome';
import { Setup, SetupTimeRange } from '../../helpers/setup_request';
import { withApmSpan } from '../../../utils/with_apm_span';

export const getMetrics = ({
export const getMetrics = async ({
setup,
serviceName,
environment,

@@ -31,10 +30,11 @@ export const getMetrics = ({
environment?: string;
numBuckets: number;
}) => {
return withApmSpan('get_service_destination_metrics', async () => {
const { start, end, apmEventClient } = setup;
const { start, end, apmEventClient } = setup;

const response = await apmEventClient.search({
const response = await apmEventClient.search(
'get_service_destination_metrics',
{
apm: {
events: [ProcessorEvent.metric],
},

@@ -46,7 +46,9 @@ export const getMetrics = ({
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
{
exists: { field: SPAN_DESTINATION_SERVICE_RESPONSE_TIME_COUNT },
exists: {
field: SPAN_DESTINATION_SERVICE_RESPONSE_TIME_COUNT,
},
},
...rangeQuery(start, end),
...environmentQuery(environment),

@@ -99,47 +101,47 @@ export const getMetrics = ({
},
},
},
});
}
);

return (
response.aggregations?.connections.buckets.map((bucket) => ({
span: {
destination: {
service: {
resource: String(bucket.key),
},
return (
response.aggregations?.connections.buckets.map((bucket) => ({
span: {
destination: {
service: {
resource: String(bucket.key),
},
},
value: {
count: sum(
bucket.timeseries.buckets.map(
(dateBucket) => dateBucket.count.value ?? 0
)
),
latency_sum: sum(
bucket.timeseries.buckets.map(
(dateBucket) => dateBucket.latency_sum.value ?? 0
)
),
error_count: sum(
bucket.timeseries.buckets.flatMap(
(dateBucket) =>
dateBucket[EVENT_OUTCOME].buckets.find(
(outcomeBucket) => outcomeBucket.key === EventOutcome.failure
)?.count.value ?? 0
)
),
},
timeseries: bucket.timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
count: dateBucket.count.value ?? 0,
latency_sum: dateBucket.latency_sum.value ?? 0,
error_count:
dateBucket[EVENT_OUTCOME].buckets.find(
(outcomeBucket) => outcomeBucket.key === EventOutcome.failure
)?.count.value ?? 0,
})),
})) ?? []
);
});
},
value: {
count: sum(
bucket.timeseries.buckets.map(
(dateBucket) => dateBucket.count.value ?? 0
)
),
latency_sum: sum(
bucket.timeseries.buckets.map(
(dateBucket) => dateBucket.latency_sum.value ?? 0
)
),
error_count: sum(
bucket.timeseries.buckets.flatMap(
(dateBucket) =>
dateBucket[EVENT_OUTCOME].buckets.find(
(outcomeBucket) => outcomeBucket.key === EventOutcome.failure
)?.count.value ?? 0
)
),
},
timeseries: bucket.timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
count: dateBucket.count.value ?? 0,
latency_sum: dateBucket.latency_sum.value ?? 0,
error_count:
dateBucket[EVENT_OUTCOME].buckets.find(
(outcomeBucket) => outcomeBucket.key === EventOutcome.failure
)?.count.value ?? 0,
})),
})) ?? []
);
};
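The rollup at the end of getMetrics reduces each connection bucket's timeseries sub-buckets into totals, treating missing values as zero. A small self-contained sketch of that reduction under simplified bucket shapes:

interface DateBucket {
  key: number;
  count: { value: number | null };
  latency_sum: { value: number | null };
}

const sum = (values: number[]) => values.reduce((acc, v) => acc + v, 0);

// Totals across the histogram, with `?? 0` standing in for absent values.
function totalsFor(buckets: DateBucket[]) {
  return {
    count: sum(buckets.map((b) => b.count.value ?? 0)),
    latency_sum: sum(buckets.map((b) => b.latency_sum.value ?? 0)),
  };
}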
@@ -18,7 +18,6 @@ import {
rangeQuery,
kqlQuery,
} from '../../../../server/utils/queries';
import { withApmSpan } from '../../../utils/with_apm_span';
import { getBucketSize } from '../../helpers/get_bucket_size';
import { Setup, SetupTimeRange } from '../../helpers/setup_request';

@@ -43,75 +42,71 @@ export async function getServiceErrorGroupDetailedStatistics({
start: number;
end: number;
}): Promise<Array<{ groupId: string; timeseries: Coordinate[] }>> {
return withApmSpan(
const { apmEventClient } = setup;

const { intervalString } = getBucketSize({ start, end, numBuckets });

const timeseriesResponse = await apmEventClient.search(
'get_service_error_group_detailed_statistics',
async () => {
const { apmEventClient } = setup;

const { intervalString } = getBucketSize({ start, end, numBuckets });

const timeseriesResponse = await apmEventClient.search({
apm: {
events: [ProcessorEvent.error],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ terms: { [ERROR_GROUP_ID]: groupIds } },
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
],
},
{
apm: {
events: [ProcessorEvent.error],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ terms: { [ERROR_GROUP_ID]: groupIds } },
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
],
},
aggs: {
error_groups: {
terms: {
field: ERROR_GROUP_ID,
size: 500,
},
aggs: {
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: {
min: start,
max: end,
},
},
aggs: {
error_groups: {
terms: {
field: ERROR_GROUP_ID,
size: 500,
},
aggs: {
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: {
min: start,
max: end,
},
},
},
},
},
},
});

if (!timeseriesResponse.aggregations) {
return [];
}

return timeseriesResponse.aggregations.error_groups.buckets.map(
(bucket) => {
const groupId = bucket.key as string;
return {
groupId,
timeseries: bucket.timeseries.buckets.map((timeseriesBucket) => {
return {
x: timeseriesBucket.key,
y: timeseriesBucket.doc_count,
};
}),
};
}
);
},
}
);

if (!timeseriesResponse.aggregations) {
return [];
}

return timeseriesResponse.aggregations.error_groups.buckets.map((bucket) => {
const groupId = bucket.key as string;
return {
groupId,
timeseries: bucket.timeseries.buckets.map((timeseriesBucket) => {
return {
x: timeseriesBucket.key,
y: timeseriesBucket.doc_count,
};
}),
};
});
}

export async function getServiceErrorGroupPeriods({
@@ -19,11 +19,10 @@ import {
rangeQuery,
kqlQuery,
} from '../../../../server/utils/queries';
import { withApmSpan } from '../../../utils/with_apm_span';
import { getErrorName } from '../../helpers/get_error_name';
import { Setup, SetupTimeRange } from '../../helpers/setup_request';

export function getServiceErrorGroupMainStatistics({
export async function getServiceErrorGroupMainStatistics({
kuery,
serviceName,
setup,

@@ -36,10 +35,11 @@ export function getServiceErrorGroupMainStatistics({
transactionType: string;
environment?: string;
}) {
return withApmSpan('get_service_error_group_main_statistics', async () => {
const { apmEventClient, start, end } = setup;
const { apmEventClient, start, end } = setup;

const response = await apmEventClient.search({
const response = await apmEventClient.search(
'get_service_error_group_main_statistics',
{
apm: {
events: [ProcessorEvent.error],
},

@@ -79,24 +79,23 @@ export function getServiceErrorGroupMainStatistics({
},
},
},
});
}
);

const errorGroups =
response.aggregations?.error_groups.buckets.map((bucket) => ({
group_id: bucket.key as string,
name:
getErrorName(bucket.sample.hits.hits[0]._source) ??
NOT_AVAILABLE_LABEL,
last_seen: new Date(
bucket.sample.hits.hits[0]?._source['@timestamp']
).getTime(),
occurrences: bucket.doc_count,
})) ?? [];
const errorGroups =
response.aggregations?.error_groups.buckets.map((bucket) => ({
group_id: bucket.key as string,
name:
getErrorName(bucket.sample.hits.hits[0]._source) ?? NOT_AVAILABLE_LABEL,
last_seen: new Date(
bucket.sample.hits.hits[0]?._source['@timestamp']
).getTime(),
occurrences: bucket.doc_count,
})) ?? [];

return {
is_aggregation_accurate:
(response.aggregations?.error_groups.sum_other_doc_count ?? 0) === 0,
error_groups: errorGroups,
};
});
return {
is_aggregation_accurate:
(response.aggregations?.error_groups.sum_other_doc_count ?? 0) === 0,
error_groups: errorGroups,
};
}
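The is_aggregation_accurate flag returned here leans on the terms aggregation's sum_other_doc_count: when no documents fell outside the returned buckets, the error-group list is complete. In sketch form, with a simplified response shape:

interface TermsAgg {
  buckets: Array<{ key: string; doc_count: number }>;
  sum_other_doc_count: number;
}

// Accurate only when no error groups were truncated out of the terms response.
function isAggregationAccurate(errorGroups?: TermsAgg): boolean {
  return (errorGroups?.sum_other_doc_count ?? 0) === 0;
}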
@@ -59,8 +59,9 @@ export async function getServiceErrorGroups({

const { intervalString } = getBucketSize({ start, end, numBuckets });

const response = await withApmSpan('get_top_service_error_groups', () =>
apmEventClient.search({
const response = await apmEventClient.search(
'get_top_service_error_groups',
{
apm: {
events: [ProcessorEvent.error],
},

@@ -104,7 +105,7 @@ export async function getServiceErrorGroups({
},
},
},
})
}
);

const errorGroups =

@@ -139,50 +140,49 @@ export async function getServiceErrorGroups({
(group) => group.group_id
);

const timeseriesResponse = await withApmSpan(
const timeseriesResponse = await apmEventClient.search(
'get_service_error_groups_timeseries',
async () =>
apmEventClient.search({
apm: {
events: [ProcessorEvent.error],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ terms: { [ERROR_GROUP_ID]: sortedErrorGroupIds } },
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
],
},
{
apm: {
events: [ProcessorEvent.error],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ terms: { [ERROR_GROUP_ID]: sortedErrorGroupIds } },
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
],
},
aggs: {
error_groups: {
terms: {
field: ERROR_GROUP_ID,
size,
},
aggs: {
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: {
min: start,
max: end,
},
},
aggs: {
error_groups: {
terms: {
field: ERROR_GROUP_ID,
size,
},
aggs: {
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: {
min: start,
max: end,
},
},
},
},
},
})
},
},
}
);

return {
@@ -11,7 +11,6 @@ import {
TRANSACTION_TYPE,
} from '../../../common/elasticsearch_fieldnames';
import { environmentQuery, kqlQuery, rangeQuery } from '../../utils/queries';
import { withApmSpan } from '../../utils/with_apm_span';
import { getProcessorEventForAggregatedTransactions } from '../helpers/aggregated_transactions';
import { Setup, SetupTimeRange } from '../helpers/setup_request';

@@ -37,18 +36,19 @@ export async function getServiceInstanceMetadataDetails({
environment?: string;
kuery?: string;
}) {
return withApmSpan('get_service_instance_metadata_details', async () => {
const { start, end, apmEventClient } = setup;
const filter = [
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [SERVICE_NODE_NAME]: serviceNodeName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
];
const { start, end, apmEventClient } = setup;
const filter = [
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [SERVICE_NODE_NAME]: serviceNodeName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
];

const response = await apmEventClient.search({
const response = await apmEventClient.search(
'get_service_instance_metadata_details',
{
apm: {
events: [
getProcessorEventForAggregatedTransactions(

@@ -61,24 +61,24 @@ export async function getServiceInstanceMetadataDetails({
size: 1,
query: { bool: { filter } },
},
});

const sample = response.hits.hits[0]?._source;

if (!sample) {
return {};
}
);

const { agent, service, container, kubernetes, host, cloud } = sample;
const sample = response.hits.hits[0]?._source;

return {
'@timestamp': sample['@timestamp'],
agent,
service,
container,
kubernetes,
host,
cloud,
};
});
if (!sample) {
return {};
}

const { agent, service, container, kubernetes, host, cloud } = sample;

return {
'@timestamp': sample['@timestamp'],
agent,
service,
container,
kubernetes,
host,
cloud,
};
}
@@ -24,7 +24,6 @@ import {
percentCgroupMemoryUsedScript,
percentSystemMemoryUsedScript,
} from '../../metrics/by_agent/shared/memory';
import { withApmSpan } from '../../../utils/with_apm_span';

interface ServiceInstanceSystemMetricPrimaryStatistics {
serviceNodeName: string;

@@ -67,142 +66,140 @@ export async function getServiceInstancesSystemMetricStatistics<
size?: number;
isComparisonSearch: T;
}): Promise<Array<ServiceInstanceSystemMetricStatistics<T>>> {
return withApmSpan(
'get_service_instances_system_metric_statistics',
async () => {
const { apmEventClient } = setup;
const { apmEventClient } = setup;

const { intervalString } = getBucketSize({ start, end, numBuckets });
const { intervalString } = getBucketSize({ start, end, numBuckets });

const systemMemoryFilter = {
bool: {
filter: [
{ exists: { field: METRIC_SYSTEM_FREE_MEMORY } },
{ exists: { field: METRIC_SYSTEM_TOTAL_MEMORY } },
],
},
};
const systemMemoryFilter = {
bool: {
filter: [
{ exists: { field: METRIC_SYSTEM_FREE_MEMORY } },
{ exists: { field: METRIC_SYSTEM_TOTAL_MEMORY } },
],
},
};

const cgroupMemoryFilter = {
exists: { field: METRIC_CGROUP_MEMORY_USAGE_BYTES },
};
const cgroupMemoryFilter = {
exists: { field: METRIC_CGROUP_MEMORY_USAGE_BYTES },
};

const cpuUsageFilter = { exists: { field: METRIC_PROCESS_CPU_PERCENT } };
const cpuUsageFilter = { exists: { field: METRIC_PROCESS_CPU_PERCENT } };

function withTimeseries<TParams extends AggregationOptionsByType['avg']>(
agg: TParams
) {
return {
...(isComparisonSearch
? {
avg: { avg: agg },
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: {
min: start,
max: end,
},
},
aggs: { avg: { avg: agg } },
function withTimeseries<TParams extends AggregationOptionsByType['avg']>(
agg: TParams
) {
return {
...(isComparisonSearch
? {
avg: { avg: agg },
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: {
min: start,
max: end,
},
}
: { avg: { avg: agg } }),
};
}

const subAggs = {
memory_usage_cgroup: {
filter: cgroupMemoryFilter,
aggs: withTimeseries({ script: percentCgroupMemoryUsedScript }),
},
memory_usage_system: {
filter: systemMemoryFilter,
aggs: withTimeseries({ script: percentSystemMemoryUsedScript }),
},
cpu_usage: {
filter: cpuUsageFilter,
aggs: withTimeseries({ field: METRIC_PROCESS_CPU_PERCENT }),
},
};

const response = await apmEventClient.search({
apm: {
events: [ProcessorEvent.metric],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
...(isComparisonSearch && serviceNodeIds
? [{ terms: { [SERVICE_NODE_NAME]: serviceNodeIds } }]
: []),
],
should: [cgroupMemoryFilter, systemMemoryFilter, cpuUsageFilter],
minimum_should_match: 1,
},
},
aggs: {
[SERVICE_NODE_NAME]: {
terms: {
field: SERVICE_NODE_NAME,
missing: SERVICE_NODE_NAME_MISSING,
...(size ? { size } : {}),
...(isComparisonSearch ? { include: serviceNodeIds } : {}),
},
aggs: subAggs,
aggs: { avg: { avg: agg } },
},
}
: { avg: { avg: agg } }),
};
}

const subAggs = {
memory_usage_cgroup: {
filter: cgroupMemoryFilter,
aggs: withTimeseries({ script: percentCgroupMemoryUsedScript }),
},
memory_usage_system: {
filter: systemMemoryFilter,
aggs: withTimeseries({ script: percentSystemMemoryUsedScript }),
},
cpu_usage: {
filter: cpuUsageFilter,
aggs: withTimeseries({ field: METRIC_PROCESS_CPU_PERCENT }),
},
};

const response = await apmEventClient.search(
'get_service_instances_system_metric_statistics',
{
apm: {
events: [ProcessorEvent.metric],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
...(isComparisonSearch && serviceNodeIds
? [{ terms: { [SERVICE_NODE_NAME]: serviceNodeIds } }]
: []),
],
should: [cgroupMemoryFilter, systemMemoryFilter, cpuUsageFilter],
minimum_should_match: 1,
},
},
});

return (
(response.aggregations?.[SERVICE_NODE_NAME].buckets.map(
(serviceNodeBucket) => {
const serviceNodeName = String(serviceNodeBucket.key);
const hasCGroupData =
serviceNodeBucket.memory_usage_cgroup.avg.value !== null;

const memoryMetricsKey = hasCGroupData
? 'memory_usage_cgroup'
: 'memory_usage_system';

const cpuUsage =
// Timeseries is available when isComparisonSearch is true
'timeseries' in serviceNodeBucket.cpu_usage
? serviceNodeBucket.cpu_usage.timeseries.buckets.map(
(dateBucket) => ({
x: dateBucket.key,
y: dateBucket.avg.value,
})
)
: serviceNodeBucket.cpu_usage.avg.value;

const memoryUsageValue = serviceNodeBucket[memoryMetricsKey];
const memoryUsage =
// Timeseries is available when isComparisonSearch is true
'timeseries' in memoryUsageValue
? memoryUsageValue.timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
y: dateBucket.avg.value,
}))
: serviceNodeBucket[memoryMetricsKey].avg.value;

return {
serviceNodeName,
cpuUsage,
memoryUsage,
};
}
) as Array<ServiceInstanceSystemMetricStatistics<T>>) || []
);
aggs: {
[SERVICE_NODE_NAME]: {
terms: {
field: SERVICE_NODE_NAME,
missing: SERVICE_NODE_NAME_MISSING,
...(size ? { size } : {}),
...(isComparisonSearch ? { include: serviceNodeIds } : {}),
},
aggs: subAggs,
},
},
},
}
);

return (
(response.aggregations?.[SERVICE_NODE_NAME].buckets.map(
(serviceNodeBucket) => {
const serviceNodeName = String(serviceNodeBucket.key);
const hasCGroupData =
serviceNodeBucket.memory_usage_cgroup.avg.value !== null;

const memoryMetricsKey = hasCGroupData
? 'memory_usage_cgroup'
: 'memory_usage_system';

const cpuUsage =
// Timeseries is available when isComparisonSearch is true
'timeseries' in serviceNodeBucket.cpu_usage
? serviceNodeBucket.cpu_usage.timeseries.buckets.map(
(dateBucket) => ({
x: dateBucket.key,
y: dateBucket.avg.value,
})
)
: serviceNodeBucket.cpu_usage.avg.value;

const memoryUsageValue = serviceNodeBucket[memoryMetricsKey];
const memoryUsage =
// Timeseries is available when isComparisonSearch is true
'timeseries' in memoryUsageValue
? memoryUsageValue.timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
y: dateBucket.avg.value,
}))
: serviceNodeBucket[memoryMetricsKey].avg.value;

return {
serviceNodeName,
cpuUsage,
memoryUsage,
};
}
) as Array<ServiceInstanceSystemMetricStatistics<T>>) || []
);
}
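withTimeseries in the hunk above switches the shape of an avg aggregation: plain for a single-value search, nested inside a date_histogram when a comparison search needs per-bucket values. A generic sketch of that helper; parameter names are illustrative:

function withTimeseries(
  agg: { field?: string; script?: unknown },
  opts: { isComparisonSearch: boolean; intervalString: string; start: number; end: number }
) {
  // Either a plain avg, or the same avg repeated per histogram bucket.
  return opts.isComparisonSearch
    ? {
        avg: { avg: agg },
        timeseries: {
          date_histogram: {
            field: '@timestamp',
            fixed_interval: opts.intervalString,
            min_doc_count: 0,
            extended_bounds: { min: opts.start, max: opts.end },
          },
          aggs: { avg: { avg: agg } },
        },
      }
    : { avg: { avg: agg } };
}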
@@ -26,7 +26,6 @@ import {
getLatencyValue,
} from '../../helpers/latency_aggregation_type';
import { Setup } from '../../helpers/setup_request';
import { withApmSpan } from '../../../utils/with_apm_span';

interface ServiceInstanceTransactionPrimaryStatistics {
serviceNodeName: string;

@@ -77,126 +76,124 @@ export async function getServiceInstancesTransactionStatistics<
size?: number;
numBuckets?: number;
}): Promise<Array<ServiceInstanceTransactionStatistics<T>>> {
return withApmSpan(
'get_service_instances_transaction_statistics',
async () => {
const { apmEventClient } = setup;
const { apmEventClient } = setup;

const { intervalString, bucketSize } = getBucketSize({
start,
end,
numBuckets,
});
const { intervalString, bucketSize } = getBucketSize({
start,
end,
numBuckets,
});

const field = getTransactionDurationFieldForAggregatedTransactions(
searchAggregatedTransactions
);
const field = getTransactionDurationFieldForAggregatedTransactions(
searchAggregatedTransactions
);

const subAggs = {
...getLatencyAggregation(latencyAggregationType, field),
failures: {
filter: {
term: {
[EVENT_OUTCOME]: EventOutcome.failure,
const subAggs = {
...getLatencyAggregation(latencyAggregationType, field),
failures: {
filter: {
term: {
[EVENT_OUTCOME]: EventOutcome.failure,
},
},
},
};

const query = {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
...(isComparisonSearch && serviceNodeIds
? [{ terms: { [SERVICE_NODE_NAME]: serviceNodeIds } }]
: []),
],
},
};

const aggs = {
[SERVICE_NODE_NAME]: {
terms: {
field: SERVICE_NODE_NAME,
missing: SERVICE_NODE_NAME_MISSING,
...(size ? { size } : {}),
...(isComparisonSearch ? { include: serviceNodeIds } : {}),
},
aggs: isComparisonSearch
? {
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: { min: start, max: end },
},
aggs: subAggs,
},
},
},
};

const query = {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
...(isComparisonSearch && serviceNodeIds
? [{ terms: { [SERVICE_NODE_NAME]: serviceNodeIds } }]
: []),
],
},
};

const aggs = {
[SERVICE_NODE_NAME]: {
terms: {
field: SERVICE_NODE_NAME,
missing: SERVICE_NODE_NAME_MISSING,
...(size ? { size } : {}),
...(isComparisonSearch ? { include: serviceNodeIds } : {}),
},
aggs: isComparisonSearch
? {
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: { min: start, max: end },
},
aggs: subAggs,
},
}
: subAggs,
},
};

const response = await apmEventClient.search({
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
],
},
body: { size: 0, query, aggs },
});

const bucketSizeInMinutes = bucketSize / 60;

return (
(response.aggregations?.[SERVICE_NODE_NAME].buckets.map(
(serviceNodeBucket) => {
const { doc_count: count, key } = serviceNodeBucket;
const serviceNodeName = String(key);

// Timeseries is returned when isComparisonSearch is true
if ('timeseries' in serviceNodeBucket) {
const { timeseries } = serviceNodeBucket;
return {
serviceNodeName,
errorRate: timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
y: dateBucket.failures.doc_count / dateBucket.doc_count,
})),
throughput: timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
y: dateBucket.doc_count / bucketSizeInMinutes,
})),
latency: timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
y: getLatencyValue({
aggregation: dateBucket.latency,
latencyAggregationType,
}),
})),
};
} else {
const { failures, latency } = serviceNodeBucket;
return {
serviceNodeName,
errorRate: failures.doc_count / count,
latency: getLatencyValue({
aggregation: latency,
latencyAggregationType,
}),
throughput: calculateThroughput({ start, end, value: count }),
};
}
}
) as Array<ServiceInstanceTransactionStatistics<T>>) || []
);
: subAggs,
},
};

const response = await apmEventClient.search(
'get_service_instances_transaction_statistics',
{
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
],
},
body: { size: 0, query, aggs },
}
);

const bucketSizeInMinutes = bucketSize / 60;

return (
(response.aggregations?.[SERVICE_NODE_NAME].buckets.map(
(serviceNodeBucket) => {
const { doc_count: count, key } = serviceNodeBucket;
const serviceNodeName = String(key);

// Timeseries is returned when isComparisonSearch is true
if ('timeseries' in serviceNodeBucket) {
const { timeseries } = serviceNodeBucket;
return {
serviceNodeName,
errorRate: timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
y: dateBucket.failures.doc_count / dateBucket.doc_count,
})),
throughput: timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
y: dateBucket.doc_count / bucketSizeInMinutes,
})),
latency: timeseries.buckets.map((dateBucket) => ({
x: dateBucket.key,
y: getLatencyValue({
aggregation: dateBucket.latency,
latencyAggregationType,
}),
})),
};
} else {
const { failures, latency } = serviceNodeBucket;
return {
serviceNodeName,
errorRate: failures.doc_count / count,
latency: getLatencyValue({
aggregation: latency,
latencyAggregationType,
}),
throughput: calculateThroughput({ start, end, value: count }),
};
}
}
) as Array<ServiceInstanceTransactionStatistics<T>>) || []
);
}
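The per-bucket throughput above is documents per minute: dividing bucketSize by 60 gives the bucket length in minutes, assuming bucketSize is in seconds, which is consistent with the division the code performs. As arithmetic:

// Throughput per histogram bucket, in documents per minute.
function bucketThroughput(docCount: number, bucketSizeInSeconds: number): number {
  const bucketSizeInMinutes = bucketSizeInSeconds / 60;
  return docCount / bucketSizeInMinutes;
}

// Example: 120 docs in a 60-second bucket is 120 docs per minute.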
@@ -25,7 +25,6 @@ import { TransactionRaw } from '../../../typings/es_schemas/raw/transaction_raw'
import { getProcessorEventForAggregatedTransactions } from '../helpers/aggregated_transactions';
import { Setup, SetupTimeRange } from '../helpers/setup_request';
import { should } from './get_service_metadata_icons';
import { withApmSpan } from '../../utils/with_apm_span';

type ServiceMetadataDetailsRaw = Pick<
TransactionRaw,

@@ -59,7 +58,7 @@ export interface ServiceMetadataDetails {
};
}

export function getServiceMetadataDetails({
export async function getServiceMetadataDetails({
serviceName,
setup,
searchAggregatedTransactions,

@@ -68,105 +67,106 @@ export function getServiceMetadataDetails({
setup: Setup & SetupTimeRange;
searchAggregatedTransactions: boolean;
}): Promise<ServiceMetadataDetails> {
return withApmSpan('get_service_metadata_details', async () => {
const { start, end, apmEventClient } = setup;
const { start, end, apmEventClient } = setup;

const filter = [
{ term: { [SERVICE_NAME]: serviceName } },
...rangeQuery(start, end),
];
const filter = [
{ term: { [SERVICE_NAME]: serviceName } },
...rangeQuery(start, end),
];

const params = {
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
ProcessorEvent.error,
ProcessorEvent.metric,
],
},
body: {
size: 1,
_source: [SERVICE, AGENT, HOST, CONTAINER_ID, KUBERNETES, CLOUD],
query: { bool: { filter, should } },
aggs: {
serviceVersions: {
terms: {
field: SERVICE_VERSION,
size: 10,
order: { _key: 'desc' as const },
},
const params = {
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
ProcessorEvent.error,
ProcessorEvent.metric,
],
},
body: {
size: 1,
_source: [SERVICE, AGENT, HOST, CONTAINER_ID, KUBERNETES, CLOUD],
query: { bool: { filter, should } },
aggs: {
serviceVersions: {
terms: {
field: SERVICE_VERSION,
size: 10,
order: { _key: 'desc' as const },
},
availabilityZones: {
terms: {
field: CLOUD_AVAILABILITY_ZONE,
size: 10,
},
},
machineTypes: {
terms: {
field: CLOUD_MACHINE_TYPE,
size: 10,
},
},
totalNumberInstances: { cardinality: { field: SERVICE_NODE_NAME } },
},
availabilityZones: {
terms: {
field: CLOUD_AVAILABILITY_ZONE,
size: 10,
},
},
machineTypes: {
terms: {
field: CLOUD_MACHINE_TYPE,
size: 10,
},
},
totalNumberInstances: { cardinality: { field: SERVICE_NODE_NAME } },
},
},
};

const response = await apmEventClient.search(
'get_service_metadata_details',
params
);

if (response.hits.total.value === 0) {
return {
service: undefined,
container: undefined,
cloud: undefined,
};
}

const response = await apmEventClient.search(params);
const { service, agent, host, kubernetes, container, cloud } = response.hits
.hits[0]._source as ServiceMetadataDetailsRaw;

if (response.hits.total.value === 0) {
return {
service: undefined,
container: undefined,
cloud: undefined,
};
}
const serviceMetadataDetails = {
versions: response.aggregations?.serviceVersions.buckets.map(
(bucket) => bucket.key as string
),
runtime: service.runtime,
framework: service.framework?.name,
agent,
};

const { service, agent, host, kubernetes, container, cloud } = response.hits
.hits[0]._source as ServiceMetadataDetailsRaw;
const totalNumberInstances =
response.aggregations?.totalNumberInstances.value;

const serviceMetadataDetails = {
versions: response.aggregations?.serviceVersions.buckets.map(
(bucket) => bucket.key as string
),
runtime: service.runtime,
framework: service.framework?.name,
agent,
};

const totalNumberInstances =
response.aggregations?.totalNumberInstances.value;

const containerDetails =
host || container || totalNumberInstances || kubernetes
? {
os: host?.os?.platform,
type: (!!kubernetes ? 'Kubernetes' : 'Docker') as ContainerType,
isContainerized: !!container?.id,
totalNumberInstances,
}
: undefined;

const cloudDetails = cloud
const containerDetails =
host || container || totalNumberInstances || kubernetes
? {
provider: cloud.provider,
projectName: cloud.project?.name,
availabilityZones: response.aggregations?.availabilityZones.buckets.map(
(bucket) => bucket.key as string
),
machineTypes: response.aggregations?.machineTypes.buckets.map(
(bucket) => bucket.key as string
),
os: host?.os?.platform,
type: (!!kubernetes ? 'Kubernetes' : 'Docker') as ContainerType,
isContainerized: !!container?.id,
totalNumberInstances,
}
: undefined;

return {
service: serviceMetadataDetails,
container: containerDetails,
cloud: cloudDetails,
};
});
const cloudDetails = cloud
? {
provider: cloud.provider,
projectName: cloud.project?.name,
availabilityZones: response.aggregations?.availabilityZones.buckets.map(
(bucket) => bucket.key as string
),
machineTypes: response.aggregations?.machineTypes.buckets.map(
(bucket) => bucket.key as string
),
}
: undefined;

return {
service: serviceMetadataDetails,
container: containerDetails,
cloud: cloudDetails,
};
}
@@ -20,7 +20,6 @@ import { rangeQuery } from '../../../server/utils/queries';
import { TransactionRaw } from '../../../typings/es_schemas/raw/transaction_raw';
import { getProcessorEventForAggregatedTransactions } from '../helpers/aggregated_transactions';
import { Setup, SetupTimeRange } from '../helpers/setup_request';
import { withApmSpan } from '../../utils/with_apm_span';

type ServiceMetadataIconsRaw = Pick<
TransactionRaw,

@@ -41,7 +40,7 @@ export const should = [
{ exists: { field: AGENT_NAME } },
];

export function getServiceMetadataIcons({
export async function getServiceMetadataIcons({
serviceName,
setup,
searchAggregatedTransactions,

@@ -50,55 +49,56 @@ export function getServiceMetadataIcons({
setup: Setup & SetupTimeRange;
searchAggregatedTransactions: boolean;
}): Promise<ServiceMetadataIcons> {
return withApmSpan('get_service_metadata_icons', async () => {
const { start, end, apmEventClient } = setup;
const { start, end, apmEventClient } = setup;

const filter = [
{ term: { [SERVICE_NAME]: serviceName } },
...rangeQuery(start, end),
];
const filter = [
{ term: { [SERVICE_NAME]: serviceName } },
...rangeQuery(start, end),
];

const params = {
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
ProcessorEvent.error,
ProcessorEvent.metric,
],
},
body: {
size: 1,
_source: [KUBERNETES, CLOUD_PROVIDER, CONTAINER_ID, AGENT_NAME],
query: { bool: { filter, should } },
},
};
const params = {
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
ProcessorEvent.error,
ProcessorEvent.metric,
],
},
body: {
size: 1,
_source: [KUBERNETES, CLOUD_PROVIDER, CONTAINER_ID, AGENT_NAME],
query: { bool: { filter, should } },
},
};

const response = await apmEventClient.search(params);

if (response.hits.total.value === 0) {
return {
agentName: undefined,
containerType: undefined,
cloudProvider: undefined,
};
}

const { kubernetes, cloud, container, agent } = response.hits.hits[0]
._source as ServiceMetadataIconsRaw;

let containerType: ContainerType;
if (!!kubernetes) {
containerType = 'Kubernetes';
} else if (!!container) {
containerType = 'Docker';
}
const response = await apmEventClient.search(
'get_service_metadata_icons',
params
);

if (response.hits.total.value === 0) {
return {
agentName: agent?.name,
containerType,
cloudProvider: cloud?.provider,
agentName: undefined,
containerType: undefined,
cloudProvider: undefined,
};
});
}

const { kubernetes, cloud, container, agent } = response.hits.hits[0]
._source as ServiceMetadataIconsRaw;

let containerType: ContainerType;
if (!!kubernetes) {
containerType = 'Kubernetes';
} else if (!!container) {
containerType = 'Docker';
}

return {
agentName: agent?.name,
containerType,
cloudProvider: cloud?.provider,
};
}
@@ -13,9 +13,8 @@ import {
import { NOT_AVAILABLE_LABEL } from '../../../common/i18n';
import { mergeProjection } from '../../projections/util/merge_projection';
import { getServiceNodesProjection } from '../../projections/service_nodes';
import { withApmSpan } from '../../utils/with_apm_span';

export function getServiceNodeMetadata({
export async function getServiceNodeMetadata({
kuery,
serviceName,
serviceNodeName,

@@ -26,44 +25,44 @@ export function getServiceNodeMetadata({
serviceNodeName: string;
setup: Setup & SetupTimeRange;
}) {
return withApmSpan('get_service_node_metadata', async () => {
const { apmEventClient } = setup;
const { apmEventClient } = setup;

const query = mergeProjection(
getServiceNodesProjection({
kuery,
setup,
serviceName,
serviceNodeName,
}),
{
body: {
size: 0,
aggs: {
host: {
terms: {
field: HOST_NAME,
size: 1,
},
const query = mergeProjection(
getServiceNodesProjection({
kuery,
setup,
serviceName,
serviceNodeName,
}),
{
body: {
size: 0,
aggs: {
host: {
terms: {
field: HOST_NAME,
size: 1,
},
containerId: {
terms: {
field: CONTAINER_ID,
size: 1,
},
},
containerId: {
terms: {
field: CONTAINER_ID,
size: 1,
},
},
},
}
);
},
}
);

const response = await apmEventClient.search(query);
const response = await apmEventClient.search(
'get_service_node_metadata',
query
);

return {
host: response.aggregations?.host.buckets[0]?.key || NOT_AVAILABLE_LABEL,
containerId:
response.aggregations?.containerId.buckets[0]?.key ||
NOT_AVAILABLE_LABEL,
};
});
return {
host: response.aggregations?.host.buckets[0]?.key || NOT_AVAILABLE_LABEL,
containerId:
response.aggregations?.containerId.buckets[0]?.key || NOT_AVAILABLE_LABEL,
};
}
@@ -21,7 +21,6 @@ import {
kqlQuery,
} from '../../../server/utils/queries';
import { Coordinate } from '../../../typings/timeseries';
import { withApmSpan } from '../../utils/with_apm_span';
import {
getDocumentTypeFilterForAggregatedTransactions,
getProcessorEventForAggregatedTransactions,

@@ -68,121 +67,115 @@ export async function getServiceTransactionGroupDetailedStatistics({
impact: number;
}>
> {
return withApmSpan(
const { apmEventClient } = setup;
const { intervalString } = getBucketSize({ start, end, numBuckets });

const field = getTransactionDurationFieldForAggregatedTransactions(
searchAggregatedTransactions
);

const response = await apmEventClient.search(
'get_service_transaction_group_detailed_statistics',
async () => {
const { apmEventClient } = setup;
const { intervalString } = getBucketSize({ start, end, numBuckets });

const field = getTransactionDurationFieldForAggregatedTransactions(
searchAggregatedTransactions
);

const response = await apmEventClient.search({
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...getDocumentTypeFilterForAggregatedTransactions(
searchAggregatedTransactions
),
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
],
},
},
aggs: {
total_duration: { sum: { field } },
transaction_groups: {
terms: {
field: TRANSACTION_NAME,
include: transactionNames,
size: transactionNames.length,
},
aggs: {
transaction_group_total_duration: {
sum: { field },
},
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: {
min: start,
max: end,
},
},
aggs: {
throughput_rate: {
rate: {
unit: 'minute',
},
},
...getLatencyAggregation(latencyAggregationType, field),
[EVENT_OUTCOME]: {
terms: {
field: EVENT_OUTCOME,
include: [EventOutcome.failure, EventOutcome.success],
},
},
},
},
},
},
},
});

const buckets = response.aggregations?.transaction_groups.buckets ?? [];

const totalDuration = response.aggregations?.total_duration.value;
return buckets.map((bucket) => {
const transactionName = bucket.key as string;
const latency = bucket.timeseries.buckets.map((timeseriesBucket) => ({
x: timeseriesBucket.key,
y: getLatencyValue({
latencyAggregationType,
aggregation: timeseriesBucket.latency,
}),
}));
const throughput = bucket.timeseries.buckets.map(
(timeseriesBucket) => ({
x: timeseriesBucket.key,
y: timeseriesBucket.throughput_rate.value,
})
);
const errorRate = bucket.timeseries.buckets.map((timeseriesBucket) => ({
x: timeseriesBucket.key,
y: calculateTransactionErrorPercentage(
timeseriesBucket[EVENT_OUTCOME]
{
apm: {
events: [
getProcessorEventForAggregatedTransactions(
searchAggregatedTransactions
),
}));
const transactionGroupTotalDuration =
bucket.transaction_group_total_duration.value || 0;
return {
transactionName,
latency,
throughput,
errorRate,
impact: totalDuration
? (transactionGroupTotalDuration * 100) / totalDuration
: 0,
};
});
],
},
body: {
size: 0,
query: {
bool: {
filter: [
{ term: { [SERVICE_NAME]: serviceName } },
{ term: { [TRANSACTION_TYPE]: transactionType } },
...getDocumentTypeFilterForAggregatedTransactions(
searchAggregatedTransactions
),
...rangeQuery(start, end),
...environmentQuery(environment),
...kqlQuery(kuery),
],
},
},
aggs: {
total_duration: { sum: { field } },
transaction_groups: {
terms: {
field: TRANSACTION_NAME,
include: transactionNames,
size: transactionNames.length,
},
aggs: {
transaction_group_total_duration: {
sum: { field },
},
timeseries: {
date_histogram: {
field: '@timestamp',
fixed_interval: intervalString,
min_doc_count: 0,
extended_bounds: {
min: start,
max: end,
},
},
aggs: {
throughput_rate: {
rate: {
unit: 'minute',
},
},
...getLatencyAggregation(latencyAggregationType, field),
[EVENT_OUTCOME]: {
terms: {
field: EVENT_OUTCOME,
include: [EventOutcome.failure, EventOutcome.success],
},
},
},
},
},
},
},
},
}
);

const buckets = response.aggregations?.transaction_groups.buckets ?? [];

const totalDuration = response.aggregations?.total_duration.value;
return buckets.map((bucket) => {
const transactionName = bucket.key as string;
const latency = bucket.timeseries.buckets.map((timeseriesBucket) => ({
x: timeseriesBucket.key,
y: getLatencyValue({
latencyAggregationType,
aggregation: timeseriesBucket.latency,
}),
}));
const throughput = bucket.timeseries.buckets.map((timeseriesBucket) => ({
x: timeseriesBucket.key,
y: timeseriesBucket.throughput_rate.value,
}));
const errorRate = bucket.timeseries.buckets.map((timeseriesBucket) => ({
x: timeseriesBucket.key,
y: calculateTransactionErrorPercentage(timeseriesBucket[EVENT_OUTCOME]),
}));
const transactionGroupTotalDuration =
bucket.transaction_group_total_duration.value || 0;
return {
transactionName,
latency,
throughput,
errorRate,
impact: totalDuration
? (transactionGroupTotalDuration * 100) / totalDuration
: 0,
};
});
}
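Unlike the per-instance statistics earlier, this file lets Elasticsearch compute throughput directly with a rate sub-aggregation (unit: 'minute') inside the date histogram, instead of dividing doc_count by the bucket length. The aggregation fragment in isolation, with an illustrative interval:

const timeseriesAgg = {
  timeseries: {
    date_histogram: {
      field: '@timestamp',
      fixed_interval: '60s', // illustrative
      min_doc_count: 0,
    },
    aggs: {
      // Per-bucket throughput computed server-side by Elasticsearch.
      throughput_rate: { rate: { unit: 'minute' } },
    },
  },
};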

export async function getServiceTransactionGroupDetailedStatisticsPeriods({
@ -18,7 +18,6 @@ import {
|
|||
rangeQuery,
|
||||
kqlQuery,
|
||||
} from '../../../server/utils/queries';
|
||||
import { withApmSpan } from '../../utils/with_apm_span';
|
||||
import {
|
||||
getDocumentTypeFilterForAggregatedTransactions,
|
||||
getProcessorEventForAggregatedTransactions,
|
||||
|
@@ -56,14 +55,15 @@ export async function getServiceTransactionGroups({
   transactionType: string;
   latencyAggregationType: LatencyAggregationType;
 }) {
-  return withApmSpan('get_service_transaction_groups', async () => {
   const { apmEventClient, start, end } = setup;

   const field = getTransactionDurationFieldForAggregatedTransactions(
     searchAggregatedTransactions
   );

-  const response = await apmEventClient.search({
+  const response = await apmEventClient.search(
+    'get_service_transaction_groups',
+    {
       apm: {
         events: [
           getProcessorEventForAggregatedTransactions(
@@ -110,45 +110,45 @@ export async function getServiceTransactionGroups({
         },
       },
     },
-  });
+    }
+  );

   const totalDuration = response.aggregations?.total_duration.value;

   const transactionGroups =
     response.aggregations?.transaction_groups.buckets.map((bucket) => {
       const errorRate = calculateTransactionErrorPercentage(
         bucket[EVENT_OUTCOME]
       );

       const transactionGroupTotalDuration =
         bucket.transaction_group_total_duration.value || 0;

       return {
         name: bucket.key as string,
         latency: getLatencyValue({
           latencyAggregationType,
           aggregation: bucket.latency,
         }),
         throughput: calculateThroughput({
           start,
           end,
           value: bucket.doc_count,
         }),
         errorRate,
         impact: totalDuration
           ? (transactionGroupTotalDuration * 100) / totalDuration
           : 0,
       };
     }) ?? [];

   return {
     transactionGroups: transactionGroups.map((transactionGroup) => ({
       ...transactionGroup,
       transactionType,
     })),
     isAggregationAccurate:
       (response.aggregations?.transaction_groups.sum_other_doc_count ?? 0) ===
       0,
   };
-  });
 }
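
The change above is the pattern this whole commit enforces: `apmEventClient.search` now takes the span name as a required first argument and the request as its second, instead of a bare request object. A minimal, self-contained sketch of the idea under stated assumptions; the names here (`namedSearch`, `doSearch`) are illustrative, and the console calls merely stand in for the span start/end that Kibana's client performs via `withApmSpan` internally:

    // Sketch: make the operation name a required first parameter, so every
    // Elasticsearch search is guaranteed to run inside a named APM span.
    type SearchRequest = { body: Record<string, unknown> };

    async function namedSearch<T>(
      operationName: string, // becomes the span name, e.g. 'get_service_transaction_groups'
      request: SearchRequest,
      doSearch: (request: SearchRequest) => Promise<T>
    ): Promise<T> {
      console.log(`span start: ${operationName}`);
      try {
        return await doSearch(request);
      } finally {
        console.log(`span end: ${operationName}`);
      }
    }

Because the name is a positional parameter rather than an optional wrapper, the type checker rejects any call site that forgets to name its span.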
@@ -15,9 +15,8 @@ import {
   getDocumentTypeFilterForAggregatedTransactions,
   getProcessorEventForAggregatedTransactions,
 } from '../helpers/aggregated_transactions';
-import { withApmSpan } from '../../utils/with_apm_span';

-export function getServiceTransactionTypes({
+export async function getServiceTransactionTypes({
   setup,
   serviceName,
   searchAggregatedTransactions,
@@ -26,41 +25,42 @@ export function getServiceTransactionTypes({
   setup: Setup & SetupTimeRange;
   searchAggregatedTransactions: boolean;
 }) {
-  return withApmSpan('get_service_transaction_types', async () => {
   const { start, end, apmEventClient } = setup;

   const params = {
     apm: {
       events: [
         getProcessorEventForAggregatedTransactions(
           searchAggregatedTransactions
         ),
       ],
     },
     body: {
       size: 0,
       query: {
         bool: {
           filter: [
             ...getDocumentTypeFilterForAggregatedTransactions(
               searchAggregatedTransactions
             ),
             { term: { [SERVICE_NAME]: serviceName } },
             ...rangeQuery(start, end),
           ],
         },
       },
       aggs: {
         types: {
           terms: { field: TRANSACTION_TYPE, size: 100 },
         },
       },
     },
   };

-  const { aggregations } = await apmEventClient.search(params);
+  const { aggregations } = await apmEventClient.search(
+    'get_service_transaction_types',
+    params
+  );
   const transactionTypes =
     aggregations?.types.buckets.map((bucket) => bucket.key as string) || [];
   return { transactionTypes };
-  });
 }
@@ -9,35 +9,31 @@ import { rangeQuery } from '../../../../server/utils/queries';
 import { ProcessorEvent } from '../../../../common/processor_event';
 import { OBSERVER_VERSION_MAJOR } from '../../../../common/elasticsearch_fieldnames';
 import { Setup, SetupTimeRange } from '../../helpers/setup_request';
-import { withApmSpan } from '../../../utils/with_apm_span';

 // returns true if 6.x data is found
 export async function getLegacyDataStatus(setup: Setup & SetupTimeRange) {
-  return withApmSpan('get_legacy_data_status', async () => {
   const { apmEventClient, start, end } = setup;

   const params = {
     terminateAfter: 1,
     apm: {
       events: [ProcessorEvent.transaction],
+      includeLegacyData: true,
     },
     body: {
       size: 0,
       query: {
         bool: {
           filter: [
             { range: { [OBSERVER_VERSION_MAJOR]: { lt: 7 } } },
             ...rangeQuery(start, end),
           ],
         },
       },
     },
   };

-  const resp = await apmEventClient.search(params, {
-    includeLegacyData: true,
-  });
+  const resp = await apmEventClient.search('get_legacy_data_status', params);
   const hasLegacyData = resp.hits.total.value > 0;
   return hasLegacyData;
-  });
 }
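
A side effect of the new `(name, params)` signature is visible above: the old second options argument is gone, so `includeLegacyData` moves into the `apm` block of the request itself. A self-contained sketch of the shape migration; the interface names are illustrative, not Kibana's real types:

    // Before: search(params, { includeLegacyData: true })
    // After:  search(name, params), with includeLegacyData folded into params.apm
    interface ApmOptions {
      events: string[];
      includeLegacyData?: boolean; // now part of the request, not a trailing options bag
    }

    interface ApmSearchParams {
      terminateAfter?: number;
      apm: ApmOptions;
      body: Record<string, unknown>;
    }

    const params: ApmSearchParams = {
      terminateAfter: 1,
      apm: { events: ['transaction'], includeLegacyData: true },
      body: { size: 0 },
    };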
@@ -33,7 +33,6 @@ import {
   getOutcomeAggregation,
 } from '../../helpers/transaction_error_rate';
 import { ServicesItemsSetup } from './get_services_items';
-import { withApmSpan } from '../../../utils/with_apm_span';

 interface AggregationParams {
   environment?: string;
@@ -50,23 +49,24 @@ export async function getServiceTransactionStats({
   searchAggregatedTransactions,
   maxNumServices,
 }: AggregationParams) {
-  return withApmSpan('get_service_transaction_stats', async () => {
   const { apmEventClient, start, end } = setup;

   const outcomes = getOutcomeAggregation();

   const metrics = {
     avg_duration: {
       avg: {
         field: getTransactionDurationFieldForAggregatedTransactions(
           searchAggregatedTransactions
         ),
       },
     },
     outcomes,
   };

-  const response = await apmEventClient.search({
+  const response = await apmEventClient.search(
+    'get_service_transaction_stats',
+    {
       apm: {
         events: [
           getProcessorEventForAggregatedTransactions(
@@ -133,64 +133,64 @@ export async function getServiceTransactionStats({
         },
       },
     },
-  });
+    }
+  );

   return (
     response.aggregations?.services.buckets.map((bucket) => {
       const topTransactionTypeBucket =
         bucket.transactionType.buckets.find(
           ({ key }) =>
             key === TRANSACTION_REQUEST || key === TRANSACTION_PAGE_LOAD
         ) ?? bucket.transactionType.buckets[0];

       return {
         serviceName: bucket.key as string,
         transactionType: topTransactionTypeBucket.key as string,
         environments: topTransactionTypeBucket.environments.buckets.map(
           (environmentBucket) => environmentBucket.key as string
         ),
         agentName: topTransactionTypeBucket.sample.top[0].metrics[
           AGENT_NAME
         ] as AgentName,
         avgResponseTime: {
           value: topTransactionTypeBucket.avg_duration.value,
           timeseries: topTransactionTypeBucket.timeseries.buckets.map(
             (dateBucket) => ({
               x: dateBucket.key,
               y: dateBucket.avg_duration.value,
             })
           ),
         },
         transactionErrorRate: {
           value: calculateTransactionErrorPercentage(
             topTransactionTypeBucket.outcomes
           ),
           timeseries: topTransactionTypeBucket.timeseries.buckets.map(
             (dateBucket) => ({
               x: dateBucket.key,
               y: calculateTransactionErrorPercentage(dateBucket.outcomes),
             })
           ),
         },
         transactionsPerMinute: {
           value: calculateThroughput({
             start,
             end,
             value: topTransactionTypeBucket.doc_count,
           }),
           timeseries: topTransactionTypeBucket.timeseries.buckets.map(
             (dateBucket) => ({
               x: dateBucket.key,
               y: calculateThroughput({
                 start,
                 end,
                 value: dateBucket.doc_count,
               }),
             })
           ),
         },
       };
     }) ?? []
   );
-  });
 }
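
`calculateThroughput` above turns a bucket's document count into a per-minute rate over the queried time range. A sketch of that arithmetic, assuming the helper simply divides the count by the range length in minutes; the Kibana implementation may differ in detail:

    // Assumed behavior: throughput = documents per minute over [start, end].
    function calculateThroughput({
      start,
      end,
      value,
    }: {
      start: number; // range start, ms since epoch
      end: number; // range end, ms since epoch
      value: number; // document count in the range
    }): number {
      const durationAsMinutes = (end - start) / 1000 / 60;
      return value / durationAsMinutes;
    }

    // Example: 600 docs over a 10-minute window -> 60 docs/minute.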
@@ -14,9 +14,8 @@ import {
 import { environmentQuery, kqlQuery, rangeQuery } from '../../../utils/queries';
 import { ProcessorEvent } from '../../../../common/processor_event';
 import { Setup, SetupTimeRange } from '../../helpers/setup_request';
-import { withApmSpan } from '../../../utils/with_apm_span';

-export function getServicesFromMetricDocuments({
+export async function getServicesFromMetricDocuments({
   environment,
   setup,
   maxNumServices,
@@ -27,10 +26,11 @@ export function getServicesFromMetricDocuments({
   maxNumServices: number;
   kuery?: string;
 }) {
-  return withApmSpan('get_services_from_metric_documents', async () => {
   const { apmEventClient, start, end } = setup;

-  const response = await apmEventClient.search({
+  const response = await apmEventClient.search(
+    'get_services_from_metric_documents',
+    {
       apm: {
         events: [ProcessorEvent.metric],
       },
@@ -67,18 +67,18 @@ export function getServicesFromMetricDocuments({
         },
       },
     },
-  });
+    }
+  );

   return (
     response.aggregations?.services.buckets.map((bucket) => {
       return {
         serviceName: bucket.key as string,
         environments: bucket.environments.buckets.map(
           (envBucket) => envBucket.key as string
         ),
         agentName: bucket.latest.top[0].metrics[AGENT_NAME] as AgentName,
       };
     }) ?? []
   );
-  });
 }
@@ -6,29 +6,26 @@
 */

 import { ProcessorEvent } from '../../../../common/processor_event';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { Setup } from '../../helpers/setup_request';

 // Note: this logic is duplicated in tutorials/apm/envs/on_prem
 export async function hasHistoricalAgentData(setup: Setup) {
-  return withApmSpan('has_historical_agent_data', async () => {
   const { apmEventClient } = setup;

   const params = {
     terminateAfter: 1,
     apm: {
       events: [
         ProcessorEvent.error,
         ProcessorEvent.metric,
         ProcessorEvent.transaction,
       ],
     },
     body: {
       size: 0,
     },
   };

-  const resp = await apmEventClient.search(params);
-  return resp.hits.total.value > 0;
-  });
+  const resp = await apmEventClient.search('has_historical_agent_data', params);
+  return resp.hits.total.value > 0;
 }
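
`hasHistoricalAgentData` is a pure existence check: `terminateAfter: 1` lets each shard stop after its first match and `size: 0` skips fetching documents, so only `hits.total.value` matters. A self-contained sketch of the same check against the vanilla Elasticsearch client; the index name and client setup are illustrative, not part of this commit:

    import { Client } from '@elastic/elasticsearch';

    // Sketch: does any document exist at all? Stop early and fetch nothing.
    async function hasAnyData(client: Client, index: string): Promise<boolean> {
      const resp = await client.search({
        index,
        terminate_after: 1, // each shard may stop after one matching doc
        size: 0, // we only need the total, not the documents
        track_total_hits: 1, // counting a single hit is enough for a boolean
      });
      const total = resp.hits.total;
      const value = typeof total === 'number' ? total : total?.value ?? 0;
      return value > 0;
    }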
@@ -21,7 +21,6 @@ import {
 } from '../helpers/aggregated_transactions';
 import { getBucketSize } from '../helpers/get_bucket_size';
 import { Setup } from '../helpers/setup_request';
-import { withApmSpan } from '../../utils/with_apm_span';

 interface Options {
   environment?: string;
@@ -88,20 +87,18 @@ function fetcher({
     },
   };

-  return apmEventClient.search(params);
+  return apmEventClient.search('get_throughput_for_service', params);
 }

-export function getThroughput(options: Options) {
-  return withApmSpan('get_throughput_for_service', async () => {
-    const response = await fetcher(options);
+export async function getThroughput(options: Options) {
+  const response = await fetcher(options);

   return (
     response.aggregations?.timeseries.buckets.map((bucket) => {
       return {
         x: bucket.key,
         y: bucket.throughput.value,
       };
     }) ?? []
   );
-  });
 }
@@ -41,7 +41,7 @@ const maybeAdd = (to: any[], value: any) => {
   to.push(value);
 };

-function getProfilingStats({
+async function getProfilingStats({
   apmEventClient,
   filter,
   valueTypeField,
@@ -50,49 +50,47 @@ function getProfilingStats({
   filter: ESFilter[];
   valueTypeField: string;
 }) {
-  return withApmSpan('get_profiling_stats', async () => {
-    const response = await apmEventClient.search({
+  const response = await apmEventClient.search('get_profiling_stats', {
     apm: {
       events: [ProcessorEvent.profile],
     },
     body: {
       size: 0,
       query: {
         bool: {
           filter,
         },
       },
       aggs: {
         stacks: {
           terms: {
             field: PROFILE_TOP_ID,
             size: MAX_STACK_IDS,
             order: {
               value: 'desc',
             },
           },
           aggs: {
             value: {
               sum: {
                 field: valueTypeField,
               },
             },
           },
         },
       },
     },
   });

   const stacks =
     response.aggregations?.stacks.buckets.map((stack) => {
       return {
         id: stack.key as string,
         value: stack.value.value!,
       };
     }) ?? [];

   return stacks;
-  });
 }

 function getProfilesWithStacks({
@@ -103,8 +101,9 @@ function getProfilesWithStacks({
   filter: ESFilter[];
 }) {
-  return withApmSpan('get_profiles_with_stacks', async () => {
-    const cardinalityResponse = await withApmSpan('get_top_cardinality', () =>
-      apmEventClient.search({
+  const cardinalityResponse = await apmEventClient.search(
+    'get_top_cardinality',
+    {
       apm: {
         events: [ProcessorEvent.profile],
       },
@@ -121,7 +120,7 @@ function getProfilesWithStacks({
       },
       },
     },
-    })
+    }
   );

   const cardinality = cardinalityResponse.aggregations?.top.value ?? 0;
@@ -140,39 +139,37 @@ function getProfilesWithStacks({
   const allResponses = await withApmSpan('get_all_stacks', async () => {
     return Promise.all(
       [...new Array(partitions)].map(async (_, num) => {
-        const response = await withApmSpan('get_partition', () =>
-          apmEventClient.search({
+        const response = await apmEventClient.search('get_partition', {
           apm: {
             events: [ProcessorEvent.profile],
           },
           body: {
             query: {
               bool: {
                 filter,
               },
             },
             aggs: {
               top: {
                 terms: {
                   field: PROFILE_TOP_ID,
                   size: Math.max(MAX_STACKS_PER_REQUEST),
                   include: {
                     num_partitions: partitions,
                     partition: num,
                   },
                 },
                 aggs: {
                   latest: {
                     top_hits: {
                       _source: [PROFILE_TOP_ID, PROFILE_STACK],
                     },
                   },
                 },
               },
             },
           },
-          })
-        );
+        });

         return (
           response.aggregations?.top.buckets.flatMap((bucket) => {
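
`getProfilesWithStacks` pages through a high-cardinality terms field by first measuring its cardinality, then splitting the term space into partitions and fetching one partition per request, as the hunk above shows. A self-contained sketch of how terms partitioning is expressed in the query DSL; the field name and size are illustrative:

    // Sketch: fetch partition `num` of `partitions` for a high-cardinality field.
    function partitionedTermsAgg(field: string, partitions: number, num: number) {
      return {
        top: {
          terms: {
            field,
            size: 1000, // upper bound on terms returned per partition
            include: {
              num_partitions: partitions, // split the term space into N slices
              partition: num, // ...and return only slice `num`
            },
          },
        },
      };
    }

    // Usage: issue one search per partition, then merge the buckets afterwards:
    // [...new Array(partitions)].map((_, num) =>
    //   partitionedTermsAgg('profile.top.id', partitions, num)
    // );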
@@ -17,7 +17,6 @@ import {
 } from '../../../../common/profiling';
 import { Setup, SetupTimeRange } from '../../helpers/setup_request';
 import { getBucketSize } from '../../helpers/get_bucket_size';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { kqlQuery } from '../../../utils/queries';

 const configMap = mapValues(
@@ -38,10 +37,11 @@ export async function getServiceProfilingTimeline({
   setup: Setup & SetupTimeRange;
   environment?: string;
 }) {
-  return withApmSpan('get_service_profiling_timeline', async () => {
   const { apmEventClient, start, end } = setup;

-  const response = await apmEventClient.search({
+  const response = await apmEventClient.search(
+    'get_service_profiling_timeline',
+    {
       apm: {
         events: [ProcessorEvent.profile],
       },
@@ -96,29 +96,29 @@ export async function getServiceProfilingTimeline({
       },
       },
     },
-  });
+    }
+  );

   const { aggregations } = response;

   if (!aggregations) {
     return [];
   }

   return aggregations.timeseries.buckets.map((bucket) => {
     return {
       x: bucket.key,
       valueTypes: {
         unknown: bucket.value_type.buckets.unknown.num_profiles.value,
         // TODO: use enum as object key. not possible right now
         // because of https://github.com/microsoft/TypeScript/issues/37888
         ...mapValues(configMap, (_, key) => {
           return (
             bucket.value_type.buckets[key as ProfilingValueType]?.num_profiles
               .value ?? 0
           );
         }),
       },
     };
   });
 }
@@ -55,7 +55,7 @@ describe('services queries', () => {
     })
   );

-  const allParams = mock.spy.mock.calls.map((call) => call[0]);
+  const allParams = mock.spy.mock.calls.map((call) => call[1]);

   expect(allParams).toMatchSnapshot();
 });
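
Because the client methods now receive the operation name as the first argument, the snapshot test above reads the request from `call[1]` instead of `call[0]`. A minimal jest-style sketch of why the index shifts; the mock and names are illustrative:

    // Sketch: with a (name, params) signature, params is the second call argument.
    const spy = jest.fn();
    spy('get_service_names_for_agent_config', { body: { size: 0 } });

    const allParams = spy.mock.calls.map((call) => call[1]);
    // allParams deep-equals [{ body: { size: 0 } }]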
@@ -12,7 +12,6 @@ import {
   AgentConfigurationIntake,
 } from '../../../../common/agent_configuration/configuration_types';
 import { APMIndexDocumentParams } from '../../helpers/create_es_client/create_internal_es_client';
-import { withApmSpan } from '../../../utils/with_apm_span';

 export function createOrUpdateConfiguration({
   configurationId,
@@ -23,30 +22,28 @@ export function createOrUpdateConfiguration({
   configurationIntake: AgentConfigurationIntake;
   setup: Setup;
 }) {
-  return withApmSpan('create_or_update_configuration', async () => {
   const { internalClient, indices } = setup;

   const params: APMIndexDocumentParams<AgentConfiguration> = {
     refresh: true,
     index: indices.apmAgentConfigurationIndex,
     body: {
       agent_name: configurationIntake.agent_name,
       service: {
         name: configurationIntake.service.name,
         environment: configurationIntake.service.environment,
       },
       settings: configurationIntake.settings,
       '@timestamp': Date.now(),
       applied_by_agent: false,
       etag: hash(configurationIntake),
     },
   };

   // by specifying an id elasticsearch will delete the previous doc and insert the updated doc
   if (configurationId) {
     params.id = configurationId;
   }

-  return internalClient.index(params);
-  });
+  return internalClient.index('create_or_update_agent_configuration', params);
 }
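
As the code comment above notes, supplying an explicit document `id` turns the create into an update: Elasticsearch replaces whatever document previously lived under that id. A self-contained sketch of the same upsert-by-id pattern with the vanilla client; the index name is illustrative:

    import { Client } from '@elastic/elasticsearch';

    // Sketch: index with an id to overwrite an existing doc, without an id to create one.
    async function createOrUpdate(
      client: Client,
      doc: Record<string, unknown>,
      id?: string
    ) {
      return client.index({
        index: '.apm-agent-configuration', // illustrative index name
        refresh: true, // make the write visible to the next search
        ...(id ? { id } : {}), // with an id, the previous doc is replaced
        document: doc,
      });
    }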
@@ -5,7 +5,6 @@
 * 2.0.
 */

-import { withApmSpan } from '../../../utils/with_apm_span';
 import { Setup } from '../../helpers/setup_request';

 export async function deleteConfiguration({
@@ -15,15 +14,13 @@ export async function deleteConfiguration({
   configurationId: string;
   setup: Setup;
 }) {
-  return withApmSpan('delete_agent_configuration', async () => {
   const { internalClient, indices } = setup;

   const params = {
     refresh: 'wait_for' as const,
     index: indices.apmAgentConfigurationIndex,
     id: configurationId,
   };

-  return internalClient.delete(params);
-  });
+  return internalClient.delete('delete_agent_configuration', params);
 }
@@ -11,47 +11,45 @@ import {
   SERVICE_ENVIRONMENT,
   SERVICE_NAME,
 } from '../../../../common/elasticsearch_fieldnames';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import { Setup } from '../../helpers/setup_request';
 import { convertConfigSettingsToString } from './convert_settings_to_string';

-export function findExactConfiguration({
+export async function findExactConfiguration({
   service,
   setup,
 }: {
   service: AgentConfiguration['service'];
   setup: Setup;
 }) {
-  return withApmSpan('find_exact_agent_configuration', async () => {
   const { internalClient, indices } = setup;

   const serviceNameFilter = service.name
     ? { term: { [SERVICE_NAME]: service.name } }
     : { bool: { must_not: [{ exists: { field: SERVICE_NAME } }] } };

   const environmentFilter = service.environment
     ? { term: { [SERVICE_ENVIRONMENT]: service.environment } }
     : { bool: { must_not: [{ exists: { field: SERVICE_ENVIRONMENT } }] } };

   const params = {
     index: indices.apmAgentConfigurationIndex,
     body: {
       query: {
         bool: { filter: [serviceNameFilter, environmentFilter] },
       },
     },
   };

-  const resp = await internalClient.search<AgentConfiguration, typeof params>(
-    params
-  );
+  const resp = await internalClient.search<AgentConfiguration, typeof params>(
+    'find_exact_agent_configuration',
+    params
+  );

   const hit = resp.hits.hits[0] as SearchHit<AgentConfiguration> | undefined;

   if (!hit) {
     return;
   }

   return convertConfigSettingsToString(hit);
-  });
 }
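
`findExactConfiguration` matches the "default" configuration by requiring a field to be absent: when no service name (or environment) is given, the filter becomes a `must_not exists` clause rather than a term match. A sketch of the two filter shapes, assuming the same field-name convention as above:

    // Sketch: exact match when a value is provided, "field missing" otherwise.
    function exactOrMissingFilter(field: string, value?: string) {
      return value
        ? { term: { [field]: value } }
        : { bool: { must_not: [{ exists: { field } }] } };
    }

    // exactOrMissingFilter('service.name', 'opbeans-java') -> term filter
    // exactOrMissingFilter('service.name') -> matches docs without the field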
@@ -9,7 +9,6 @@ import { ProcessorEvent } from '../../../../common/processor_event';
 import { Setup } from '../../helpers/setup_request';
 import { SERVICE_NAME } from '../../../../common/elasticsearch_fieldnames';
 import { AGENT_NAME } from '../../../../common/elasticsearch_fieldnames';
-import { withApmSpan } from '../../../utils/with_apm_span';

 export async function getAgentNameByService({
   serviceName,
@@ -18,35 +17,36 @@ export async function getAgentNameByService({
   serviceName: string;
   setup: Setup;
 }) {
-  return withApmSpan('get_agent_name_by_service', async () => {
   const { apmEventClient } = setup;

   const params = {
     terminateAfter: 1,
     apm: {
       events: [
         ProcessorEvent.transaction,
         ProcessorEvent.error,
         ProcessorEvent.metric,
       ],
     },
     body: {
       size: 0,
       query: {
         bool: {
           filter: [{ term: { [SERVICE_NAME]: serviceName } }],
         },
       },
       aggs: {
         agent_names: {
           terms: { field: AGENT_NAME, size: 1 },
         },
       },
     },
   };

-  const { aggregations } = await apmEventClient.search(params);
+  const { aggregations } = await apmEventClient.search(
+    'get_agent_name_by_service',
+    params
+  );
   const agentName = aggregations?.agent_names.buckets[0]?.key;
   return agentName as string | undefined;
-  });
 }
@@ -5,7 +5,6 @@
 * 2.0.
 */

-import { withApmSpan } from '../../../../utils/with_apm_span';
 import { Setup } from '../../../helpers/setup_request';
 import {
   SERVICE_NAME,
@@ -20,36 +19,37 @@ export async function getExistingEnvironmentsForService({
   serviceName: string | undefined;
   setup: Setup;
 }) {
-  return withApmSpan('get_existing_environments_for_service', async () => {
   const { internalClient, indices, config } = setup;
   const maxServiceEnvironments = config['xpack.apm.maxServiceEnvironments'];

   const bool = serviceName
     ? { filter: [{ term: { [SERVICE_NAME]: serviceName } }] }
     : { must_not: [{ exists: { field: SERVICE_NAME } }] };

   const params = {
     index: indices.apmAgentConfigurationIndex,
     body: {
       size: 0,
       query: { bool },
       aggs: {
         environments: {
           terms: {
             field: SERVICE_ENVIRONMENT,
             missing: ALL_OPTION_VALUE,
             size: maxServiceEnvironments,
           },
         },
       },
     },
   };

-  const resp = await internalClient.search(params);
+  const resp = await internalClient.search(
+    'get_existing_environments_for_service',
+    params
+  );
   const existingEnvironments =
     resp.aggregations?.environments.buckets.map(
       (bucket) => bucket.key as string
     ) || [];
   return existingEnvironments;
-  });
 }
@@ -11,52 +11,52 @@ import { PromiseReturnType } from '../../../../../observability/typings/common';
 import { SERVICE_NAME } from '../../../../common/elasticsearch_fieldnames';
 import { ALL_OPTION_VALUE } from '../../../../common/agent_configuration/all_option';
 import { getProcessorEventForAggregatedTransactions } from '../../helpers/aggregated_transactions';
-import { withApmSpan } from '../../../utils/with_apm_span';

 export type AgentConfigurationServicesAPIResponse = PromiseReturnType<
   typeof getServiceNames
 >;
-export function getServiceNames({
+export async function getServiceNames({
   setup,
   searchAggregatedTransactions,
 }: {
   setup: Setup;
   searchAggregatedTransactions: boolean;
 }) {
-  return withApmSpan('get_service_names_for_agent_config', async () => {
   const { apmEventClient, config } = setup;
   const maxServiceSelection = config['xpack.apm.maxServiceSelection'];

   const params = {
     apm: {
       events: [
         getProcessorEventForAggregatedTransactions(
           searchAggregatedTransactions
         ),
         ProcessorEvent.error,
         ProcessorEvent.metric,
       ],
     },
     body: {
       timeout: '1ms',
       size: 0,
       aggs: {
         services: {
           terms: {
             field: SERVICE_NAME,
             size: maxServiceSelection,
             min_doc_count: 0,
           },
         },
       },
     },
   };

-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search(
+    'get_service_names_for_agent_config',
+    params
+  );
   const serviceNames =
     resp.aggregations?.services.buckets
       .map((bucket) => bucket.key as string)
       .sort() || [];
   return [ALL_OPTION_VALUE, ...serviceNames];
-  });
 }
@@ -8,7 +8,6 @@
 import { Setup } from '../../helpers/setup_request';
 import { AgentConfiguration } from '../../../../common/agent_configuration/configuration_types';
 import { convertConfigSettingsToString } from './convert_settings_to_string';
-import { withApmSpan } from '../../../utils/with_apm_span';

 export async function listConfigurations({ setup }: { setup: Setup }) {
   const { internalClient, indices } = setup;
@@ -18,8 +17,9 @@ export async function listConfigurations({ setup }: { setup: Setup }) {
     size: 200,
   };

-  const resp = await withApmSpan('list_agent_configurations', () =>
-    internalClient.search<AgentConfiguration>(params)
+  const resp = await internalClient.search<AgentConfiguration>(
+    'list_agent_configuration',
+    params
   );

   return resp.hits.hits
|
@ -29,5 +29,8 @@ export async function markAppliedByAgent({
|
|||
},
|
||||
};
|
||||
|
||||
return internalClient.index<AgentConfiguration>(params);
|
||||
return internalClient.index<AgentConfiguration>(
|
||||
'mark_configuration_applied_by_agent',
|
||||
params
|
||||
);
|
||||
}
|
||||
|
|
|
@@ -13,7 +13,6 @@ import {
 import { Setup } from '../../helpers/setup_request';
 import { AgentConfiguration } from '../../../../common/agent_configuration/configuration_types';
 import { convertConfigSettingsToString } from './convert_settings_to_string';
-import { withApmSpan } from '../../../utils/with_apm_span';

 export async function searchConfigurations({
   service,
@@ -22,65 +21,64 @@ export async function searchConfigurations({
   service: AgentConfiguration['service'];
   setup: Setup;
 }) {
-  return withApmSpan('search_agent_configurations', async () => {
   const { internalClient, indices } = setup;

   // In the following `constant_score` is being used to disable IDF calculation (where frequency of a term influences scoring).
   // Additionally a boost has been added to service.name to ensure it scores higher.
   // If there is tie between a config with a matching service.name and a config with a matching environment, the config that matches service.name wins
   const serviceNameFilter = service.name
     ? [
         {
           constant_score: {
             filter: { term: { [SERVICE_NAME]: service.name } },
             boost: 2,
           },
         },
       ]
     : [];

   const environmentFilter = service.environment
     ? [
         {
           constant_score: {
             filter: { term: { [SERVICE_ENVIRONMENT]: service.environment } },
             boost: 1,
           },
         },
       ]
     : [];

   const params = {
     index: indices.apmAgentConfigurationIndex,
     body: {
       query: {
         bool: {
           minimum_should_match: 2,
           should: [
             ...serviceNameFilter,
             ...environmentFilter,
             { bool: { must_not: [{ exists: { field: SERVICE_NAME } }] } },
             {
               bool: {
                 must_not: [{ exists: { field: SERVICE_ENVIRONMENT } }],
               },
             },
           ],
         },
       },
     },
   };

-  const resp = await internalClient.search<AgentConfiguration, typeof params>(
-    params
-  );
+  const resp = await internalClient.search<AgentConfiguration, typeof params>(
+    'search_agent_configurations',
+    params
+  );

   const hit = resp.hits.hits[0] as SearchHit<AgentConfiguration> | undefined;

   if (!hit) {
     return;
   }

   return convertConfigSettingsToString(hit);
-  });
 }
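
The scoring trick in this query deserves a note: `constant_score` pins each matching clause to a fixed boost so term frequency cannot influence ranking, and the higher boost on `service.name` breaks ties in its favor. A sketch of the resulting relative ordering, under the simplifying assumption that each matching clause contributes exactly its boost:

    // Sketch: with constant_score, a matching clause contributes its boost and
    // nothing else. A service.name match (boost 2) therefore always outranks an
    // environment-only match (boost 1), which is the tie-break the comment describes.
    const score = (matchesServiceName: boolean, matchesEnvironment: boolean) =>
      (matchesServiceName ? 2 : 0) + (matchesEnvironment ? 1 : 0);

    // score(true, false) === 2, score(false, true) === 1, score(true, true) === 3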
@@ -39,17 +39,20 @@ describe('Create or Update Custom link', () => {

   it('creates a new custom link', () => {
     createOrUpdateCustomLink({ customLink, setup: mockedSetup });
-    expect(internalClientIndexMock).toHaveBeenCalledWith({
-      refresh: true,
-      index: 'apmCustomLinkIndex',
-      body: {
-        '@timestamp': 1570737000000,
-        label: 'foo',
-        url: 'http://elastic.com/{{trace.id}}',
-        'service.name': ['opbeans-java'],
-        'transaction.type': ['Request'],
-      },
-    });
+    expect(internalClientIndexMock).toHaveBeenCalledWith(
+      'create_or_update_custom_link',
+      {
+        refresh: true,
+        index: 'apmCustomLinkIndex',
+        body: {
+          '@timestamp': 1570737000000,
+          label: 'foo',
+          url: 'http://elastic.com/{{trace.id}}',
+          'service.name': ['opbeans-java'],
+          'transaction.type': ['Request'],
+        },
+      }
+    );
   });
   it('update a new custom link', () => {
     createOrUpdateCustomLink({
@@ -57,17 +60,20 @@ describe('Create or Update Custom link', () => {
       customLink,
       setup: mockedSetup,
     });
-    expect(internalClientIndexMock).toHaveBeenCalledWith({
-      refresh: true,
-      index: 'apmCustomLinkIndex',
-      id: 'bar',
-      body: {
-        '@timestamp': 1570737000000,
-        label: 'foo',
-        url: 'http://elastic.com/{{trace.id}}',
-        'service.name': ['opbeans-java'],
-        'transaction.type': ['Request'],
-      },
-    });
+    expect(internalClientIndexMock).toHaveBeenCalledWith(
+      'create_or_update_custom_link',
+      {
+        refresh: true,
+        index: 'apmCustomLinkIndex',
+        id: 'bar',
+        body: {
+          '@timestamp': 1570737000000,
+          label: 'foo',
+          url: 'http://elastic.com/{{trace.id}}',
+          'service.name': ['opbeans-java'],
+          'transaction.type': ['Request'],
+        },
+      }
+    );
   });
 });
@@ -12,7 +12,6 @@ import {
 import { Setup } from '../../helpers/setup_request';
 import { toESFormat } from './helper';
 import { APMIndexDocumentParams } from '../../helpers/create_es_client/create_internal_es_client';
-import { withApmSpan } from '../../../utils/with_apm_span';

 export function createOrUpdateCustomLink({
   customLinkId,
@@ -23,23 +22,21 @@ export function createOrUpdateCustomLink({
   customLink: Omit<CustomLink, '@timestamp'>;
   setup: Setup;
 }) {
-  return withApmSpan('create_or_update_custom_link', () => {
   const { internalClient, indices } = setup;

   const params: APMIndexDocumentParams<CustomLinkES> = {
     refresh: true,
     index: indices.apmCustomLinkIndex,
     body: {
       '@timestamp': Date.now(),
       ...toESFormat(customLink),
     },
   };

   // by specifying an id elasticsearch will delete the previous doc and insert the updated doc
   if (customLinkId) {
     params.id = customLinkId;
   }

-  return internalClient.index(params);
-  });
+  return internalClient.index('create_or_update_custom_link', params);
 }
@@ -5,7 +5,6 @@
 * 2.0.
 */

-import { withApmSpan } from '../../../utils/with_apm_span';
 import { Setup } from '../../helpers/setup_request';

 export function deleteCustomLink({
@@ -15,15 +14,13 @@ export function deleteCustomLink({
   customLinkId: string;
   setup: Setup;
 }) {
-  return withApmSpan('delete_custom_link', () => {
   const { internalClient, indices } = setup;

   const params = {
     refresh: 'wait_for' as const,
     index: indices.apmCustomLinkIndex,
     id: customLinkId,
   };

-  return internalClient.delete(params);
-  });
+  return internalClient.delete('delete_custom_link', params);
 }
@@ -11,43 +11,43 @@ import { Setup } from '../../helpers/setup_request';
 import { ProcessorEvent } from '../../../../common/processor_event';
 import { filterOptionsRt } from './custom_link_types';
 import { splitFilterValueByComma } from './helper';
-import { withApmSpan } from '../../../utils/with_apm_span';

-export function getTransaction({
+export async function getTransaction({
   setup,
   filters = {},
 }: {
   setup: Setup;
   filters?: t.TypeOf<typeof filterOptionsRt>;
 }) {
-  return withApmSpan('get_transaction_for_custom_link', async () => {
   const { apmEventClient } = setup;

   const esFilters = compact(
     Object.entries(filters)
       // loops through the filters splitting the value by comma and removing white spaces
       .map(([key, value]) => {
         if (value) {
           return { terms: { [key]: splitFilterValueByComma(value) } };
         }
       })
   );

   const params = {
     terminateAfter: 1,
     apm: {
       events: [ProcessorEvent.transaction as const],
     },
     size: 1,
     body: {
       query: {
         bool: {
           filter: esFilters,
         },
       },
     },
   };
-  const resp = await apmEventClient.search(params);
-  return resp.hits.hits[0]?._source;
-  });
+  const resp = await apmEventClient.search(
+    'get_transaction_for_custom_link',
+    params
+  );
+  return resp.hits.hits[0]?._source;
 }
@@ -14,54 +14,54 @@ import {
 import { Setup } from '../../helpers/setup_request';
 import { fromESFormat } from './helper';
 import { filterOptionsRt } from './custom_link_types';
-import { withApmSpan } from '../../../utils/with_apm_span';

-export function listCustomLinks({
+export async function listCustomLinks({
   setup,
   filters = {},
 }: {
   setup: Setup;
   filters?: t.TypeOf<typeof filterOptionsRt>;
 }): Promise<CustomLink[]> {
-  return withApmSpan('list_custom_links', async () => {
   const { internalClient, indices } = setup;
   const esFilters = Object.entries(filters).map(([key, value]) => {
     return {
       bool: {
         minimum_should_match: 1,
         should: [
           { term: { [key]: value } },
           { bool: { must_not: [{ exists: { field: key } }] } },
         ] as QueryDslQueryContainer[],
       },
     };
   });

   const params = {
     index: indices.apmCustomLinkIndex,
     size: 500,
     body: {
       query: {
         bool: {
           filter: esFilters,
         },
       },
       sort: [
         {
           'label.keyword': {
             order: 'asc' as const,
           },
         },
       ],
     },
   };
-  const resp = await internalClient.search<CustomLinkES>(params);
+  const resp = await internalClient.search<CustomLinkES>(
+    'list_custom_links',
+    params
+  );
   const customLinks = resp.hits.hits.map((item) =>
     fromESFormat({
       id: item._id,
       ...item._source,
     })
   );
   return customLinks;
-  });
 }
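
Each custom-link filter above is optional on the stored document: a link matches when the field either equals the requested value or is not set at all, which is what the `minimum_should_match: 1` pair of clauses encodes. A sketch of that filter shape, using the same DSL as the code above:

    // Sketch: match docs where `field` equals `value` OR the field is absent.
    function optionalTermFilter(field: string, value: string) {
      return {
        bool: {
          minimum_should_match: 1, // at least one of the two clauses must hold
          should: [
            { term: { [field]: value } },
            { bool: { must_not: [{ exists: { field } }] } },
          ],
        },
      };
    }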
@@ -19,7 +19,6 @@ import { APMError } from '../../../typings/es_schemas/ui/apm_error';
 import { rangeQuery } from '../../../server/utils/queries';
 import { Setup, SetupTimeRange } from '../helpers/setup_request';
 import { PromiseValueType } from '../../../typings/common';
-import { withApmSpan } from '../../utils/with_apm_span';

 export interface ErrorsPerTransaction {
   [transactionId: string]: number;
@@ -29,103 +28,94 @@ export async function getTraceItems(
   traceId: string,
   setup: Setup & SetupTimeRange
 ) {
-  return withApmSpan('get_trace_items', async () => {
   const { start, end, apmEventClient, config } = setup;
   const maxTraceItems = config['xpack.apm.ui.maxTraceItems'];
   const excludedLogLevels = ['debug', 'info', 'warning'];

-  const errorResponsePromise = withApmSpan('get_trace_error_items', () =>
-    apmEventClient.search({
+  const errorResponsePromise = apmEventClient.search('get_trace_items', {
     apm: {
       events: [ProcessorEvent.error],
     },
     body: {
       size: maxTraceItems,
+      track_total_hits: true,
       query: {
         bool: {
           filter: [
             { term: { [TRACE_ID]: traceId } },
             ...rangeQuery(start, end),
           ],
           must_not: { terms: { [ERROR_LOG_LEVEL]: excludedLogLevels } },
         },
       },
       aggs: {
         by_transaction_id: {
           terms: {
             field: TRANSACTION_ID,
             size: maxTraceItems,
             // high cardinality
             execution_hint: 'map' as const,
           },
         },
       },
     },
-    })
-  );
+  });

-  const traceResponsePromise = withApmSpan('get_trace_span_items', () =>
-    apmEventClient.search({
+  const traceResponsePromise = apmEventClient.search('get_trace_span_items', {
     apm: {
       events: [ProcessorEvent.span, ProcessorEvent.transaction],
     },
     body: {
       size: maxTraceItems,
       query: {
         bool: {
           filter: [
             { term: { [TRACE_ID]: traceId } },
             ...rangeQuery(start, end),
           ] as QueryDslQueryContainer[],
           should: {
             exists: { field: PARENT_ID },
           },
         },
       },
       sort: [
         { _score: { order: 'asc' as const } },
         { [TRANSACTION_DURATION]: { order: 'desc' as const } },
         { [SPAN_DURATION]: { order: 'desc' as const } },
       ],
+      track_total_hits: true,
     },
-    })
-  );
+  });

   const [errorResponse, traceResponse]: [
     // explicit intermediary types to avoid TS "excessively deep" error
     PromiseValueType<typeof errorResponsePromise>,
     PromiseValueType<typeof traceResponsePromise>
-  ] = (await Promise.all([
-    errorResponsePromise,
-    traceResponsePromise,
-  ])) as any;
+  ] = (await Promise.all([errorResponsePromise, traceResponsePromise])) as any;

   const exceedsMax = traceResponse.hits.total.value > maxTraceItems;

   const items = traceResponse.hits.hits.map((hit) => hit._source);

   const errorFrequencies: {
     errorsPerTransaction: ErrorsPerTransaction;
     errorDocs: APMError[];
   } = {
     errorDocs: errorResponse.hits.hits.map(({ _source }) => _source),
     errorsPerTransaction:
       errorResponse.aggregations?.by_transaction_id.buckets.reduce(
         (acc, current) => {
           return {
             ...acc,
             [current.key]: current.doc_count,
           };
         },
         {} as ErrorsPerTransaction
       ) ?? {},
   };

   return {
     items,
     exceedsMax,
     ...errorFrequencies,
   };
-  });
 }
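
The `track_total_hits: true` additions matter here: without them Elasticsearch caps `hits.total.value` at 10,000 by default, which would make the `exceedsMax` comparison unreliable for large traces. A sketch of the guard, assuming a numeric total as in the code above:

    // Sketch: an accurate total lets the UI tell the user the trace was truncated.
    function isTraceTruncated(totalHits: number, maxTraceItems: number): boolean {
      // With track_total_hits: true, totalHits is exact rather than capped at 10k.
      return totalHits > maxTraceItems;
    }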
@@ -31,7 +31,6 @@ import {
   getOutcomeAggregation,
   getTransactionErrorRateTimeSeries,
 } from '../helpers/transaction_error_rate';
-import { withApmSpan } from '../../utils/with_apm_span';

 export async function getErrorRate({
   environment,
@@ -58,81 +57,82 @@ export async function getErrorRate({
   transactionErrorRate: Coordinate[];
   average: number | null;
 }> {
-  return withApmSpan('get_transaction_group_error_rate', async () => {
   const { apmEventClient } = setup;

   const transactionNamefilter = transactionName
     ? [{ term: { [TRANSACTION_NAME]: transactionName } }]
     : [];
   const transactionTypefilter = transactionType
     ? [{ term: { [TRANSACTION_TYPE]: transactionType } }]
     : [];

   const filter = [
     { term: { [SERVICE_NAME]: serviceName } },
     {
       terms: {
         [EVENT_OUTCOME]: [EventOutcome.failure, EventOutcome.success],
       },
     },
     ...transactionNamefilter,
     ...transactionTypefilter,
     ...getDocumentTypeFilterForAggregatedTransactions(
       searchAggregatedTransactions
     ),
     ...rangeQuery(start, end),
     ...environmentQuery(environment),
     ...kqlQuery(kuery),
   ];

   const outcomes = getOutcomeAggregation();

   const params = {
     apm: {
       events: [
         getProcessorEventForAggregatedTransactions(
           searchAggregatedTransactions
         ),
       ],
     },
     body: {
       size: 0,
       query: { bool: { filter } },
       aggs: {
         outcomes,
         timeseries: {
           date_histogram: {
             field: '@timestamp',
             fixed_interval: getBucketSize({ start, end }).intervalString,
             min_doc_count: 0,
             extended_bounds: { min: start, max: end },
           },
           aggs: {
             outcomes,
           },
         },
       },
     },
   };

-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search(
+    'get_transaction_group_error_rate',
+    params
+  );

   const noHits = resp.hits.total.value === 0;

   if (!resp.aggregations) {
     return { noHits, transactionErrorRate: [], average: null };
   }

   const transactionErrorRate = getTransactionErrorRateTimeSeries(
     resp.aggregations.timeseries.buckets
|
||||
resp.aggregations.timeseries.buckets
|
||||
);
|
||||
|
||||
const average = calculateTransactionErrorPercentage(
|
||||
resp.aggregations.outcomes
|
||||
);
|
||||
const average = calculateTransactionErrorPercentage(
|
||||
resp.aggregations.outcomes
|
||||
);
|
||||
|
||||
return { noHits, transactionErrorRate, average };
|
||||
});
|
||||
return { noHits, transactionErrorRate, average };
|
||||
}
|
||||
|
||||
export async function getErrorRatePeriods({
|
||||
|
|
|
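calculateTransactionErrorPercentage reduces the outcome aggregation to a single ratio. As plain arithmetic, and assuming the helper counts outcomes the way its name suggests, a sketch (illustrative, not the Kibana implementation):

// failure / (failure + success); 'unknown' outcomes never reach this point
// because the query above filters EVENT_OUTCOME to failure and success.
function errorPercentage(
  failureCount: number,
  successCount: number
): number | null {
  const conclusive = failureCount + successCount;
  return conclusive > 0 ? failureCount / conclusive : null;
}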
@@ -11,7 +11,6 @@ import { TRANSACTION_TYPE } from '../../../common/elasticsearch_fieldnames';
 import { arrayUnionToCallable } from '../../../common/utils/array_union_to_callable';
 import { TransactionGroupRequestBase, TransactionGroupSetup } from './fetcher';
 import { getTransactionDurationFieldForAggregatedTransactions } from '../helpers/aggregated_transactions';
-import { withApmSpan } from '../../utils/with_apm_span';
 
 interface MetricParams {
   request: TransactionGroupRequestBase;
@@ -39,124 +38,128 @@ function mergeRequestWithAggs<
   });
 }
 
-export function getAverages({
+export async function getAverages({
   request,
   setup,
   searchAggregatedTransactions,
 }: MetricParams) {
-  return withApmSpan('get_avg_transaction_group_duration', async () => {
   const params = mergeRequestWithAggs(request, {
     avg: {
       avg: {
         field: getTransactionDurationFieldForAggregatedTransactions(
           searchAggregatedTransactions
         ),
       },
     },
   });
 
-  const response = await setup.apmEventClient.search(params);
+  const response = await setup.apmEventClient.search(
+    'get_avg_transaction_group_duration',
+    params
+  );
 
   return arrayUnionToCallable(
     response.aggregations?.transaction_groups.buckets ?? []
   ).map((bucket) => {
     return {
       key: bucket.key as BucketKey,
       avg: bucket.avg.value,
     };
   });
-  });
 }
 
-export function getCounts({ request, setup }: MetricParams) {
-  return withApmSpan('get_transaction_group_transaction_count', async () => {
+export async function getCounts({ request, setup }: MetricParams) {
   const params = mergeRequestWithAggs(request, {
     transaction_type: {
       top_metrics: {
         sort: {
           '@timestamp': 'desc' as const,
         },
         metrics: [
           {
             field: TRANSACTION_TYPE,
           } as const,
         ],
       },
     },
   });
 
-  const response = await setup.apmEventClient.search(params);
+  const response = await setup.apmEventClient.search(
+    'get_transaction_group_transaction_count',
+    params
+  );
 
   return arrayUnionToCallable(
     response.aggregations?.transaction_groups.buckets ?? []
   ).map((bucket) => {
     return {
       key: bucket.key as BucketKey,
       count: bucket.doc_count,
       transactionType: bucket.transaction_type.top[0].metrics[
         TRANSACTION_TYPE
       ] as string,
     };
   });
-  });
 }
 
-export function getSums({
+export async function getSums({
   request,
   setup,
   searchAggregatedTransactions,
 }: MetricParams) {
-  return withApmSpan('get_transaction_group_latency_sums', async () => {
   const params = mergeRequestWithAggs(request, {
     sum: {
       sum: {
         field: getTransactionDurationFieldForAggregatedTransactions(
           searchAggregatedTransactions
         ),
       },
     },
   });
 
-  const response = await setup.apmEventClient.search(params);
+  const response = await setup.apmEventClient.search(
+    'get_transaction_group_latency_sums',
+    params
+  );
 
   return arrayUnionToCallable(
     response.aggregations?.transaction_groups.buckets ?? []
   ).map((bucket) => {
     return {
       key: bucket.key as BucketKey,
       sum: bucket.sum.value,
     };
   });
-  });
 }
 
-export function getPercentiles({
+export async function getPercentiles({
   request,
   setup,
   searchAggregatedTransactions,
 }: MetricParams) {
-  return withApmSpan('get_transaction_group_latency_percentiles', async () => {
   const params = mergeRequestWithAggs(request, {
     p95: {
       percentiles: {
         field: getTransactionDurationFieldForAggregatedTransactions(
           searchAggregatedTransactions
         ),
         hdr: { number_of_significant_value_digits: 2 },
         percents: [95],
       },
     },
   });
 
-  const response = await setup.apmEventClient.search(params);
+  const response = await setup.apmEventClient.search(
+    'get_transaction_group_latency_percentiles',
    params
+  );
 
   return arrayUnionToCallable(
     response.aggregations?.transaction_groups.buckets ?? []
   ).map((bucket) => {
     return {
       key: bucket.key as BucketKey,
       p95: Object.values(bucket.p95.values)[0],
     };
   });
-  });
 }
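With the wrappers removed, getAverages, getCounts, getSums, and getPercentiles are ordinary async functions, and each underlying search names its own span. A hypothetical call site (variable names illustrative, not from this diff) can join them directly:

const [averages, counts, sums, percentiles] = await Promise.all([
  getAverages({ request, setup, searchAggregatedTransactions }),
  getCounts({ request, setup, searchAggregatedTransactions }),
  getSums({ request, setup, searchAggregatedTransactions }),
  getPercentiles({ request, setup, searchAggregatedTransactions }),
]);

Even when run concurrently like this, each search still gets its own named APM span ('get_avg_transaction_group_duration' and so on), because the naming now happens inside the client.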
@@ -33,7 +33,7 @@ describe('transaction group queries', () => {
     )
   );
 
-  const allParams = mock.spy.mock.calls.map((call) => call[0]);
+  const allParams = mock.spy.mock.calls.map((call) => call[1]);
 
   expect(allParams).toMatchSnapshot();
 });
@@ -51,7 +51,7 @@ describe('transaction group queries', () => {
     )
   );
 
-  const allParams = mock.spy.mock.calls.map((call) => call[0]);
+  const allParams = mock.spy.mock.calls.map((call) => call[1]);
 
   expect(allParams).toMatchSnapshot();
 });
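The snapshot tests switch from call[0] to call[1] because the spied search method now receives (operationName, params) rather than params alone. A hypothetical extra assertion along the same lines:

const [operationName, params] = mock.spy.mock.calls[0];
expect(typeof operationName).toBe('string'); // every search is named now
expect(params).toHaveProperty('body'); // the ES request moved to index 1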
@@ -26,9 +26,8 @@ import {
 import { getMetricsDateHistogramParams } from '../../helpers/metrics';
 import { MAX_KPIS } from './constants';
 import { getVizColorForIndex } from '../../../../common/viz_colors';
-import { withApmSpan } from '../../../utils/with_apm_span';
 
-export function getTransactionBreakdown({
+export async function getTransactionBreakdown({
   environment,
   kuery,
   setup,
@@ -43,205 +42,203 @@ export function getTransactionBreakdown({
   transactionName?: string;
   transactionType: string;
 }) {
-  return withApmSpan('get_transaction_breakdown', async () => {
   const { apmEventClient, start, end, config } = setup;
 
   const subAggs = {
     sum_all_self_times: {
       sum: {
         field: SPAN_SELF_TIME_SUM,
       },
     },
     total_transaction_breakdown_count: {
       sum: {
         field: TRANSACTION_BREAKDOWN_COUNT,
       },
     },
     types: {
       terms: {
         field: SPAN_TYPE,
         size: 20,
         order: {
           _count: 'desc' as const,
         },
       },
       aggs: {
         subtypes: {
           terms: {
             field: SPAN_SUBTYPE,
             missing: '',
             size: 20,
             order: {
               _count: 'desc' as const,
             },
           },
           aggs: {
             total_self_time_per_subtype: {
               sum: {
                 field: SPAN_SELF_TIME_SUM,
               },
             },
           },
         },
       },
     },
   };
 
   const filters = [
     { term: { [SERVICE_NAME]: serviceName } },
     { term: { [TRANSACTION_TYPE]: transactionType } },
     ...rangeQuery(start, end),
     ...environmentQuery(environment),
     ...kqlQuery(kuery),
     {
       bool: {
         should: [
           { exists: { field: SPAN_SELF_TIME_SUM } },
           { exists: { field: TRANSACTION_BREAKDOWN_COUNT } },
         ],
         minimum_should_match: 1,
       },
     },
   ];
 
   if (transactionName) {
     filters.push({ term: { [TRANSACTION_NAME]: transactionName } });
   }
 
   const params = {
     apm: {
       events: [ProcessorEvent.metric],
     },
     body: {
       size: 0,
       query: {
         bool: {
           filter: filters,
         },
       },
       aggs: {
         ...subAggs,
         by_date: {
           date_histogram: getMetricsDateHistogramParams(
             start,
             end,
             config['xpack.apm.metricsInterval']
           ),
           aggs: subAggs,
         },
       },
     },
   };
 
-  const resp = await apmEventClient.search(params);
+  const resp = await apmEventClient.search('get_transaction_breakdown', params);
 
   const formatBucket = (
     aggs:
       | Required<typeof resp>['aggregations']
       | Required<typeof resp>['aggregations']['by_date']['buckets'][0]
   ) => {
     const sumAllSelfTimes = aggs.sum_all_self_times.value || 0;
 
     const breakdowns = flatten(
       aggs.types.buckets.map((bucket) => {
         const type = bucket.key as string;
 
         return bucket.subtypes.buckets.map((subBucket) => {
           return {
             name: (subBucket.key as string) || type,
             percentage:
               (subBucket.total_self_time_per_subtype.value || 0) /
               sumAllSelfTimes,
           };
         });
       })
     );
 
     return breakdowns;
   };
 
   const visibleKpis = resp.aggregations
     ? orderBy(formatBucket(resp.aggregations), 'percentage', 'desc').slice(
         0,
         MAX_KPIS
       )
     : [];
 
   const kpis = orderBy(
     visibleKpis.map((kpi) => ({
       ...kpi,
       lowerCaseName: kpi.name.toLowerCase(),
     })),
     'lowerCaseName'
   ).map((kpi, index) => {
     const { lowerCaseName, ...rest } = kpi;
     return {
       ...rest,
       color: getVizColorForIndex(index),
     };
   });
 
   const kpiNames = kpis.map((kpi) => kpi.name);
 
   const bucketsByDate = resp.aggregations?.by_date.buckets || [];
 
   const timeseriesPerSubtype = bucketsByDate.reduce((prev, bucket) => {
     const formattedValues = formatBucket(bucket);
     const time = bucket.key;
 
     const updatedSeries = kpiNames.reduce((p, kpiName) => {
       const { name, percentage } = formattedValues.find(
         (val) => val.name === kpiName
       ) || {
         name: kpiName,
         percentage: null,
       };
 
       if (!p[name]) {
         p[name] = [];
       }
       return {
         ...p,
         [name]: p[name].concat({
           x: time,
           y: percentage,
         }),
       };
     }, prev);
 
     const lastValues = Object.values(updatedSeries).map(last);
 
     // If for a given timestamp, some series have data, but others do not,
     // we have to set any null values to 0 to make sure the stacked area chart
     // is drawn correctly.
     // If we set all values to 0, the chart always displays null values as 0,
     // and the chart looks weird.
     const hasAnyValues = lastValues.some((value) => value?.y !== null);
     const hasNullValues = lastValues.some((value) => value?.y === null);
 
     if (hasAnyValues && hasNullValues) {
       Object.values(updatedSeries).forEach((series) => {
         const value = series[series.length - 1];
         const isEmpty = value.y === null;
         if (isEmpty) {
           // local mutation to prevent complicated map/reduce calls
           value.y = 0;
         }
       });
     }
 
     return updatedSeries;
   }, {} as Record<string, Array<{ x: number; y: number | null }>>);
 
   const timeseries = kpis.map((kpi) => ({
     title: kpi.name,
     color: kpi.color,
     type: 'areaStacked',
     data: timeseriesPerSubtype[kpi.name],
     hideLegend: false,
     legendValue: asPercent(kpi.percentage, 1),
   }));
 
   return { timeseries };
-  });
 }
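The trailing-null handling is the subtle part of the breakdown chart. A standalone restatement of the same logic, with an assumed Point type (sketch only):

type Point = { x: number; y: number | null };

// When at least one series has a value at the newest timestamp while others
// are null, the nulls become 0 so the areas still stack; when every series
// is null, the gap is preserved and the chart shows no data there.
function fillTrailingNulls(series: Point[][]): void {
  const lastPoints = series.map((s) => s[s.length - 1]);
  if (!lastPoints.some((p) => p?.y !== null)) {
    return; // all series empty at this timestamp: keep the gap
  }
  for (const point of lastPoints) {
    if (point && point.y === null) {
      point.y = 0; // local mutation, mirroring the original code
    }
  }
}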
@@ -89,48 +89,47 @@ export async function getBuckets({
   ] as QueryDslQueryContainer[];
 
   async function getSamplesForDistributionBuckets() {
-    const response = await withApmSpan(
+    const response = await apmEventClient.search(
       'get_samples_for_latency_distribution_buckets',
-      () =>
-        apmEventClient.search({
+      {
         apm: {
           events: [ProcessorEvent.transaction],
         },
         body: {
           query: {
             bool: {
               filter: [
                 ...commonFilters,
                 { term: { [TRANSACTION_SAMPLED]: true } },
               ],
               should: [
                 { term: { [TRACE_ID]: traceId } },
                 { term: { [TRANSACTION_ID]: transactionId } },
               ] as QueryDslQueryContainer[],
             },
           },
           aggs: {
             distribution: {
               histogram: getHistogramAggOptions({
                 bucketSize,
                 field: TRANSACTION_DURATION,
                 distributionMax,
               }),
               aggs: {
                 samples: {
                   top_hits: {
                     _source: [TRANSACTION_ID, TRACE_ID],
                     size: 10,
                     sort: {
                       _score: 'desc' as const,
                     },
                   },
                 },
               },
             },
           },
         },
-        })
+      }
     );
 
     return (
@@ -148,41 +147,40 @@ export async function getBuckets({
   }
 
   async function getDistributionBuckets() {
-    const response = await withApmSpan(
+    const response = await apmEventClient.search(
       'get_latency_distribution_buckets',
-      () =>
-        apmEventClient.search({
+      {
         apm: {
           events: [
             getProcessorEventForAggregatedTransactions(
               searchAggregatedTransactions
             ),
           ],
         },
         body: {
           query: {
             bool: {
               filter: [
                 ...commonFilters,
                 ...getDocumentTypeFilterForAggregatedTransactions(
                   searchAggregatedTransactions
                 ),
               ],
             },
           },
           aggs: {
             distribution: {
               histogram: getHistogramAggOptions({
                 field: getTransactionDurationFieldForAggregatedTransactions(
                   searchAggregatedTransactions
                 ),
                 bucketSize,
                 distributionMax,
               }),
             },
           },
         },
-        })
+      }
     );
 
     return (
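Both searches rely on getHistogramAggOptions to size the latency histogram. A sketch of what such a builder plausibly returns, assuming a fixed-interval histogram bounded by the distribution max (the real helper lives elsewhere in the APM server code and may differ):

// Illustrative shape: the interval is the precomputed bucket size, and the
// extended bounds pad the histogram to the distribution max so empty
// trailing buckets still appear in the chart.
function histogramAggOptions({
  field,
  bucketSize,
  distributionMax,
}: {
  field: string;
  bucketSize: number;
  distributionMax: number;
}) {
  return {
    field,
    interval: bucketSize,
    min_doc_count: 0,
    extended_bounds: { min: 0, max: distributionMax },
  };
}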
@@ -20,7 +20,6 @@ import {
   rangeQuery,
   kqlQuery,
 } from '../../../../server/utils/queries';
-import { withApmSpan } from '../../../utils/with_apm_span';
 
 export async function getDistributionMax({
   environment,
@@ -39,44 +38,45 @@ export async function getDistributionMax({
   setup: Setup & SetupTimeRange;
   searchAggregatedTransactions: boolean;
 }) {
-  return withApmSpan('get_latency_distribution_max', async () => {
   const { start, end, apmEventClient } = setup;
 
   const params = {
     apm: {
       events: [
         getProcessorEventForAggregatedTransactions(
           searchAggregatedTransactions
         ),
       ],
     },
     body: {
       size: 0,
       query: {
         bool: {
           filter: [
             { term: { [SERVICE_NAME]: serviceName } },
             { term: { [TRANSACTION_TYPE]: transactionType } },
             { term: { [TRANSACTION_NAME]: transactionName } },
             ...rangeQuery(start, end),
             ...environmentQuery(environment),
             ...kqlQuery(kuery),
           ],
         },
       },
       aggs: {
         stats: {
           max: {
             field: getTransactionDurationFieldForAggregatedTransactions(
               searchAggregatedTransactions
             ),
           },
         },
       },
     },
   };
 
-  const resp = await apmEventClient.search(params);
-  return resp.aggregations?.stats.value ?? null;
-  });
+  const resp = await apmEventClient.search(
+    'get_latency_distribution_max',
+    params
+  );
+  return resp.aggregations?.stats.value ?? null;
 }
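The max returned here feeds the histogram sizing used above. A hypothetical follow-on, assuming a fixed target bucket count (the constant below is illustrative, not Kibana's):

const BUCKET_TARGET_COUNT = 15; // assumption for this sketch

function bucketSizeFromMax(distributionMax: number): number {
  // ceil so the last bucket still covers the slowest transaction
  return Math.max(Math.ceil(distributionMax / BUCKET_TARGET_COUNT), 1);
}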
@@ -26,7 +26,6 @@ import {
 } from '../../../lib/helpers/aggregated_transactions';
 import { getBucketSize } from '../../../lib/helpers/get_bucket_size';
 import { Setup, SetupTimeRange } from '../../../lib/helpers/setup_request';
-import { withApmSpan } from '../../../utils/with_apm_span';
 import {
   getLatencyAggregation,
   getLatencyValue,
@@ -112,10 +111,10 @@ function searchLatency({
     },
   };
 
-  return apmEventClient.search(params);
+  return apmEventClient.search('get_latency_charts', params);
 }
 
-export function getLatencyTimeseries({
+export async function getLatencyTimeseries({
   environment,
   kuery,
   serviceName,
@@ -138,40 +137,38 @@ export function getLatencyTimeseries({
   start: number;
   end: number;
 }) {
-  return withApmSpan('get_latency_charts', async () => {
   const response = await searchLatency({
     environment,
     kuery,
     serviceName,
     transactionType,
     transactionName,
     setup,
     searchAggregatedTransactions,
     latencyAggregationType,
     start,
     end,
   });
 
   if (!response.aggregations) {
     return { latencyTimeseries: [], overallAvgDuration: null };
   }
 
   return {
     overallAvgDuration:
       response.aggregations.overall_avg_duration.value || null,
     latencyTimeseries: response.aggregations.latencyTimeseries.buckets.map(
       (bucket) => {
         return {
           x: bucket.key,
           y: getLatencyValue({
             latencyAggregationType,
             aggregation: bucket.latency,
           }),
         };
       }
     ),
   };
-  });
 }
 
 export async function getLatencyPeriods({
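getLatencyValue picks the right number out of each bucket's latency aggregation depending on the chart's aggregation type. A sketch under assumed Elasticsearch response shapes (an avg aggregation returns value, a percentiles aggregation returns values keyed by percentile); this is illustrative, not the Kibana helper:

type LatencyAggregation =
  | { value: number | null } // avg
  | { values: Record<string, number | null> }; // percentiles

function latencyValue(
  type: 'avg' | 'p95' | 'p99',
  aggregation: LatencyAggregation
): number | null {
  if ('values' in aggregation) {
    const key = type === 'p99' ? '99.0' : '95.0';
    return aggregation.values[key] ?? null;
  }
  return aggregation.value;
}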
Some files were not shown because too many files have changed in this diff.