Mirror of https://github.com/elastic/kibana.git, synced 2025-04-23 09:19:04 -04:00
[Monitoring] Recognize Write Threadpool (#18147)
This changes the Advanced Node page for Elasticsearch to recognize the "write" threadpool, while also incorporating the deprecated "index" threadpool and the "bulk" threadpool that was renamed to "write". The three writing threadpools are summed together and displayed as a unified view.
Parent: 5cb9cd7d33
Commit: 015e94b5fe
4 changed files with 204 additions and 109 deletions
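The unified view comes from a per-bucket calculation: for each date-histogram bucket, the metric reads the max value of each of the three thread pools, treats pools that did not report as zero, and returns null only when none of them reported anything. A minimal standalone sketch of that calculation follows (the sumWriteQueues name and the sample buckets are illustrative; the lodash lookup and the index/bulk/write bucket keys mirror the WriteThreadPoolQueueMetric added below):

const _ = require('lodash');

// Illustrative standalone version of the queue calculation used by
// WriteThreadPoolQueueMetric: sum the index, bulk, and write thread pool
// queues for one date-histogram bucket, or return null when no pool
// reported data at all.
function sumWriteQueues(bucket) {
  const index = _.get(bucket, 'index.value', null);
  const bulk = _.get(bucket, 'bulk.value', null);
  const write = _.get(bucket, 'write.value', null);

  if (index !== null || bulk !== null || write !== null) {
    return (index || 0) + (bulk || 0) + (write || 0);
  }

  // ignore the data if none of the pools exist
  return null;
}

// A pre-6.3 node reports only the bulk (and deprecated index) pools:
console.log(sumWriteQueues({ bulk: { value: 7 }, index: { value: 2 } })); // 9
// A 7.x node reports only the write pool:
console.log(sumWriteQueues({ write: { value: 3 } }));                     // 3
// No thread pool data at all:
console.log(sumWriteQueues({}));                                          // null

The rejections metric follows the same pattern, but sums per-second normalized derivatives and clamps negative values (counter resets) to zero, as shown in the WriteThreadPoolRejectedMetric diff below.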
@@ -2860,36 +2860,6 @@ Object {
    "units": "B",
    "uuidField": "source_node.uuid",
  },
  "node_index_threads_bulk_queue": Object {
    "app": "elasticsearch",
    "derivative": true,
    "description": "Number of bulk operations in the queue.",
    "field": "node_stats.thread_pool.bulk.queue",
    "format": "0,0.[00]",
    "label": "Bulk Queue",
    "metricAgg": "max",
    "min": 0,
    "timestampField": "timestamp",
    "title": "Indexing Threads",
    "type": "node",
    "units": "",
    "uuidField": "source_node.uuid",
  },
  "node_index_threads_bulk_rejected": Object {
    "app": "elasticsearch",
    "derivative": true,
    "description": "Number of bulk operations that have been rejected, which occurs when the queue is full.",
    "field": "node_stats.thread_pool.bulk.rejected",
    "format": "0,0.[00]",
    "label": "Bulk Rejections",
    "metricAgg": "max",
    "min": 0,
    "timestampField": "timestamp",
    "title": "Indexing Threads",
    "type": "node",
    "units": "",
    "uuidField": "source_node.uuid",
  },
  "node_index_threads_get_queue": Object {
    "app": "elasticsearch",
    "derivative": true,
@@ -2920,36 +2890,6 @@ Object {
    "units": "",
    "uuidField": "source_node.uuid",
  },
  "node_index_threads_index_queue": Object {
    "app": "elasticsearch",
    "derivative": true,
    "description": "Number of non-bulk, index operations in the queue.",
    "field": "node_stats.thread_pool.index.queue",
    "format": "0,0.[00]",
    "label": "Index Queue",
    "metricAgg": "max",
    "min": 0,
    "timestampField": "timestamp",
    "title": "Indexing Threads",
    "type": "node",
    "units": "",
    "uuidField": "source_node.uuid",
  },
  "node_index_threads_index_rejected": Object {
    "app": "elasticsearch",
    "derivative": true,
    "description": "Number of non-bulk, index operations that have been rejected, which occurs when the queue is full. Generally indicates that bulk should be used.",
    "field": "node_stats.thread_pool.index.rejected",
    "format": "0,0.[00]",
    "label": "Index Rejections",
    "metricAgg": "max",
    "min": 0,
    "timestampField": "timestamp",
    "title": "Indexing Threads",
    "type": "node",
    "units": "",
    "uuidField": "source_node.uuid",
  },
  "node_index_threads_search_queue": Object {
    "app": "elasticsearch",
    "derivative": true,
@@ -2980,6 +2920,91 @@ Object {
    "units": "",
    "uuidField": "source_node.uuid",
  },
  "node_index_threads_write_queue": Object {
    "app": "elasticsearch",
    "calculation": [Function],
    "dateHistogramSubAggs": Object {
      "bulk": Object {
        "max": Object {
          "field": "node_stats.thread_pool.bulk.queue",
        },
      },
      "index": Object {
        "max": Object {
          "field": "node_stats.thread_pool.index.queue",
        },
      },
      "write": Object {
        "max": Object {
          "field": "node_stats.thread_pool.write.queue",
        },
      },
    },
    "derivative": false,
    "description": "Number of index, bulk, and write operations in the queue. The bulk threadpool was renamed to write in 6.3, and the index threadpool is deprecated.",
    "field": "node_stats.thread_pool.write.queue",
    "format": "0.[00]",
    "label": "Write Queue",
    "metricAgg": "max",
    "timestampField": "timestamp",
    "title": "Indexing Threads",
    "type": "node",
    "units": "",
    "uuidField": "source_node.uuid",
  },
  "node_index_threads_write_rejected": Object {
    "app": "elasticsearch",
    "calculation": [Function],
    "dateHistogramSubAggs": Object {
      "bulk_deriv": Object {
        "derivative": Object {
          "buckets_path": "bulk_rejections",
          "gap_policy": "skip",
          "unit": "1s",
        },
      },
      "bulk_rejections": Object {
        "max": Object {
          "field": "node_stats.thread_pool.bulk.rejected",
        },
      },
      "index_deriv": Object {
        "derivative": Object {
          "buckets_path": "index_rejections",
          "gap_policy": "skip",
          "unit": "1s",
        },
      },
      "index_rejections": Object {
        "max": Object {
          "field": "node_stats.thread_pool.index.rejected",
        },
      },
      "write_deriv": Object {
        "derivative": Object {
          "buckets_path": "write_rejections",
          "gap_policy": "skip",
          "unit": "1s",
        },
      },
      "write_rejections": Object {
        "max": Object {
          "field": "node_stats.thread_pool.write.rejected",
        },
      },
    },
    "derivative": false,
    "description": "Number of index, bulk, and write operations that have been rejected, which occurs when the queue is full. The bulk threadpool was renamed to write in 6.3, and the index threadpool is deprecated.",
    "field": "node_stats.thread_pool.write.rejected",
    "format": "0.[00]",
    "label": "Write Rejections",
    "metricAgg": "max",
    "timestampField": "timestamp",
    "title": "Indexing Threads",
    "type": "node",
    "units": "",
    "uuidField": "source_node.uuid",
  },
  "node_index_time": Object {
    "app": "elasticsearch",
    "derivative": true,
@@ -310,6 +310,109 @@ export class ThreadPoolRejectedMetric extends ElasticsearchMetric {

}

export class WriteThreadPoolQueueMetric extends ElasticsearchMetric {

  constructor(opts) {
    super({
      ...opts,
      field: 'node_stats.thread_pool.write.queue', // in 7.0, we can only check for this threadpool
      type: 'node',
      format: SMALL_FLOAT,
      metricAgg: 'max',
      units: ''
    });

    this.dateHistogramSubAggs = {
      index: {
        max: { field: 'node_stats.thread_pool.index.queue' }
      },
      bulk: {
        max: { field: 'node_stats.thread_pool.bulk.queue' }
      },
      write: {
        max: { field: 'node_stats.thread_pool.write.queue' }
      },
    };

    this.calculation = (bucket) => {
      const index = _.get(bucket, 'index.value', null);
      const bulk = _.get(bucket, 'bulk.value', null);
      const write = _.get(bucket, 'write.value', null);

      if (index !== null || bulk !== null || write !== null) {
        return (index || 0) + (bulk || 0) + (write || 0);
      }

      // ignore the data if none of them exist
      return null;
    };
  }

}

export class WriteThreadPoolRejectedMetric extends ElasticsearchMetric {

  constructor(opts) {
    super({
      ...opts,
      field: 'node_stats.thread_pool.write.rejected', // in 7.0, we can only check for this threadpool
      type: 'node',
      format: SMALL_FLOAT,
      metricAgg: 'max',
      units: ''
    });

    this.dateHistogramSubAggs = {
      index_rejections: {
        max: { field: 'node_stats.thread_pool.index.rejected' }
      },
      bulk_rejections: {
        max: { field: 'node_stats.thread_pool.bulk.rejected' }
      },
      write_rejections: {
        max: { field: 'node_stats.thread_pool.write.rejected' }
      },
      index_deriv: {
        derivative: {
          buckets_path: 'index_rejections',
          gap_policy: 'skip',
          unit: NORMALIZED_DERIVATIVE_UNIT,
        }
      },
      bulk_deriv: {
        derivative: {
          buckets_path: 'bulk_rejections',
          gap_policy: 'skip',
          unit: NORMALIZED_DERIVATIVE_UNIT,
        }
      },
      write_deriv: {
        derivative: {
          buckets_path: 'write_rejections',
          gap_policy: 'skip',
          unit: NORMALIZED_DERIVATIVE_UNIT,
        }
      },
    };

    this.calculation = (bucket) => {
      const index = _.get(bucket, 'index_deriv.normalized_value', null);
      const bulk = _.get(bucket, 'bulk_deriv.normalized_value', null);
      const write = _.get(bucket, 'write_deriv.normalized_value', null);

      if (index !== null || bulk !== null || write !== null) {
        const valueOrZero = value => value < 0 ? 0 : (value || 0);

        return valueOrZero(index) + valueOrZero(bulk) + valueOrZero(write);
      }

      // ignore the data if none of them exist
      return null;
    };
  }

}

/**
 * A generic {@code class} for collecting Index Memory metrics.
 *
@@ -30,6 +30,8 @@ import {
  SingleIndexMemoryMetric,
  ThreadPoolQueueMetric,
  ThreadPoolRejectedMetric,
  WriteThreadPoolQueueMetric,
  WriteThreadPoolRejectedMetric,
} from './classes';

import {
@@ -653,29 +655,22 @@ const metricInstances = {
    label: 'Index Writer',
    description: 'Heap memory used by the Index Writer. This is NOT a part of Lucene Total.'
  }),
  'node_index_threads_bulk_queue': new ElasticsearchMetric({
    field: 'node_stats.thread_pool.bulk.queue',
  'node_index_threads_write_queue': new WriteThreadPoolQueueMetric({
    title: 'Indexing Threads',
    label: 'Bulk Queue',
    description: 'Number of bulk operations in the queue.',
    type: 'node',
    derivative: true,
    format: LARGE_FLOAT,
    metricAgg: 'max',
    units: '',
    min: 0
    label: 'Write Queue',
    description: (
      'Number of index, bulk, and write operations in the queue. ' +
      'The bulk threadpool was renamed to write in 6.3, and the index threadpool is deprecated.'
    ),
  }),
  'node_index_threads_bulk_rejected': new ElasticsearchMetric({
  'node_index_threads_write_rejected': new WriteThreadPoolRejectedMetric({
    field: 'node_stats.thread_pool.bulk.rejected',
    title: 'Indexing Threads',
    label: 'Bulk Rejections',
    description: 'Number of bulk operations that have been rejected, which occurs when the queue is full.',
    type: 'node',
    derivative: true,
    format: LARGE_FLOAT,
    metricAgg: 'max',
    units: '',
    min: 0
    label: 'Write Rejections',
    description: (
      'Number of index, bulk, and write operations that have been rejected, which occurs when the queue is full. ' +
      'The bulk threadpool was renamed to write in 6.3, and the index threadpool is deprecated.'
    ),
  }),
  'node_index_threads_get_queue': new ElasticsearchMetric({
    field: 'node_stats.thread_pool.get.queue',
@@ -701,32 +696,6 @@ const metricInstances = {
    units: '',
    min: 0
  }),
  'node_index_threads_index_queue': new ElasticsearchMetric({
    field: 'node_stats.thread_pool.index.queue',
    title: 'Indexing Threads',
    label: 'Index Queue',
    description: 'Number of non-bulk, index operations in the queue.',
    type: 'node',
    derivative: true,
    format: LARGE_FLOAT,
    metricAgg: 'max',
    units: '',
    min: 0
  }),
  'node_index_threads_index_rejected': new ElasticsearchMetric({
    field: 'node_stats.thread_pool.index.rejected',
    title: 'Indexing Threads',
    label: 'Index Rejections',
    description:
      'Number of non-bulk, index operations that have been rejected, which occurs when the queue is full. ' +
      'Generally indicates that bulk should be used.',
    type: 'node',
    derivative: true,
    format: LARGE_FLOAT,
    metricAgg: 'max',
    units: '',
    min: 0
  }),
  'node_index_threads_search_queue': new ElasticsearchMetric({
    field: 'node_stats.thread_pool.search.queue',
    title: 'Read Threads',
@@ -63,10 +63,8 @@ export const metricSets = {
    },
    {
      keys: [
        'node_index_threads_bulk_queue',
        'node_index_threads_bulk_rejected',
        'node_index_threads_index_queue',
        'node_index_threads_index_rejected'
        'node_index_threads_write_queue',
        'node_index_threads_write_rejected'
      ],
      name: 'node_index_threads'
    },