[role="xpack"]
[[get-ml-info]]
= Get machine learning info API

[subs="attributes"]
++++
<titleabbrev>Get {ml} info</titleabbrev>
++++

Returns defaults and limits used by machine learning.

[[get-ml-info-request]]
== {api-request-title}

`GET _ml/info`

[[get-ml-info-prereqs]]
== {api-prereq-title}

Requires the `monitor_ml` cluster privilege. This privilege is included in the
`machine_learning_user` built-in role.
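
For example, a minimal custom role that grants only this privilege could be
created with the security API as sketched below; the role name
`ml_info_reader` is illustrative only:

[source,console]
----
PUT _security/role/ml_info_reader
{
  "cluster": [ "monitor_ml" ]
}
----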

[[get-ml-info-desc]]
== {api-description-title}

This endpoint is designed to be used by a user interface that needs to fully
understand machine learning configurations where some options are not specified,
meaning that the defaults should be used. This endpoint may be used to find out
what those defaults are. It also provides information about the maximum size
of {ml} jobs that could run in the current cluster configuration.
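
For example, an anomaly detection job created without an explicit
`model_memory_limit` falls back to the default reported by this API (`1gb` in
the response shown below). The job ID and field names in this sketch are
illustrative only:

[source,console]
----
PUT _ml/anomaly_detectors/example-job
{
  "analysis_config": {
    "bucket_span": "15m",
    "detectors": [
      {
        "function": "mean",
        "field_name": "responsetime"
      }
    ]
  },
  "data_description": {
    "time_field": "timestamp"
  }
}
----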

[[get-ml-info-example]]
== {api-examples-title}

The endpoint takes no arguments:

[source,console]
--------------------------------------------------
GET _ml/info
--------------------------------------------------
// TEST

This is a possible response:

[source,console-result]
----
{
  "defaults" : {
    "anomaly_detectors" : {
      "categorization_analyzer" : {
        "char_filter" : [
          "first_line_with_letters"
        ],
        "tokenizer" : "ml_standard",
        "filter" : [
          {
            "type" : "stop",
            "stopwords" : [
              "Monday",
              "Tuesday",
              "Wednesday",
              "Thursday",
              "Friday",
              "Saturday",
              "Sunday",
              "Mon",
              "Tue",
              "Wed",
              "Thu",
              "Fri",
              "Sat",
              "Sun",
              "January",
              "February",
              "March",
              "April",
              "May",
              "June",
              "July",
              "August",
              "September",
              "October",
              "November",
              "December",
              "Jan",
              "Feb",
              "Mar",
              "Apr",
              "May",
              "Jun",
              "Jul",
              "Aug",
              "Sep",
              "Oct",
              "Nov",
              "Dec",
              "GMT",
              "UTC"
            ]
          },
          {
            "type": "limit",
            "max_token_count": "100"
          }
        ]
      },
      "model_memory_limit" : "1gb",
      "categorization_examples_limit" : 4,
      "model_snapshot_retention_days" : 10,
      "daily_model_snapshot_retention_after_days" : 1
    },
    "datafeeds" : {
      "scroll_size" : 1000
    }
  },
  "upgrade_mode": false,
  "native_code" : {
    "version": "7.0.0",
    "build_hash": "99a07c016d5a73"
  },
  "limits" : {
    "effective_max_model_memory_limit": "28961mb",
    "total_ml_memory": "86883mb",
    "total_ml_processors": 16,
    "max_single_ml_node_processors": 16
  }
}
----
// TESTRESPONSE[s/"upgrade_mode": false/"upgrade_mode": $body.upgrade_mode/]
// TESTRESPONSE[s/"version": "7.0.0",/"version": "$body.native_code.version",/]
// TESTRESPONSE[s/"build_hash": "99a07c016d5a73"/"build_hash": "$body.native_code.build_hash"/]
// TESTRESPONSE[s/"effective_max_model_memory_limit": "28961mb"/"effective_max_model_memory_limit": "$body.limits.effective_max_model_memory_limit"/]
// TESTRESPONSE[s/"total_ml_memory": "86883mb"/"total_ml_memory": "$body.limits.total_ml_memory"/]
// TESTRESPONSE[s/"total_ml_processors": 16/"total_ml_processors": $body.limits.total_ml_processors/]
// TESTRESPONSE[s/"max_single_ml_node_processors": 16/"max_single_ml_node_processors": $body.limits.max_single_ml_node_processors/]