[role="xpack"]
|
|
[[infer-trained-model]]
|
|
= Infer trained model API
|
|
[subs="attributes"]
|
|
++++
|
|
<titleabbrev>Infer trained model</titleabbrev>
|
|
++++
|
|
|
|
Evaluates a trained model. The model may be any supervised model either trained
|
|
by {dfanalytics} or imported.
|
|
|
|
NOTE: For model deployments with caching enabled, results may be returned
|
|
directly from the {infer} cache.
|
|
|
|
[[infer-trained-model-request]]
|
|
== {api-request-title}
|
|
|
|
`POST _ml/trained_models/<model_id>/_infer`
|
|
`POST _ml/trained_models/<deployment_id>/_infer`
|
|
|
|
////
|
|
[[infer-trained-model-prereq]]
|
|
== {api-prereq-title}
|
|
|
|
////
|
|
////
|
|
[[infer-trained-model-desc]]
|
|
== {api-description-title}
|
|
|
|
////
|
|
|
|
[[infer-trained-model-path-params]]
|
|
== {api-path-parms-title}
|
|
|
|
`<model_id>`::
|
|
(Optional, string)
|
|
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=model-id-or-alias]
|
|
If you specify the `model_id` in the API call, and the model has multiple
|
|
deployments, a random deployment will be used. If the `model_id` matches the ID
|
|
of one of the deployments, that deployment will be used.
|
|
|
|
`<deployment_id>`::
|
|
(Optional, string)
|
|
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=deployment-id]
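
For example, the following sketch targets one specific deployment by its
deployment ID rather than letting the API choose among a model's deployments.
The deployment ID `my_deployment` is a placeholder.

[source,console]
--------------------------------------------------
POST _ml/trained_models/my_deployment/_infer
{
  "docs": [{"text_field": "Hi my name is Josh and I live in Berlin"}]
}
--------------------------------------------------
// TEST[skip:TBD]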

[[infer-trained-model-query-params]]
== {api-query-parms-title}

`timeout`::
(Optional, time)
Controls the amount of time to wait for {infer} results. Defaults to 10 seconds.
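
For example, the following sketch waits up to 30 seconds for the {infer}
results; the model ID `model2` is a placeholder, matching the examples below.

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer?timeout=30s
{
  "docs": [{"text_field": "The movie was awesome!!"}]
}
--------------------------------------------------
// TEST[skip:TBD]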

[[infer-trained-model-request-body]]
== {api-request-body-title}

`docs`::
(Required, array)
An array of objects to pass to the model for inference. The objects should
contain the fields matching your configured trained model input. Typically for
NLP models, the field name is `text_field`. Each {infer} input field specified
in this property must be a single string, not an array of strings.

//Begin inference_config
`inference_config`::
(Optional, object)
The default configuration for inference. This can be: `regression`,
`classification`, `fill_mask`, `ner`, `question_answering`,
`text_classification`, `text_embedding` or `zero_shot_classification`.
If `regression` or `classification`, it must match the `target_type` of the
underlying `definition.trained_model`. If `fill_mask`, `ner`,
`question_answering`, `text_classification`, or `text_embedding`, the
`model_type` must be `pytorch`. If not specified, the `inference_config`
from when the model was created is used.
+
.Properties of `inference_config`
[%collapsible%open]
====
`classification`:::
(Optional, object)
Classification configuration for inference.
+
.Properties of classification inference
[%collapsible%open]
=====
`num_top_classes`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-classification-num-top-classes]

`num_top_feature_importance_values`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-classification-num-top-feature-importance-values]

`prediction_field_type`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-classification-prediction-field-type]

`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

`top_classes_results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-classification-top-classes-results-field]
=====

`fill_mask`:::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-fill-mask]
+
.Properties of fill_mask inference
[%collapsible%open]
=====
`num_top_classes`::::
(Optional, integer)
Number of top predicted tokens to return for replacing the mask token. Defaults
to `0`.

`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
.Properties of tokenization
[%collapsible%open]
======
`bert`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]
+
.Properties of bert
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
+
.Properties of roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`mpnet`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
+
.Properties of mpnet
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`xlm_roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta]
+
.Properties of xlm_roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`bert_ja`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja]
+
.Properties of bert_ja
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
======
=====

`ner`:::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-ner]
+
.Properties of ner inference
[%collapsible%open]
=====
`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
.Properties of tokenization
[%collapsible%open]
======
`bert`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]
+
.Properties of bert
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
+
.Properties of roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`mpnet`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
+
.Properties of mpnet
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`xlm_roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta]
+
.Properties of xlm_roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`bert_ja`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja]
+
.Properties of bert_ja
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
======
=====

`pass_through`:::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-pass-through]
+
.Properties of pass_through inference
[%collapsible%open]
=====
`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
.Properties of tokenization
[%collapsible%open]
======
`bert`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]
+
.Properties of bert
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
+
.Properties of roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`mpnet`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
+
.Properties of mpnet
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`xlm_roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta]
+
.Properties of xlm_roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`bert_ja`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja]
+
.Properties of bert_ja
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
======
=====

`question_answering`:::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-question-answering]
+
.Properties of question_answering inference
[%collapsible%open]
=====
`max_answer_length`::::
(Optional, integer)
The maximum number of words in the answer. Defaults to `15`.

`num_top_classes`::::
(Optional, integer)
The number of top found answers to return. Defaults to `0`, meaning only the
best found answer is returned.

`question`::::
(Required, string)
The question to use when extracting an answer.

`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
It is recommended to set `max_sequence_length` to `386`, `span` to `128`, and
`truncate` to `none`.
+
.Properties of tokenization
[%collapsible%open]
======
`bert`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]
+
.Properties of bert
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
+
.Properties of roberta
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`mpnet`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
+
.Properties of mpnet
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`xlm_roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta]
+
.Properties of xlm_roberta
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`bert_ja`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja]
+
.Properties of bert_ja
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
======
=====

`regression`:::
(Optional, object)
Regression configuration for inference.
+
.Properties of regression inference
[%collapsible%open]
=====
`num_top_feature_importance_values`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-regression-num-top-feature-importance-values]

`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]
=====

`text_classification`:::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-classification]
+
.Properties of text_classification inference
[%collapsible%open]
=====
`classification_labels`::::
(Optional, string) An array of classification labels.

`num_top_classes`::::
(Optional, integer)
Specifies the number of top class predictions to return. Defaults to all classes
(-1).

`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
.Properties of tokenization
[%collapsible%open]
======
`bert`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]
+
.Properties of bert
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
+
.Properties of roberta
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`mpnet`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
+
.Properties of mpnet
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`xlm_roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta]
+
.Properties of xlm_roberta
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`bert_ja`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja]
+
.Properties of bert_ja
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
======
=====
`text_embedding`:::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-embedding]
+
.Properties of text_embedding inference
[%collapsible%open]
=====
`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
.Properties of tokenization
[%collapsible%open]
======
`bert`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]
+
.Properties of bert
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
+
.Properties of roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`mpnet`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
+
.Properties of mpnet
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`xlm_roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta]
+
.Properties of xlm_roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`bert_ja`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja]
+
.Properties of bert_ja
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
======
=====
`text_similarity`:::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity]
+
.Properties of text_similarity inference
[%collapsible%open]
=====
`span_score_combination_function`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-span-score-func]

`text`::::
(Required, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-text-similarity-text]

`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
.Properties of tokenization
[%collapsible%open]
======
`bert`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]
+
.Properties of bert
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`with_special_tokens`::::
(Optional, boolean)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-with-special-tokens]
=======
`roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
+
.Properties of roberta
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`mpnet`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
+
.Properties of mpnet
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`xlm_roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta]
+
.Properties of xlm_roberta
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`bert_ja`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja]
+
.Properties of bert_ja
[%collapsible%open]
=======
`span`::::
(Optional, integer)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-span]

`with_special_tokens`::::
(Optional, boolean)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja-with-special-tokens]
=======
======
=====
`zero_shot_classification`:::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-zero-shot-classification]
+
.Properties of zero_shot_classification inference
[%collapsible%open]
=====
`labels`::::
(Optional, array)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-zero-shot-classification-labels]

`multi_label`::::
(Optional, boolean)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-zero-shot-classification-multi-label]

`results_field`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-results-field]

`tokenization`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization]
+
.Properties of tokenization
[%collapsible%open]
======
`bert`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert]
+
.Properties of bert
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-roberta]
+
.Properties of roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`mpnet`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-mpnet]
+
.Properties of mpnet
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`xlm_roberta`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-xlm-roberta]
+
.Properties of xlm_roberta
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
`bert_ja`::::
(Optional, object)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-bert-ja]
+
.Properties of bert_ja
[%collapsible%open]
=======
`truncate`::::
(Optional, string)
include::{es-ref-dir}/ml/ml-shared.asciidoc[tag=inference-config-nlp-tokenization-truncate]
=======
======
=====
====
//End of inference_config
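
As a minimal sketch of how these defaults can be overridden at {infer} time,
the following request sets `num_top_classes` for a text classification model;
the model ID `model2` is a placeholder:

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer
{
  "docs": [{"text_field": "The movie was awesome!!"}],
  "inference_config": {
    "text_classification": {
      "num_top_classes": 2
    }
  }
}
--------------------------------------------------
// TEST[skip:TBD]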

////
[[infer-trained-model-results]]
== {api-response-body-title}
////
////
[[ml-get-trained-models-response-codes]]
== {api-response-codes-title}

////

[[infer-trained-model-example]]
== {api-examples-title}

The response depends on the kind of model.

For example, for {lang-ident} the response is the predicted language and the
score:

[source,console]
--------------------------------------------------
POST _ml/trained_models/lang_ident_model_1/_infer
{
  "docs":[{"text": "The fool doth think he is wise, but the wise man knows himself to be a fool."}]
}
--------------------------------------------------
// TEST[skip:TBD]

Here are the results, predicting English with a high probability:

[source,console-result]
----
{
  "inference_results": [
    {
      "predicted_value": "en",
      "prediction_probability": 0.9999658805366392,
      "prediction_score": 0.9999658805366392
    }
  ]
}
----
// NOTCONSOLE

When it is a text classification model, the response is the score and predicted
classification.

For example:

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer
{
  "docs": [{"text_field": "The movie was awesome!!"}]
}
--------------------------------------------------
// TEST[skip:TBD]

The API returns the predicted label and the confidence.

[source,console-result]
----
{
  "inference_results": [{
    "predicted_value" : "POSITIVE",
    "prediction_probability" : 0.9998667964092964
  }]
}
----
// NOTCONSOLE

For named entity recognition (NER) models, the response contains the annotated
text output and the recognized entities.

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer
{
  "docs": [{"text_field": "Hi my name is Josh and I live in Berlin"}]
}
--------------------------------------------------
// TEST[skip:TBD]

In this case, the API returns:

[source,console-result]
----
{
  "inference_results": [{
    "predicted_value" : "Hi my name is [Josh](PER&Josh) and I live in [Berlin](LOC&Berlin)",
    "entities" : [
      {
        "entity" : "Josh",
        "class_name" : "PER",
        "class_probability" : 0.9977303419824,
        "start_pos" : 14,
        "end_pos" : 18
      },
      {
        "entity" : "Berlin",
        "class_name" : "LOC",
        "class_probability" : 0.9992474323902818,
        "start_pos" : 33,
        "end_pos" : 39
      }
    ]
  }]
}
----
// NOTCONSOLE

Zero-shot classification models require extra configuration defining the class
labels. These labels are passed in the zero-shot inference config.

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer
{
  "docs": [
    {
      "text_field": "This is a very happy person"
    }
  ],
  "inference_config": {
    "zero_shot_classification": {
      "labels": [
        "glad",
        "sad",
        "bad",
        "rad"
      ],
      "multi_label": false
    }
  }
}
--------------------------------------------------
// TEST[skip:TBD]

The API returns the predicted label and the confidence, as well as the top
classes:

[source,console-result]
----
{
  "inference_results": [{
    "predicted_value" : "glad",
    "top_classes" : [
      {
        "class_name" : "glad",
        "class_probability" : 0.8061155063386439,
        "class_score" : 0.8061155063386439
      },
      {
        "class_name" : "rad",
        "class_probability" : 0.18218006158387956,
        "class_score" : 0.18218006158387956
      },
      {
        "class_name" : "bad",
        "class_probability" : 0.006325615787634201,
        "class_score" : 0.006325615787634201
      },
      {
        "class_name" : "sad",
        "class_probability" : 0.0053788162898424545,
        "class_score" : 0.0053788162898424545
      }
    ],
    "prediction_probability" : 0.8061155063386439
  }]
}
----
// NOTCONSOLE

Question answering models require extra configuration defining the question to
answer.

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer
{
  "docs": [
    {
      "text_field": "<long text to extract answer>"
    }
  ],
  "inference_config": {
    "question_answering": {
      "question": "<question to be answered>"
    }
  }
}
--------------------------------------------------
// TEST[skip:TBD]

The API returns a response similar to the following:

[source,console-result]
----
{
  "predicted_value": <string subsection of the text that is the answer>,
  "start_offset": <character offset in document to start>,
  "end_offset": <character offset end of the answer>,
  "prediction_probability": <prediction score>
}
----
// NOTCONSOLE
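
The optional `max_answer_length` and `num_top_classes` parameters documented
above can be supplied in the same override; a sketch, again using placeholder
model ID and text:

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer
{
  "docs": [
    {
      "text_field": "<long text to extract answer>"
    }
  ],
  "inference_config": {
    "question_answering": {
      "question": "<question to be answered>",
      "max_answer_length": 10,
      "num_top_classes": 3
    }
  }
}
--------------------------------------------------
// TEST[skip:TBD]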

Text similarity models require at least two sequences of text to compare. It's
possible to provide multiple strings of text to compare to another text
sequence:

[source,console]
--------------------------------------------------
POST _ml/trained_models/cross-encoder__ms-marco-tinybert-l-2-v2/_infer
{
  "docs":[{ "text_field": "Berlin has a population of 3,520,031 registered inhabitants in an area of 891.82 square kilometers."}, {"text_field": "New York City is famous for the Metropolitan Museum of Art."}],
  "inference_config": {
    "text_similarity": {
      "text": "How many people live in Berlin?"
    }
  }
}
--------------------------------------------------
// TEST[skip:TBD]

The response contains the prediction for every string that is compared to the
text provided in the `text_similarity`.`text` field:

[source,console-result]
----
{
  "inference_results": [
    {
      "predicted_value": 7.235751628875732
    },
    {
      "predicted_value": -11.562295913696289
    }
  ]
}
----
// NOTCONSOLE

The tokenization truncate option can be overridden when calling the API:

[source,console]
--------------------------------------------------
POST _ml/trained_models/model2/_infer
{
  "docs": [{"text_field": "The Amazon rainforest covers most of the Amazon basin in South America"}],
  "inference_config": {
    "ner": {
      "tokenization": {
        "bert": {
          "truncate": "first"
        }
      }
    }
  }
}
--------------------------------------------------
// TEST[skip:TBD]

When the input has been truncated due to the limit imposed by the model's
`max_sequence_length`, the `is_truncated` field appears in the response.

[source,console-result]
----
{
  "inference_results": [{
    "predicted_value" : "The [Amazon](LOC&Amazon) rainforest covers most of the [Amazon](LOC&Amazon) basin in [South America](LOC&South+America)",
    "entities" : [
      {
        "entity" : "Amazon",
        "class_name" : "LOC",
        "class_probability" : 0.9505460915724254,
        "start_pos" : 4,
        "end_pos" : 10
      },
      {
        "entity" : "Amazon",
        "class_name" : "LOC",
        "class_probability" : 0.9969992804311777,
        "start_pos" : 41,
        "end_pos" : 47
      }
    ],
    "is_truncated" : true
  }]
}
----
// NOTCONSOLE