[Enterprise Search] Switch ML inference config to use the multi field selector (#162657)

## Summary

Switching over to the multi-field selector component for non-ELSER
pipelines.

We're also cleaning up obsolete code:
- Remove `sourceField`, `destinationField`, `inferenceConfig` references
- Remove generation of field mappings and full pipeline definition from
`sourceField` and `destinationField`


![field_config_non_elser](4f5910dc-1347-4293-8dbc-113d3c70b799)

Use cases tested manually:
- Create ELSER pipeline
- Create non-ELSER pipeline
- Create non-ELSER pipeline with custom target field name
- Attach ELSER pipeline
- Attach non-ELSER pipeline

### Checklist
- [x] Any text added follows [EUI's writing
guidelines](https://elastic.github.io/eui/#/guidelines/writing), uses
sentence case text and includes [i18n
support](https://github.com/elastic/kibana/blob/main/packages/kbn-i18n/README.md)
- [ ] ~~[Documentation](https://www.elastic.co/guide/en/kibana/master/development-documentation.html)
was added for features that require explanation or tutorials~~ — will be done
in a separate PR
- [x] [Unit or functional
tests](https://www.elastic.co/guide/en/kibana/master/development-tests.html)
were updated or added to match the most common scenarios

---------

Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
This commit is contained in:
Adam Demjen 2023-07-31 11:26:00 -04:00 committed by GitHub
parent 0728003865
commit 9900c0875c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
25 changed files with 67 additions and 850 deletions

View file

@ -65,34 +65,34 @@ describe('getMlModelTypesForModelConfig lib function', () => {
});
describe('getRemoveProcessorForInferenceType lib function', () => {
const destinationField = 'dest';
const targetField = 'ml.inference.target';
it('should return expected value for TEXT_CLASSIFICATION', () => {
const inferenceType = SUPPORTED_PYTORCH_TASKS.TEXT_CLASSIFICATION;
const expected: IngestRemoveProcessor = {
field: destinationField,
field: targetField,
ignore_missing: true,
};
expect(getRemoveProcessorForInferenceType(destinationField, inferenceType)).toEqual(expected);
expect(getRemoveProcessorForInferenceType(targetField, inferenceType)).toEqual(expected);
});
it('should return expected value for TEXT_EMBEDDING', () => {
const inferenceType = SUPPORTED_PYTORCH_TASKS.TEXT_EMBEDDING;
const expected: IngestRemoveProcessor = {
field: destinationField,
field: targetField,
ignore_missing: true,
};
expect(getRemoveProcessorForInferenceType(destinationField, inferenceType)).toEqual(expected);
expect(getRemoveProcessorForInferenceType(targetField, inferenceType)).toEqual(expected);
});
it('should return undefined for unknown inferenceType', () => {
const inferenceType = 'wrongInferenceType';
expect(getRemoveProcessorForInferenceType(destinationField, inferenceType)).toBeUndefined();
expect(getRemoveProcessorForInferenceType(targetField, inferenceType)).toBeUndefined();
});
});
@ -377,10 +377,9 @@ describe('parseMlInferenceParametersFromPipeline', () => {
],
})
).toEqual({
destination_field: 'test',
model_id: 'test-model',
pipeline_name: 'unit-test',
source_field: 'body',
pipeline_definition: {},
field_mappings: [
{
sourceField: 'body',
@ -414,10 +413,9 @@ describe('parseMlInferenceParametersFromPipeline', () => {
],
})
).toEqual({
destination_field: 'body',
model_id: 'test-model',
pipeline_name: 'unit-test',
source_field: 'body',
pipeline_definition: {},
field_mappings: [
{
sourceField: 'body',

View file

@ -22,7 +22,7 @@ import {
import {
MlInferencePipeline,
CreateMlInferencePipelineParameters,
CreateMLInferencePipeline,
TrainedModelState,
InferencePipelineInferenceConfig,
} from '../types/pipelines';
@ -215,7 +215,7 @@ export const formatPipelineName = (rawName: string) =>
export const parseMlInferenceParametersFromPipeline = (
name: string,
pipeline: IngestPipeline
): CreateMlInferencePipelineParameters | null => {
): CreateMLInferencePipeline | null => {
const inferenceProcessors = pipeline?.processors
?.filter((p) => p.inference)
.map((p) => p.inference) as IngestInferenceProcessor[];
@ -239,12 +239,9 @@ export const parseMlInferenceParametersFromPipeline = (
return fieldMappings.length === 0
? null
: {
destination_field: fieldMappings[0].targetField // Backward compatibility - TODO: remove after multi-field selector is implemented for all inference types
? stripMlInferencePrefix(fieldMappings[0].targetField)
: '',
model_id: inferenceProcessors[0].model_id,
pipeline_name: name,
source_field: fieldMappings[0].sourceField, // Backward compatibility - TODO: remove after multi-field selector is implemented for all inference types
pipeline_definition: {},
field_mappings: fieldMappings,
};
};
@ -278,8 +275,3 @@ export const parseModelStateReasonFromStats = (trainedModelStats?: Partial<MlTra
export const getMlInferencePrefixedFieldName = (fieldName: string) =>
fieldName.startsWith(ML_INFERENCE_PREFIX) ? fieldName : `${ML_INFERENCE_PREFIX}${fieldName}`;
const stripMlInferencePrefix = (fieldName: string) =>
fieldName.startsWith(ML_INFERENCE_PREFIX)
? fieldName.replace(ML_INFERENCE_PREFIX, '')
: fieldName;

View file

@ -74,18 +74,8 @@ export interface DeleteMlInferencePipelineResponse {
updated?: string;
}
export interface CreateMlInferencePipelineParameters {
destination_field?: string;
inference_config?: InferencePipelineInferenceConfig;
model_id: string;
pipeline_name: string;
source_field: string;
export interface CreateMLInferencePipeline {
field_mappings: FieldMapping[];
}
export interface CreateMLInferencePipelineDefinition {
field_mappings: FieldMapping[];
inference_config?: InferencePipelineInferenceConfig;
model_id: string;
pipeline_definition: MlInferencePipeline;
pipeline_name: string;

View file

@ -7,9 +7,8 @@
import { FieldMapping } from '../../../../../common/ml_inference_pipeline';
import {
CreateMLInferencePipelineDefinition,
CreateMLInferencePipeline,
MlInferencePipeline,
InferencePipelineInferenceConfig,
} from '../../../../../common/types/pipelines';
import { createApiLogic } from '../../../shared/api_logic/create_api_logic';
import { HttpLogic } from '../../../shared/http';
@ -17,7 +16,6 @@ import { HttpLogic } from '../../../shared/http';
export interface CreateMlInferencePipelineApiLogicArgs {
fieldMappings: FieldMapping[];
indexName: string;
inferenceConfig?: InferencePipelineInferenceConfig;
modelId: string;
pipelineDefinition: MlInferencePipeline;
pipelineName: string;
@ -31,9 +29,8 @@ export const createMlInferencePipeline = async (
args: CreateMlInferencePipelineApiLogicArgs
): Promise<CreateMlInferencePipelineResponse> => {
const route = `/internal/enterprise_search/indices/${args.indexName}/ml_inference/pipeline_processors`;
const params: CreateMLInferencePipelineDefinition = {
const params: CreateMLInferencePipeline = {
field_mappings: args.fieldMappings,
inference_config: args.inferenceConfig,
model_id: args.modelId,
pipeline_definition: args.pipelineDefinition,
pipeline_name: args.pipelineName,

View file

@ -8,10 +8,8 @@
export const mockMlInferenceValues: any = {
addInferencePipelineModal: {
configuration: {
destinationField: '',
modelID: '',
pipelineName: '',
sourceField: '',
},
indexName: '',
step: 0,

View file

@ -488,11 +488,9 @@ describe('AddInferencePipelineFlyout', () => {
...DEFAULT_VALUES.addInferencePipelineModal,
step: AddInferencePipelineSteps.Review,
configuration: {
destinationField: 'test',
existingPipeline: false,
modelID: 'test-model',
pipelineName: 'my-test-pipeline',
sourceField: 'body',
},
},
});
@ -514,11 +512,9 @@ describe('AddInferencePipelineFlyout', () => {
...DEFAULT_VALUES.addInferencePipelineModal,
step: AddInferencePipelineSteps.Review,
configuration: {
destinationField: 'test',
existingPipeline: true,
modelID: 'test-model',
pipelineName: 'my-test-pipeline',
sourceField: 'body',
},
},
});

View file

@ -13,7 +13,6 @@ import { shallow } from 'enzyme';
import { ConfigureFields } from './configure_fields';
import { MultiFieldMapping, SelectedFieldMappings } from './multi_field_selector';
import { SingleFieldMapping } from './single_field_selector';
describe('ConfigureFields', () => {
beforeEach(() => {
@ -26,20 +25,12 @@ describe('ConfigureFields', () => {
addInferencePipelineModal: { configuration: { existingPipeline: false } },
};
it('renders single field selector component if non-text expansion model is selected', () => {
setMockValues(mockValues);
const wrapper = shallow(<ConfigureFields />);
expect(wrapper.find(SingleFieldMapping)).toHaveLength(1);
expect(wrapper.find(MultiFieldMapping)).toHaveLength(0);
expect(wrapper.find(SelectedFieldMappings)).toHaveLength(0);
});
it('renders multi-field selector components if text expansion model is selected', () => {
it('renders multi-field selector components', () => {
setMockValues({
...mockValues,
isTextExpansionModelSelected: true,
});
const wrapper = shallow(<ConfigureFields />);
expect(wrapper.find(SingleFieldMapping)).toHaveLength(0);
expect(wrapper.find(MultiFieldMapping)).toHaveLength(1);
expect(wrapper.find(SelectedFieldMappings)).toHaveLength(1);
});
@ -50,7 +41,6 @@ describe('ConfigureFields', () => {
addInferencePipelineModal: { configuration: { existingPipeline: true } },
});
const wrapper = shallow(<ConfigureFields />);
expect(wrapper.find(SingleFieldMapping)).toHaveLength(0);
expect(wrapper.find(MultiFieldMapping)).toHaveLength(0);
expect(wrapper.find(SelectedFieldMappings)).toHaveLength(1);
});

View file

@ -16,11 +16,9 @@ import { FormattedMessage } from '@kbn/i18n-react';
import { InferenceConfiguration } from './inference_config';
import { MLInferenceLogic } from './ml_inference_logic';
import { MultiFieldMapping, SelectedFieldMappings } from './multi_field_selector';
import { SingleFieldMapping } from './single_field_selector';
export const ConfigureFields: React.FC = () => {
const {
isTextExpansionModelSelected,
addInferencePipelineModal: { configuration },
} = useValues(MLInferenceLogic);
const areInputsDisabled = configuration.existingPipeline !== false;
@ -75,24 +73,9 @@ export const ConfigureFields: React.FC = () => {
</EuiFlexGroup>
<EuiSpacer size="m" />
<EuiForm component="form">
{isTextExpansionModelSelected ? (
<>
{areInputsDisabled ? (
<></>
) : (
<>
<MultiFieldMapping />
<EuiSpacer size="l" />
</>
)}
<SelectedFieldMappings isReadOnly={areInputsDisabled} />
</>
) : (
<>
<SingleFieldMapping />
<InferenceConfiguration />
</>
)}
{areInputsDisabled || <MultiFieldMapping />}
<SelectedFieldMappings isReadOnly={areInputsDisabled} />
<InferenceConfiguration />
</EuiForm>
</>
);

View file

@ -43,9 +43,9 @@ const DEFAULT_VALUES: MLInferenceProcessorsValues = {
existingPipeline: undefined,
existingInferencePipelines: [],
formErrors: {
fieldMappings: 'Field is required.',
modelID: 'Field is required.',
pipelineName: 'Field is required.',
sourceField: 'Field is required.',
},
index: null,
isConfigureStepValid: false,
@ -336,11 +336,9 @@ describe('MlInferenceLogic', () => {
describe('mlInferencePipeline', () => {
it('returns undefined when configuration is invalid', () => {
MLInferenceLogic.actions.setInferencePipelineConfiguration({
destinationField: '',
modelID: '',
pipelineName: 'unit-test',
sourceField: '',
fieldMappings: [],
pipelineName: '', // Invalid
fieldMappings: [], // Invalid
targetField: '',
});
@ -349,11 +347,14 @@ describe('MlInferenceLogic', () => {
it('generates inference pipeline', () => {
MLModelsApiLogic.actions.apiSuccess([nerModel]);
MLInferenceLogic.actions.setInferencePipelineConfiguration({
destinationField: '',
modelID: nerModel.model_id,
pipelineName: 'unit-test',
sourceField: 'body',
fieldMappings: [],
fieldMappings: [
{
sourceField: 'body',
targetField: 'ml.inference.body',
},
],
targetField: '',
});
@ -362,10 +363,8 @@ describe('MlInferenceLogic', () => {
it('returns undefined when existing pipeline not yet selected', () => {
MLInferenceLogic.actions.setInferencePipelineConfiguration({
existingPipeline: true,
destinationField: '',
modelID: '',
pipelineName: '',
sourceField: '',
fieldMappings: [],
targetField: '',
});
@ -382,11 +381,14 @@ describe('MlInferenceLogic', () => {
});
MLInferenceLogic.actions.setInferencePipelineConfiguration({
existingPipeline: true,
destinationField: '',
modelID: '',
pipelineName: 'unit-test',
sourceField: '',
fieldMappings: [],
fieldMappings: [
{
sourceField: 'body',
targetField: 'ml.inference.body',
},
],
targetField: '',
});
expect(MLInferenceLogic.values.mlInferencePipeline).not.toBeUndefined();
@ -494,8 +496,8 @@ describe('MlInferenceLogic', () => {
it('has errors when configuration is empty', () => {
expect(MLInferenceLogic.values.formErrors).toEqual({
modelID: 'Field is required.',
fieldMappings: 'Field is required.',
pipelineName: 'Field is required.',
sourceField: 'Field is required.',
});
});
it('has error for invalid pipeline names', () => {
@ -503,7 +505,12 @@ describe('MlInferenceLogic', () => {
...MLInferenceLogic.values.addInferencePipelineModal.configuration,
modelID: 'unit-test-model',
existingPipeline: false,
sourceField: 'body',
fieldMappings: [
{
sourceField: 'body',
targetField: 'ml.inference.body',
},
],
pipelineName: 'Invalid Pipeline Name',
});
const expectedErrors = {
@ -528,7 +535,12 @@ describe('MlInferenceLogic', () => {
pipelineName: 'unit-test-pipeline',
modelID: 'unit-test-model',
existingPipeline: false,
sourceField: 'body',
fieldMappings: [
{
sourceField: 'body',
targetField: 'ml.inference.body',
},
],
});
MLInferenceLogic.actions.fetchPipelineSuccess({
'mock-pipeline': {},
@ -546,10 +558,8 @@ describe('MlInferenceLogic', () => {
const mockModelConfiguration = {
...DEFAULT_VALUES.addInferencePipelineModal,
configuration: {
destinationField: 'mock_destination_field',
modelID: 'mock-model-id',
pipelineName: 'mock-pipeline-name',
sourceField: 'mock_text_field',
},
indexName: 'my-index-123',
};
@ -561,6 +571,7 @@ describe('MlInferenceLogic', () => {
configuration: {
...mockModelConfiguration.configuration,
modelID: textExpansionModel.model_id,
fieldMappings: [],
},
},
});
@ -589,37 +600,6 @@ describe('MlInferenceLogic', () => {
pipelineName: mockModelConfiguration.configuration.pipelineName,
});
});
it('calls makeCreatePipelineRequest with passed pipelineDefinition and default fieldMappings', () => {
mount({
...DEFAULT_VALUES,
addInferencePipelineModal: {
...mockModelConfiguration,
configuration: {
...mockModelConfiguration.configuration,
modelID: nerModel.model_id,
},
},
});
jest.spyOn(MLInferenceLogic.actions, 'makeCreatePipelineRequest');
MLModelsApiLogic.actions.apiSuccess([nerModel]);
MLInferenceLogic.actions.createPipeline();
expect(MLInferenceLogic.actions.makeCreatePipelineRequest).toHaveBeenCalledWith({
indexName: mockModelConfiguration.indexName,
inferenceConfig: undefined,
fieldMappings: [
{
sourceField: mockModelConfiguration.configuration.sourceField,
targetField: `ml.inference.${mockModelConfiguration.configuration.destinationField}`,
},
],
modelId: nerModel.model_id,
pipelineDefinition: expect.any(Object), // Generation logic is tested elsewhere
pipelineName: mockModelConfiguration.configuration.pipelineName,
});
});
});
describe('startTextExpansionModelSuccess', () => {
it('fetches ml models', () => {

View file

@ -89,10 +89,8 @@ import {
} from './utils';
export const EMPTY_PIPELINE_CONFIGURATION: InferencePipelineConfiguration = {
destinationField: '',
modelID: '',
pipelineName: '',
sourceField: '',
targetField: '',
};
@ -308,14 +306,7 @@ export const MLInferenceLogic = kea<
actions.makeCreatePipelineRequest({
indexName,
inferenceConfig: configuration.inferenceConfig,
fieldMappings: configuration.fieldMappings || [
// Temporary while we're using single fields for non-ELSER pipelines
{
sourceField: configuration.sourceField,
targetField: getMlInferencePrefixedFieldName(configuration.destinationField),
},
],
fieldMappings: configuration.fieldMappings ?? [],
modelId: configuration.modelID,
pipelineDefinition: mlInferencePipeline!,
pipelineName: configuration.pipelineName,
@ -327,13 +318,11 @@ export const MLInferenceLogic = kea<
const params = parseMlInferenceParametersFromPipeline(pipelineName, pipeline);
if (params === null) return;
actions.setInferencePipelineConfiguration({
destinationField: params.destination_field ?? '',
existingPipeline: true,
modelID: params.model_id,
pipelineName,
sourceField: params.source_field,
fieldMappings: params.field_mappings,
targetField: params.destination_field ?? '',
targetField: '',
});
},
setIndexName: ({ indexName }) => {
@ -545,13 +534,7 @@ export const MLInferenceLogic = kea<
return generateMlInferencePipelineBody({
model,
pipelineName: configuration.pipelineName,
fieldMappings: configuration.fieldMappings || [
{
sourceField: configuration.sourceField,
targetField:
configuration.destinationField || formatPipelineName(configuration.pipelineName),
},
],
fieldMappings: configuration.fieldMappings ?? [],
inferenceConfig: configuration.inferenceConfig,
});
},

View file

@ -1,87 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { setMockValues } from '../../../../../__mocks__/kea_logic';
import React from 'react';
import { shallow } from 'enzyme';
import { EuiFieldText, EuiSelect } from '@elastic/eui';
import { SingleFieldMapping } from './single_field_selector';
const DEFAULT_VALUES = {
addInferencePipelineModal: {
configuration: {
sourceField: 'my-source-field',
destinationField: 'my-target-field',
},
},
formErrors: {},
sourceFields: ['my-source-field1', 'my-source-field2', 'my-source-field3'],
supportedMLModels: [],
};
describe('SingleFieldMapping', () => {
beforeEach(() => {
jest.clearAllMocks();
setMockValues({});
});
it('renders source field selector and target field text field', () => {
setMockValues(DEFAULT_VALUES);
const wrapper = shallow(<SingleFieldMapping />);
expect(wrapper.find(EuiSelect)).toHaveLength(1);
const select = wrapper.find(EuiSelect);
expect(select.prop('options')).toEqual([
{
disabled: true,
text: 'Select a schema field',
value: '',
},
{
text: 'my-source-field1',
value: 'my-source-field1',
},
{
text: 'my-source-field2',
value: 'my-source-field2',
},
{
text: 'my-source-field3',
value: 'my-source-field3',
},
]);
expect(select.prop('value')).toEqual('my-source-field');
expect(wrapper.find(EuiFieldText)).toHaveLength(1);
const textField = wrapper.find(EuiFieldText);
expect(textField.prop('value')).toEqual('my-target-field');
});
it('disables inputs when selecting an existing pipeline', () => {
setMockValues({
...DEFAULT_VALUES,
addInferencePipelineModal: {
...DEFAULT_VALUES.addInferencePipelineModal,
configuration: {
...DEFAULT_VALUES.addInferencePipelineModal.configuration,
existingPipeline: true,
},
},
});
const wrapper = shallow(<SingleFieldMapping />);
expect(wrapper.find(EuiSelect)).toHaveLength(1);
const select = wrapper.find(EuiSelect);
expect(select.prop('disabled')).toBe(true);
expect(wrapper.find(EuiFieldText)).toHaveLength(1);
const textField = wrapper.find(EuiFieldText);
expect(textField.prop('disabled')).toBe(true);
});
});

View file

@ -1,145 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import React from 'react';
import { useValues, useActions } from 'kea';
import {
EuiFieldText,
EuiFlexGroup,
EuiFlexItem,
EuiFormRow,
EuiLink,
EuiSelect,
} from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import { FormattedMessage } from '@kbn/i18n-react';
import { docLinks } from '../../../../../shared/doc_links';
import { IndexViewLogic } from '../../index_view_logic';
import { MLInferenceLogic } from './ml_inference_logic';
import { TargetFieldHelpText } from './target_field_help_text';
const NoSourceFieldsError: React.FC = () => (
<FormattedMessage
id="xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.error"
defaultMessage="Selecting a source field is required for pipeline configuration, but this index does not have a field mapping. {learnMore}"
values={{
learnMore: (
<EuiLink href={docLinks.elasticsearchMapping} target="_blank" color="danger">
{i18n.translate(
'xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.error.docLink',
{ defaultMessage: 'Learn more about field mapping' }
)}
</EuiLink>
),
}}
/>
);
export const SingleFieldMapping: React.FC = () => {
const {
addInferencePipelineModal: { configuration },
formErrors,
supportedMLModels,
sourceFields,
} = useValues(MLInferenceLogic);
const { setInferencePipelineConfiguration } = useActions(MLInferenceLogic);
const { ingestionMethod } = useValues(IndexViewLogic);
const { destinationField, modelID, pipelineName, sourceField } = configuration;
const isEmptySourceFields = (sourceFields?.length ?? 0) === 0;
const areInputsDisabled = configuration.existingPipeline !== false;
const selectedModel = supportedMLModels.find((model) => model.model_id === modelID);
return (
<>
<EuiFlexGroup>
<EuiFlexItem>
<EuiFormRow
fullWidth
label={i18n.translate(
'xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceFieldLabel',
{
defaultMessage: 'Source text field',
}
)}
error={isEmptySourceFields && <NoSourceFieldsError />}
isInvalid={isEmptySourceFields}
>
<EuiSelect
fullWidth
data-telemetry-id={`entSearchContent-${ingestionMethod}-pipelines-configureFields-selectSchemaField`}
disabled={areInputsDisabled}
value={sourceField}
options={[
{
disabled: true,
text: i18n.translate(
'xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.placeholder',
{ defaultMessage: 'Select a schema field' }
),
value: '',
},
...(sourceFields?.map((field) => ({
text: field,
value: field,
})) ?? []),
]}
onChange={(e) =>
setInferencePipelineConfiguration({
...configuration,
sourceField: e.target.value,
})
}
/>
</EuiFormRow>
</EuiFlexItem>
<EuiFlexItem>
<EuiFormRow
label={i18n.translate(
'xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.label',
{
defaultMessage: 'Target field (optional)',
}
)}
helpText={
formErrors.destinationField === undefined &&
configuration.existingPipeline !== true && (
<TargetFieldHelpText
pipelineName={pipelineName}
targetField={destinationField}
model={selectedModel}
/>
)
}
error={formErrors.destinationField}
isInvalid={formErrors.destinationField !== undefined}
fullWidth
>
<EuiFieldText
data-telemetry-id={`entSearchContent-${ingestionMethod}-pipelines-configureFields-targetField`}
disabled={areInputsDisabled}
placeholder="custom_field_name"
value={destinationField}
onChange={(e) =>
setInferencePipelineConfiguration({
...configuration,
destinationField: e.target.value,
})
}
fullWidth
/>
</EuiFormRow>
</EuiFlexItem>
</EuiFlexGroup>
</>
);
};

View file

@ -34,7 +34,7 @@ import './add_inference_pipeline_flyout.scss';
export const TestPipeline: React.FC = () => {
const {
addInferencePipelineModal: {
configuration: { sourceField, fieldMappings },
configuration: { fieldMappings },
indexName,
},
getDocumentsErr,
@ -168,7 +168,7 @@ export const TestPipeline: React.FC = () => {
`"${fieldMapping.sourceField}": "${sampleFieldValue}"`
)
.join(', ')
: `"${sourceField}":"${sampleFieldValue}"`
: `"my_field": "${sampleFieldValue}"`
}}}]`
),
null,

View file

@ -21,10 +21,8 @@ import { AddInferencePipelineSteps } from './types';
const DEFAULT_VALUES: TestPipelineValues = {
addInferencePipelineModal: {
configuration: {
destinationField: '',
modelID: '',
pipelineName: '',
sourceField: '',
targetField: '',
},
indexName: '',
@ -67,10 +65,8 @@ describe('TestPipelineLogic', () => {
jest.clearAllMocks();
mockMlInferenceValues.addInferencePipelineModal = {
configuration: {
destinationField: '',
modelID: '',
pipelineName: '',
sourceField: '',
targetField: '',
},
indexName: '',
@ -123,10 +119,8 @@ describe('TestPipelineLogic', () => {
describe('simulatePipeline', () => {
const mockModelConfiguration = {
configuration: {
destinationField: '',
modelID: nerModel.model_id,
pipelineName: 'mock-pipeline-name',
sourceField: 'mock_text_field',
},
indexName: 'my-index-123',
};

View file

@ -10,21 +10,18 @@ import { FieldMapping } from '../../../../../../../common/ml_inference_pipeline'
import { InferencePipelineInferenceConfig } from '../../../../../../../common/types/pipelines';
export interface InferencePipelineConfiguration {
destinationField: string;
existingPipeline?: boolean;
inferenceConfig?: InferencePipelineInferenceConfig;
modelID: string;
pipelineName: string;
sourceField: string;
fieldMappings?: FieldMapping[];
targetField: string;
}
export interface AddInferencePipelineFormErrors {
destinationField?: string;
modelID?: string;
fieldMappings?: string;
pipelineName?: string;
sourceField?: string;
}
export enum AddInferencePipelineSteps {

View file

@ -71,16 +71,9 @@ export const validateInferencePipelineFields = (
config: InferencePipelineConfiguration
): AddInferencePipelineFormErrors => {
const errors: AddInferencePipelineFormErrors = {};
// If there are field mappings, we don't need to validate the single source field
if (config.fieldMappings && Object.keys(config.fieldMappings).length > 0) {
return errors;
if ((config.fieldMappings ?? []).length === 0) {
errors.fieldMappings = FIELD_REQUIRED_ERROR;
}
if (config.sourceField.trim().length === 0) {
errors.sourceField = FIELD_REQUIRED_ERROR;
}
return errors;
};

View file

@ -23,9 +23,7 @@ const mockClient = {
describe('createMlInferencePipeline lib function', () => {
const pipelineName = 'my-pipeline';
const modelId = 'my-model-id';
const sourceField = 'my-source-field';
const destinationField = 'my-dest-field';
const pipelineDefinition = { processors: [] };
const inferencePipelineGeneratedName = getPrefixedInferencePipelineProcessorName(pipelineName);
mockClient.ml.getTrainedModels.mockImplementation(() =>
@ -58,11 +56,7 @@ describe('createMlInferencePipeline lib function', () => {
const actualResult = await createMlInferencePipeline(
pipelineName,
undefined,
modelId,
sourceField,
destinationField,
undefined, // Omitted inference config
pipelineDefinition,
mockClient as unknown as ElasticsearchClient
);
@ -73,11 +67,7 @@ describe('createMlInferencePipeline lib function', () => {
it('should convert spaces to underscores in the pipeline name', async () => {
await createMlInferencePipeline(
'my pipeline with spaces ',
undefined,
modelId,
sourceField,
destinationField,
undefined, // Omitted inference config
pipelineDefinition,
mockClient as unknown as ElasticsearchClient
);
@ -88,70 +78,6 @@ describe('createMlInferencePipeline lib function', () => {
);
});
it('should default the destination field to the pipeline name', async () => {
mockClient.ingest.getPipeline.mockImplementation(() => Promise.reject({ statusCode: 404 })); // Pipeline does not exist
mockClient.ingest.putPipeline.mockImplementation(() => Promise.resolve({ acknowledged: true }));
await createMlInferencePipeline(
pipelineName,
undefined,
modelId,
sourceField,
undefined, // Omitted destination field
undefined, // Omitted inference config
mockClient as unknown as ElasticsearchClient
);
// Verify the object passed to pipeline creation contains the default target field name
expect(mockClient.ingest.putPipeline).toHaveBeenCalledWith(
expect.objectContaining({
processors: expect.arrayContaining([
expect.objectContaining({
inference: expect.objectContaining({
target_field: `ml.inference.${pipelineName}`,
}),
}),
]),
})
);
});
it('should set inference config when provided', async () => {
mockClient.ingest.getPipeline.mockImplementation(() => Promise.reject({ statusCode: 404 })); // Pipeline does not exist
mockClient.ingest.putPipeline.mockImplementation(() => Promise.resolve({ acknowledged: true }));
await createMlInferencePipeline(
pipelineName,
undefined,
modelId,
sourceField,
destinationField,
{
zero_shot_classification: {
labels: ['foo', 'bar'],
},
},
mockClient as unknown as ElasticsearchClient
);
// Verify the object passed to pipeline creation contains the default target field name
expect(mockClient.ingest.putPipeline).toHaveBeenCalledWith(
expect.objectContaining({
processors: expect.arrayContaining([
expect.objectContaining({
inference: expect.objectContaining({
inference_config: {
zero_shot_classification: {
labels: ['foo', 'bar'],
},
},
}),
}),
]),
})
);
});
it('should throw an error without creating the pipeline if it already exists', () => {
mockClient.ingest.getPipeline.mockImplementation(() =>
Promise.resolve({
@ -161,11 +87,7 @@ describe('createMlInferencePipeline lib function', () => {
const actualResult = createMlInferencePipeline(
pipelineName,
undefined,
modelId,
sourceField,
destinationField,
undefined, // Omitted inference config
pipelineDefinition,
mockClient as unknown as ElasticsearchClient
);

View file

@ -8,16 +8,14 @@
import { IngestGetPipelineResponse, IngestPipeline } from '@elastic/elasticsearch/lib/api/types';
import { ElasticsearchClient } from '@kbn/core/server';
import { FieldMapping, formatPipelineName } from '../../../../../../common/ml_inference_pipeline';
import { FieldMapping } from '../../../../../../common/ml_inference_pipeline';
import { ErrorCode } from '../../../../../../common/types/error_codes';
import type {
PreparePipelineAndIndexForMlInferenceResult,
InferencePipelineInferenceConfig,
CreatePipelineResult,
} from '../../../../../../common/types/pipelines';
import { addSubPipelineToIndexSpecificMlPipeline } from '../../../../../utils/create_ml_inference_pipeline';
import { getPrefixedInferencePipelineProcessorName } from '../../../../../utils/ml_inference_pipeline_utils';
import { formatMlPipelineBody } from '../../../../pipelines/create_pipeline_definitions';
import { updateMlInferenceMappings } from '../update_ml_inference_mappings';
/**
@ -28,10 +26,7 @@ import { updateMlInferenceMappings } from '../update_ml_inference_mappings';
* @param pipelineName pipeline name set by the user.
* @param pipelineDefinition
* @param modelId model ID selected by the user.
* @param sourceField The document field that model will read.
* @param destinationField The document field that the model will write to.
* @param fieldMappings The array of objects representing the source field (text) names and target fields (ML output) names
* @param inferenceConfig The configuration for the model.
* @param esClient the Elasticsearch Client to use when retrieving pipeline and model details.
*/
export const preparePipelineAndIndexForMlInference = async (
@ -39,19 +34,12 @@ export const preparePipelineAndIndexForMlInference = async (
pipelineName: string,
pipelineDefinition: IngestPipeline | undefined,
modelId: string,
sourceField: string | undefined,
destinationField: string | null | undefined,
fieldMappings: FieldMapping[] | undefined,
inferenceConfig: InferencePipelineInferenceConfig | undefined,
esClient: ElasticsearchClient
): Promise<PreparePipelineAndIndexForMlInferenceResult> => {
const createPipelineResult = await createMlInferencePipeline(
pipelineName,
pipelineDefinition,
modelId,
sourceField,
destinationField,
inferenceConfig,
esClient
);
@ -77,19 +65,11 @@ export const preparePipelineAndIndexForMlInference = async (
* Creates a Machine Learning Inference pipeline with the given settings, if it doesn't exist yet.
* @param pipelineName pipeline name set by the user.
* @param pipelineDefinition full definition of the pipeline
* @param modelId model ID selected by the user.
* @param sourceField The document field that model will read.
* @param destinationField The document field that the model will write to.
* @param inferenceConfig The configuration for the model.
* @param esClient the Elasticsearch Client to use when retrieving pipeline and model details.
*/
export const createMlInferencePipeline = async (
pipelineName: string,
pipelineDefinition: IngestPipeline | undefined,
modelId: string | undefined,
sourceField: string | undefined,
destinationField: string | null | undefined,
inferenceConfig: InferencePipelineInferenceConfig | undefined,
esClient: ElasticsearchClient
): Promise<CreatePipelineResult> => {
const inferencePipelineGeneratedName = getPrefixedInferencePipelineProcessorName(pipelineName);
@ -107,24 +87,15 @@ export const createMlInferencePipeline = async (
throw new Error(ErrorCode.PIPELINE_ALREADY_EXISTS);
}
if (!(modelId && sourceField) && !pipelineDefinition) {
// TODO: See if we can defer this error handling to putPipeline()
if (!pipelineDefinition) {
throw new Error(ErrorCode.PARAMETER_CONFLICT);
}
const mlInferencePipeline =
modelId && sourceField
? await formatMlPipelineBody(
inferencePipelineGeneratedName,
modelId,
sourceField,
destinationField || formatPipelineName(pipelineName),
inferenceConfig,
esClient
)
: { ...pipelineDefinition, version: 1 };
await esClient.ingest.putPipeline({
id: inferencePipelineGeneratedName,
...mlInferencePipeline,
...pipelineDefinition,
version: 1,
});
return {

View file

@ -4,12 +4,9 @@
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { merge } from 'lodash';
import { ElasticsearchClient } from '@kbn/core/server';
import { createIndexPipelineDefinitions } from './create_pipeline_definitions';
import { formatMlPipelineBody } from './create_pipeline_definitions';
describe('createIndexPipelineDefinitions util function', () => {
const indexName = 'my-index';
@ -30,159 +27,3 @@ describe('createIndexPipelineDefinitions util function', () => {
expect(mockClient.ingest.putPipeline).toHaveBeenCalledTimes(3);
});
});
describe('formatMlPipelineBody util function', () => {
const pipelineName = 'ml-inference-my-ml-proc';
const modelId = 'my-model-id';
const modelInputField = 'my-model-input-field';
const modelType = 'pytorch';
const inferenceConfigKey = 'my-model-type';
const modelTypes = ['pytorch', 'my-model-type'];
const modelVersion = 3;
const sourceField = 'my-source-field';
const destField = 'my-dest-field';
const expectedResult = {
description: '',
processors: [
{
remove: {
field: `ml.inference.${destField}`,
ignore_missing: true,
},
},
{
inference: {
field_map: {
[sourceField]: modelInputField,
},
model_id: modelId,
target_field: `ml.inference.${destField}`,
on_failure: [
{
append: {
field: '_source._ingest.inference_errors',
allow_duplicates: false,
value: [
{
pipeline: pipelineName,
message: `Processor 'inference' in pipeline '${pipelineName}' failed for field '${sourceField}' with message '{{ _ingest.on_failure_message }}'`,
timestamp: '{{{ _ingest.timestamp }}}',
},
],
},
},
],
},
},
{
append: {
field: '_source._ingest.processors',
value: [
{
model_version: modelVersion,
pipeline: pipelineName,
processed_timestamp: '{{{ _ingest.timestamp }}}',
types: modelTypes,
},
],
},
},
],
version: 1,
};
const mockClient = {
ml: {
getTrainedModels: jest.fn(),
},
};
beforeEach(() => {
jest.clearAllMocks();
});
it('should return the pipeline body', async () => {
const mockResponse = {
count: 1,
trained_model_configs: [
{
inference_config: {
[inferenceConfigKey]: {},
},
input: { field_names: [modelInputField] },
model_id: modelId,
model_type: modelType,
version: modelVersion,
},
],
};
mockClient.ml.getTrainedModels.mockImplementation(() => Promise.resolve(mockResponse));
const actualResult = await formatMlPipelineBody(
pipelineName,
modelId,
sourceField,
destField,
undefined,
mockClient as unknown as ElasticsearchClient
);
expect(actualResult).toEqual(expectedResult);
expect(mockClient.ml.getTrainedModels).toHaveBeenCalledTimes(1);
});
it('should raise an error if no model found', async () => {
const mockError = new Error('No known trained model with model_id [my-model-id]');
mockClient.ml.getTrainedModels.mockImplementation(() => Promise.reject(mockError));
const asyncCall = formatMlPipelineBody(
pipelineName,
modelId,
sourceField,
destField,
undefined,
mockClient as unknown as ElasticsearchClient
);
await expect(asyncCall).rejects.toThrow(Error);
expect(mockClient.ml.getTrainedModels).toHaveBeenCalledTimes(1);
});
it('should insert a placeholder if model has no input fields', async () => {
const expectedResultWithNoInputField = merge({}, expectedResult, {
processors: [
{}, // append - we'll leave it untouched
{
inference: {
field_map: {
[sourceField]: 'MODEL_INPUT_FIELD',
},
},
},
],
});
const mockResponse = {
count: 1,
trained_model_configs: [
{
inference_config: {
[inferenceConfigKey]: {},
},
input: { field_names: [] },
model_id: modelId,
model_type: modelType,
version: modelVersion,
},
],
};
mockClient.ml.getTrainedModels.mockImplementation(() => Promise.resolve(mockResponse));
const actualResult = await formatMlPipelineBody(
pipelineName,
modelId,
sourceField,
destField,
undefined,
mockClient as unknown as ElasticsearchClient
);
expect(actualResult).toEqual(expectedResultWithNoInputField);
expect(mockClient.ml.getTrainedModels).toHaveBeenCalledTimes(1);
});
});

View file

@ -8,11 +8,6 @@
import { IngestPipeline } from '@elastic/elasticsearch/lib/api/types';
import { ElasticsearchClient } from '@kbn/core/server';
import { generateMlInferencePipelineBody } from '../../../common/ml_inference_pipeline';
import {
InferencePipelineInferenceConfig,
MlInferencePipeline,
} from '../../../common/types/pipelines';
import { getInferencePipelineNameFromIndexName } from '../../utils/ml_inference_pipeline_utils';
/**
@ -227,36 +222,3 @@ export const createIndexPipelineDefinitions = async (
throw error;
}
};
/**
 * Format the body of an ML inference pipeline for a specified model.
 * Does not create the pipeline, only returns JSON for the user to preview.
 * @param pipelineName name of the pipeline, used in the generated processor definitions.
 * @param modelId modelId selected by user.
 * @param sourceField The document field that model will read.
 * @param destinationField The document field that the model will write to.
 * @param inferenceConfig The configuration for the model.
 * @param esClient the Elasticsearch Client to use when retrieving model details.
 */
export const formatMlPipelineBody = async (
  pipelineName: string,
  modelId: string,
  sourceField: string,
  destinationField: string,
  inferenceConfig: InferencePipelineInferenceConfig | undefined,
  esClient: ElasticsearchClient
): Promise<MlInferencePipeline> => {
  // getTrainedModels raises a 404 if no model exists for the given ID
  const modelResponse = await esClient.ml.getTrainedModels({ model_id: modelId });
  const [model] = modelResponse.trained_model_configs;

  // The single source/destination pair is expressed as a one-element mapping list
  const fieldMappings = [{ sourceField, targetField: destinationField }];

  return generateMlInferencePipelineBody({
    fieldMappings,
    inferenceConfig,
    model,
    pipelineName,
  });
};

View file

@ -293,78 +293,6 @@ describe('Enterprise Search Managed Indices', () => {
mockRouter.shouldThrow(request);
});
it('responds with 400 BAD REQUEST with both source/target AND pipeline_definition', async () => {
await mockRouter.callRoute({
body: {
model_id: 'my-model-id',
pipeline_name: 'my-pipeline-name',
source_field: 'my-source-field',
destination_field: 'my-dest-field',
pipeline_definition: {
processors: [],
},
},
params: { indexName: 'my-index-name' },
});
expect(mockRouter.response.customError).toHaveBeenCalledWith(
expect.objectContaining({
statusCode: 400,
})
);
});
it('responds with 400 BAD REQUEST with none of source/target/model OR pipeline_definition', async () => {
await mockRouter.callRoute({
body: {
pipeline_name: 'my-pipeline-name',
},
params: { indexName: 'my-index-name' },
});
expect(mockRouter.response.customError).toHaveBeenCalledWith(
expect.objectContaining({
statusCode: 400,
})
);
});
it('creates an ML inference pipeline from model and source_field', async () => {
(preparePipelineAndIndexForMlInference as jest.Mock).mockImplementationOnce(() => {
return Promise.resolve({
added_to_parent_pipeline: true,
created_pipeline: true,
mapping_updated: false,
pipeline_id: 'ml-inference-my-pipeline-name',
});
});
await mockRouter.callRoute({
params: { indexName: 'my-index-name' },
body: mockRequestBody,
});
expect(preparePipelineAndIndexForMlInference).toHaveBeenCalledWith(
'my-index-name',
mockRequestBody.pipeline_name,
undefined,
mockRequestBody.model_id,
mockRequestBody.source_field,
mockRequestBody.destination_field,
undefined,
undefined,
mockClient.asCurrentUser
);
expect(mockRouter.response.ok).toHaveBeenCalledWith({
body: {
created: 'ml-inference-my-pipeline-name',
mapping_updated: false,
},
headers: { 'content-type': 'application/json' },
});
});
it('creates an ML inference pipeline from pipeline definition', async () => {
(preparePipelineAndIndexForMlInference as jest.Mock).mockImplementationOnce(() => {
return Promise.resolve({
@ -379,10 +307,11 @@ describe('Enterprise Search Managed Indices', () => {
params: { indexName: 'my-index-name' },
body: {
field_mappings: [],
model_id: mockRequestBody.model_id,
pipeline_definition: {
processors: [],
},
pipeline_name: 'my-pipeline-name',
pipeline_name: mockRequestBody.pipeline_name,
},
});
@ -392,11 +321,8 @@ describe('Enterprise Search Managed Indices', () => {
{
processors: [],
},
undefined,
undefined,
undefined,
mockRequestBody.model_id,
[],
undefined,
mockClient.asCurrentUser
);

View file

@ -394,21 +394,11 @@ export function registerIndexRoutes({
indexName: schema.string(),
}),
body: schema.object({
destination_field: schema.maybe(schema.nullable(schema.string())),
field_mappings: schema.maybe(
schema.arrayOf(
schema.object({ sourceField: schema.string(), targetField: schema.string() })
)
),
inference_config: schema.maybe(
schema.object({
zero_shot_classification: schema.maybe(
schema.object({
labels: schema.arrayOf(schema.string()),
})
),
})
),
model_id: schema.string(),
pipeline_definition: schema.maybe(
schema.object({
@ -418,7 +408,6 @@ export function registerIndexRoutes({
})
),
pipeline_name: schema.string(),
source_field: schema.maybe(schema.string()),
}),
},
},
@ -430,41 +419,9 @@ export function registerIndexRoutes({
model_id: modelId,
pipeline_name: pipelineName,
pipeline_definition: pipelineDefinition,
source_field: sourceField,
destination_field: destinationField,
inference_config: inferenceConfig,
field_mappings: fieldMappings,
} = request.body;
// additional validations
if ((pipelineDefinition || fieldMappings) && (sourceField || destinationField)) {
return createError({
errorCode: ErrorCode.PARAMETER_CONFLICT,
message: i18n.translate(
'xpack.enterpriseSearch.server.routes.createMlInferencePipeline.ParameterConflictError',
{
defaultMessage:
'pipeline_definition and field_mappings should only be provided if source_field and destination_field are not provided',
}
),
response,
statusCode: 400,
});
} else if (!((pipelineDefinition && fieldMappings) || (sourceField && modelId))) {
return createError({
errorCode: ErrorCode.PARAMETER_CONFLICT,
message: i18n.translate(
'xpack.enterpriseSearch.server.routes.createMlInferencePipeline.ParameterMissingError',
{
defaultMessage:
'either pipeline_definition AND fieldMappings or source_field AND model_id must be provided',
}
),
response,
statusCode: 400,
});
}
try {
// Create the sub-pipeline for inference
const createPipelineResult = await preparePipelineAndIndexForMlInference(
@ -472,10 +429,7 @@ export function registerIndexRoutes({
pipelineName,
pipelineDefinition,
modelId,
sourceField,
destinationField,
fieldMappings,
inferenceConfig,
client.asCurrentUser
);
return response.ok({

View file

@ -12148,7 +12148,6 @@
"xpack.enterpriseSearch.content.indices.deleteIndex.successToast.title": "Votre index {indexName} et toute configuration d'ingestion associée ont été supprimés avec succès",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.noModels.description": "Aucun de vos modèles entraînés de Machine Learning ne peut être utilisé par un pipeline d'inférence. {documentationLink}",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.configure.name.helpText": "Les noms de pipeline sont uniques dans un déploiement, et ils peuvent uniquement contenir des lettres, des chiffres, des traits de soulignement et des traits d'union. Cela créera un pipeline nommé {pipelineName}.",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.error": "La sélection d'un champ source est requise pour la configuration du pipeline, mais cet index n'a pas de mapping de champ. {learnMore}",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText.default": "Cela attribue un nom au champ qui contient le résultat d'inférence. Votre nom de champ recevra le préfixe \"ml.inference.\". S'il n'est pas défini, le nom par défaut sera \"ml.inference.{pipelineName}\"",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText.textClassificationModel": "De plus, la valeur_prévue (predicted_value) sera copiée sur \"{fieldName}\" si la probabilité de prédiction (prediction_probability) est supérieure à {probabilityThreshold}",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText.textEmbeddingModel": "De plus, la valeur_prévue (predicted_value) sera copiée sur \"{fieldName}\"",
@ -13586,12 +13585,9 @@
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.fieldMappings.tableCaption": "Mappings de champs",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.fieldMappings.targetFieldHeader": "Champs cibles",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.selectedFields": "Champs sélectionnés",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.error.docLink": "En savoir plus sur le mapping de champs",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.helpText": "Sélectionnez un champ existant ou tapez un nom de champ.",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.placeholder": "Sélectionner un champ de schéma",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceFieldLabel": "Champ source",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText": "Ce nom est créé automatiquement en fonction de votre champ source.",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.label": "Champ cible (facultatif)",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetFieldLabel": "Champ cible",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.title": "Sélectionner les mappings de champs",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.review.description": "Ce pipeline sera créé et injecté en tant que processeur dans votre pipeline par défaut pour cet index. Vous pourrez également utiliser ce nouveau pipeline de façon indépendante.",
@ -14577,8 +14573,6 @@
"xpack.enterpriseSearch.server.routes.createApiIndex.connectorExistsError": "Un connecteur existe déjà pour cet index",
"xpack.enterpriseSearch.server.routes.createApiIndex.crawlerExistsError": "Un robot d'indexation existe déjà pour cet index",
"xpack.enterpriseSearch.server.routes.createApiIndex.indexExistsError": "L'index existe déjà.",
"xpack.enterpriseSearch.server.routes.createMlInferencePipeline.ParameterConflictError": "pipeline_definition et field_mappings doivent uniquement être fournis si source_field, destination_field et model_id ne sont pas fournis.",
"xpack.enterpriseSearch.server.routes.createMlInferencePipeline.ParameterMissingError": "pipeline_definition ET fieldMappings ou source_field ET model_id doivent être fournis",
"xpack.enterpriseSearch.server.routes.createSearchApplication.searchApplciationExistsError": "Le nom de lapplication de recherche est déjà pris. Choisissez un autre nom.",
"xpack.enterpriseSearch.server.routes.indices.mlInference.pipelineProcessors.pipelineIsInUseError": "Le pipeline d'inférence est utilisé dans le pipeline géré \"{pipelineName}\" d'un autre index",
"xpack.enterpriseSearch.server.routes.recreateConnector.connectorExistsError": "Un connecteur existe déjà pour cet index",

View file

@ -12162,7 +12162,6 @@
"xpack.enterpriseSearch.content.indices.deleteIndex.successToast.title": "インデックス{indexName}と関連付けられたすべての統合構成が正常に削除されました",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.noModels.description": "推論パイプラインで使用できる学習済み機械学習モデルがありません。{documentationLink}",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.configure.name.helpText": "パイプライン名はデプロイ内で一意であり、文字、数字、アンダースコア、ハイフンのみを使用できます。これにより、{pipelineName}という名前のパイプラインが作成されます。",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.error": "パイプラインのソースフィールドを選択する必要があります。ただし、このインデックスにはフィールドマッピングがありません。{learnMore}",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText.default": "これは、推論結果を保持するフィールドの名前を指定します。\"ml.inference.\"というプレフィックスが付きます。設定されていない場合は、デフォルトで\"ml.inference.{pipelineName}\"となります",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText.textClassificationModel": "さらに、predicted_probabilityが{probabilityThreshold}より大きい場合、predicted_valueは\"{fieldName}\"にコピーされます。",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText.textEmbeddingModel": "さらにpredicted_valueは\"{fieldName}\"にコピーされます。",
@ -13600,12 +13599,9 @@
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.fieldMappings.tableCaption": "フィールドマッピング",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.fieldMappings.targetFieldHeader": "ターゲットフィールド",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.selectedFields": "選択したフィールド",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.error.docLink": "フィールドマッピングの詳細",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.helpText": "既存のフィールドを選択するか、フィールド名を入力してください。",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.placeholder": "スキーマフィールドを選択",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceFieldLabel": "ソースフィールド",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText": "この名前は、ソースフィールドに基づいて自動的に作成されます。",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.label": "ターゲットフィールド(任意)",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetFieldLabel": "ターゲットフィールド",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.title": "フィールドマッピングを選択",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.review.description": "このパイプラインが作成され、プロセッサーとしてこのインデックスのデフォルトパイプラインに挿入されます。この新しいパイプラインは単独でも使用できます。",
@ -14591,8 +14587,6 @@
"xpack.enterpriseSearch.server.routes.createApiIndex.connectorExistsError": "このインデックスのコネクターはすでに存在します",
"xpack.enterpriseSearch.server.routes.createApiIndex.crawlerExistsError": "このインデックスのクローラーはすでに存在します",
"xpack.enterpriseSearch.server.routes.createApiIndex.indexExistsError": "このインデックスはすでに存在します",
"xpack.enterpriseSearch.server.routes.createMlInferencePipeline.ParameterConflictError": "pipeline_definitionおよびfield_mappingsは、source_field、destination_field、およびmodel_idが指定されていない場合にのみ指定してください。",
"xpack.enterpriseSearch.server.routes.createMlInferencePipeline.ParameterMissingError": "pipeline_definitionとfieldMappingsの両方、またはsource_fieldとmodel_idの両方を指定する必要があります。",
"xpack.enterpriseSearch.server.routes.createSearchApplication.searchApplciationExistsError": "検索アプリケーション名はすでに取得されています。別の名前を選択してください。",
"xpack.enterpriseSearch.server.routes.indices.mlInference.pipelineProcessors.pipelineIsInUseError": "推論パイプラインは、別のインデックスの管理されたパイプライン'{pipelineName}'で使用されています。",
"xpack.enterpriseSearch.server.routes.recreateConnector.connectorExistsError": "このインデックスのコネクターはすでに存在します",

View file

@ -12162,7 +12162,6 @@
"xpack.enterpriseSearch.content.indices.deleteIndex.successToast.title": "您的索引 {indexName} 和任何关联的采集配置已成功删除",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.noModels.description": "您没有可供推理管道使用的已训练 Machine Learning 模型。{documentationLink}",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.configure.name.helpText": "管道名称在部署内唯一,并且只能包含字母、数字、下划线和连字符。这会创建名为 {pipelineName} 的管道。",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.error": "配置管道需要设置源字段,但此索引没有字段映射。{learnMore}",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText.default": "这会命名存放推理结果的字段。它将加有“ml.inference”前缀如果未设置将默认前缀为“ml.inference.{pipelineName}”",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText.textClassificationModel": "此外,如果 prediction_probability 大于 {probabilityThreshold},则会将 predicted_value 复制到“{fieldName}”",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText.textEmbeddingModel": "此外,还会将 predicted_value 复制到“{fieldName}”",
@ -13600,12 +13599,9 @@
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.fieldMappings.tableCaption": "字段映射",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.fieldMappings.targetFieldHeader": "目标字段",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.selectedFields": "选定字段",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.error.docLink": "详细了解字段映射",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.helpText": "选择现有字段或键入字段名称。",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceField.placeholder": "选择架构字段",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.sourceFieldLabel": "源字段",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.helpText": "此名称基于您的源字段自动创建。",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetField.label": "目标字段(可选)",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.targetFieldLabel": "目标字段",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.fields.title": "选择字段映射",
"xpack.enterpriseSearch.content.indices.pipelines.addInferencePipelineModal.steps.review.description": "将创建此管道并将其作为处理器注入到该索引的默认管道。您还可以独立使用这个新管道。",
@ -14591,8 +14587,6 @@
"xpack.enterpriseSearch.server.routes.createApiIndex.connectorExistsError": "此索引的连接器已存在",
"xpack.enterpriseSearch.server.routes.createApiIndex.crawlerExistsError": "此索引的网络爬虫已存在",
"xpack.enterpriseSearch.server.routes.createApiIndex.indexExistsError": "此索引已存在",
"xpack.enterpriseSearch.server.routes.createMlInferencePipeline.ParameterConflictError": "如果未提供 source_field、destination_field 和 model_id应仅提供 pipeline_definition 和 field_mappings",
"xpack.enterpriseSearch.server.routes.createMlInferencePipeline.ParameterMissingError": "必须提供 pipeline_definition 和 fieldMappings或 source_field 和 model_id",
"xpack.enterpriseSearch.server.routes.createSearchApplication.searchApplciationExistsError": "搜索应用程序名称已占用。请选择其他名称。",
"xpack.enterpriseSearch.server.routes.indices.mlInference.pipelineProcessors.pipelineIsInUseError": "推理管道已用在不同索引的托管管道“{pipelineName}”中",
"xpack.enterpriseSearch.server.routes.recreateConnector.connectorExistsError": "此索引的连接器已存在",