[Enterprise Search] fix ml inference with api index (#142673)

Updates the Pipelines logic to ensure you can configure ml inference
pipelines with API-based indices.
This commit is contained in:
Rodney Norris 2022-10-05 08:47:41 -05:00 committed by GitHub
parent c19ccdb96c
commit c9af6395b0
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
4 changed files with 55 additions and 19 deletions

View file

@ -85,7 +85,7 @@ export const IngestPipelinesCard: React.FC = () => {
<EuiFlexGroup alignItems="center">
<EuiFlexItem>
<EuiTitle size="xs">
<h4>{pipelineState.name}</h4>
<h4>{pipelineName}</h4>
</EuiTitle>
</EuiFlexItem>
<EuiFlexItem grow={false}>

View file

@ -33,12 +33,8 @@ import { PipelinesJSONConfigurations } from './pipelines_json_configurations';
import { PipelinesLogic } from './pipelines_logic';
export const SearchIndexPipelines: React.FC = () => {
const {
showAddMlInferencePipelineModal,
hasIndexIngestionPipeline,
index,
pipelineState: { name: pipelineName },
} = useValues(PipelinesLogic);
const { showAddMlInferencePipelineModal, hasIndexIngestionPipeline, index, pipelineName } =
useValues(PipelinesLogic);
const { closeAddMlInferencePipelineModal, openAddMlInferencePipelineModal } =
useActions(PipelinesLogic);
const apiIndex = isApiIndex(index);
@ -133,7 +129,7 @@ export const SearchIndexPipelines: React.FC = () => {
'xpack.enterpriseSearch.content.indices.pipelines.mlInferencePipelines.subtitleAPIindex',
{
defaultMessage:
"Inference pipelines will be run as processors from the Enterprise Search Ingest Pipeline. In order to use these pipeline on API-based indices you'll need to reference the {pipelineName} pipeline in your API requests.",
"Inference pipelines will be run as processors from the Enterprise Search Ingest Pipeline. In order to use these pipelines on API-based indices you'll need to reference the {pipelineName} pipeline in your API requests.",
values: {
pipelineName,
},

View file

@ -4,11 +4,13 @@
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { LogicMounter, mockFlashMessageHelpers } from '../../../../__mocks__/kea_logic';
import { connectorIndex } from '../../../__mocks__/view_index.mock';
import { apiIndex, connectorIndex } from '../../../__mocks__/view_index.mock';
import { IngestPipeline } from '@elastic/elasticsearch/lib/api/types';
import { UpdatePipelineApiLogic } from '../../../api/connector/update_pipeline_api_logic';
import { FetchCustomPipelineApiLogic } from '../../../api/index/fetch_custom_pipeline_api_logic';
import { FetchIndexApiLogic } from '../../../api/index/fetch_index_api_logic';
import { PipelinesLogic } from './pipelines_logic';
@ -40,6 +42,7 @@ describe('PipelinesLogic', () => {
const { mount } = new LogicMounter(PipelinesLogic);
const { mount: mountFetchIndexApiLogic } = new LogicMounter(FetchIndexApiLogic);
const { mount: mountUpdatePipelineLogic } = new LogicMounter(UpdatePipelineApiLogic);
const { mount: mountFetchCustomPipelineApiLogic } = new LogicMounter(FetchCustomPipelineApiLogic);
const { clearFlashMessages, flashAPIErrors, flashSuccessToast } = mockFlashMessageHelpers;
const newPipeline = {
@ -51,6 +54,7 @@ describe('PipelinesLogic', () => {
beforeEach(() => {
jest.clearAllMocks();
mountFetchIndexApiLogic();
mountFetchCustomPipelineApiLogic();
mountUpdatePipelineLogic();
mount();
});
@ -195,5 +199,41 @@ describe('PipelinesLogic', () => {
});
});
});
describe('fetchCustomPipelineSuccess', () => {
it('should support api indices with custom ingest pipelines', () => {
PipelinesLogic.actions.fetchIndexApiSuccess({
...apiIndex,
});
const indexName = apiIndex.name;
const indexPipelines: Record<string, IngestPipeline> = {
[indexName]: {
processors: [],
version: 1,
},
[`${indexName}@custom`]: {
processors: [],
version: 1,
},
[`${indexName}@ml-inference`]: {
processors: [],
version: 1,
},
};
PipelinesLogic.actions.fetchCustomPipelineSuccess(indexPipelines);
expect(PipelinesLogic.values).toEqual({
...DEFAULT_VALUES,
customPipelineData: indexPipelines,
index: {
...apiIndex,
},
indexName,
pipelineName: indexName,
canSetPipeline: false,
hasIndexIngestionPipeline: true,
canUseMlInferencePipeline: true,
});
});
});
});
});

View file

@ -90,6 +90,10 @@ type PipelinesActions = Pick<
FetchCustomPipelineApiLogicArgs,
FetchCustomPipelineApiLogicResponse
>['makeRequest'];
fetchCustomPipelineSuccess: Actions<
FetchCustomPipelineApiLogicArgs,
FetchCustomPipelineApiLogicResponse
>['apiSuccess'];
fetchDefaultPipeline: Actions<undefined, FetchDefaultPipelineResponse>['makeRequest'];
fetchDefaultPipelineSuccess: Actions<undefined, FetchDefaultPipelineResponse>['apiSuccess'];
fetchIndexApiSuccess: Actions<FetchIndexApiParams, FetchIndexApiResponse>['apiSuccess'];
@ -143,7 +147,7 @@ export const PipelinesLogic = kea<MakeLogicType<PipelinesValues, PipelinesAction
FetchDefaultPipelineApiLogic,
['apiSuccess as fetchDefaultPipelineSuccess', 'makeRequest as fetchDefaultPipeline'],
FetchCustomPipelineApiLogic,
['makeRequest as fetchCustomPipeline'],
['apiSuccess as fetchCustomPipelineSuccess', 'makeRequest as fetchCustomPipeline'],
FetchMlInferencePipelineProcessorsApiLogic,
[
'makeRequest as fetchMlInferenceProcessors',
@ -303,16 +307,12 @@ export const PipelinesLogic = kea<MakeLogicType<PipelinesValues, PipelinesAction
(index: ElasticsearchIndexWithIngestion) => !isApiIndex(index),
],
canUseMlInferencePipeline: [
() => [
selectors.canSetPipeline,
selectors.hasIndexIngestionPipeline,
selectors.pipelineState,
],
() => [selectors.hasIndexIngestionPipeline, selectors.pipelineState, selectors.index],
(
canSetPipeline: boolean,
hasIndexIngestionPipeline: boolean,
pipelineState: IngestPipelineParams
) => canSetPipeline && hasIndexIngestionPipeline && pipelineState.run_ml_inference,
pipelineState: IngestPipelineParams,
index: ElasticsearchIndexWithIngestion
) => hasIndexIngestionPipeline && (pipelineState.run_ml_inference || isApiIndex(index)),
],
defaultPipelineValues: [
() => [selectors.defaultPipelineValuesData],