[8.x] [Index Management + ML] Remove unused code from index management and ml (#211266) (#212591)

# Backport

This will backport the following commits from `main` to `8.x`:
- [[Index Management + ML] Remove unused code from index management and
ml (#211266)](https://github.com/elastic/kibana/pull/211266)

<!--- Backport version: 9.6.6 -->

### Questions?
Please refer to the [Backport tool
documentation](https://github.com/sorenlouv/backport)

<!--BACKPORT [{"author":{"name":"Samiul
Monir","email":"150824886+Samiul-TheSoccerFan@users.noreply.github.com"},"sourceCommit":{"committedDate":"2025-02-26T21:09:44Z","message":"[Index
Management + ML] Remove unused code from index management and ml
(#211266)\n\n## Summary\n\nThis PR includes\n- Remove unused package
from kibana\n- Remove references from index_management and ml plugins\n-
Update translations file\n- Delete skipped tests\n\nCloses
https://github.com/elastic/kibana/issues/204507\n\n###
Checklist\n\nCheck the PR satisfies following conditions. \n\nReviewers
should verify this PR satisfies this list as well.\n\n- [X] Any text
added follows [EUI's
writing\nguidelines](https://elastic.github.io/eui/#/guidelines/writing),
uses\nsentence case text and includes
[i18n\nsupport](https://github.com/elastic/kibana/blob/main/src/platform/packages/shared/kbn-i18n/README.md)\n-
[x] [Flaky
Test\nRunner](https://ci-stats.kibana.dev/trigger_flaky_test_runner/1)
was\nused on any tests changed\n\n---------\n\nCo-authored-by:
kibanamachine
<42973632+kibanamachine@users.noreply.github.com>","sha":"5270cc621f18f8104338db046ff4c275e7c68769","branchLabelMapping":{"^v9.1.0$":"main","^v8.19.0$":"8.x","^v(\\d+).(\\d+).\\d+$":"$1.$2"}},"sourcePullRequest":{"labels":["Team:Kibana
Management","release_note:skip","Team:ML","ci:project-deploy-elasticsearch","backport:version","v9.1.0","v8.19.0"],"title":"[Index
Management + ML] Remove unused code from index management and
ml","number":211266,"url":"https://github.com/elastic/kibana/pull/211266","mergeCommit":{"message":"[Index
Management + ML] Remove unused code from index management and ml
(#211266)\n\n## Summary\n\nThis PR includes\n- Remove unused package
from kibana\n- Remove references from index_management and ml plugins\n-
Update translations file\n- Delete skipped tests\n\nCloses
https://github.com/elastic/kibana/issues/204507\n\n###
Checklist\n\nCheck the PR satisfies following conditions. \n\nReviewers
should verify this PR satisfies this list as well.\n\n- [X] Any text
added follows [EUI's
writing\nguidelines](https://elastic.github.io/eui/#/guidelines/writing),
uses\nsentence case text and includes
[i18n\nsupport](https://github.com/elastic/kibana/blob/main/src/platform/packages/shared/kbn-i18n/README.md)\n-
[x] [Flaky
Test\nRunner](https://ci-stats.kibana.dev/trigger_flaky_test_runner/1)
was\nused on any tests changed\n\n---------\n\nCo-authored-by:
kibanamachine
<42973632+kibanamachine@users.noreply.github.com>","sha":"5270cc621f18f8104338db046ff4c275e7c68769"}},"sourceBranch":"main","suggestedTargetBranches":["8.x"],"targetPullRequestStates":[{"branch":"main","label":"v9.1.0","branchLabelMappingKey":"^v9.1.0$","isSourceBranch":true,"state":"MERGED","url":"https://github.com/elastic/kibana/pull/211266","number":211266,"mergeCommit":{"message":"[Index
Management + ML] Remove unused code from index management and ml
(#211266)\n\n## Summary\n\nThis PR includes\n- Remove unused package
from kibana\n- Remove references from index_management and ml plugins\n-
Update translations file\n- Delete skipped tests\n\nCloses
https://github.com/elastic/kibana/issues/204507\n\n###
Checklist\n\nCheck the PR satisfies following conditions. \n\nReviewers
should verify this PR satisfies this list as well.\n\n- [X] Any text
added follows [EUI's
writing\nguidelines](https://elastic.github.io/eui/#/guidelines/writing),
uses\nsentence case text and includes
[i18n\nsupport](https://github.com/elastic/kibana/blob/main/src/platform/packages/shared/kbn-i18n/README.md)\n-
[x] [Flaky
Test\nRunner](https://ci-stats.kibana.dev/trigger_flaky_test_runner/1)
was\nused on any tests changed\n\n---------\n\nCo-authored-by:
kibanamachine
<42973632+kibanamachine@users.noreply.github.com>","sha":"5270cc621f18f8104338db046ff4c275e7c68769"}},{"branch":"8.x","label":"v8.19.0","branchLabelMappingKey":"^v8.19.0$","isSourceBranch":false,"state":"NOT_CREATED"}]}]
BACKPORT-->

---------

Co-authored-by: kibanamachine <42973632+kibanamachine@users.noreply.github.com>
This commit is contained in:
Samiul Monir 2025-02-27 10:00:05 -05:00 committed by GitHub
parent a8d5f53354
commit bcfb4f41c0
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
41 changed files with 30 additions and 1846 deletions

1
.github/CODEOWNERS vendored
View file

@ -540,7 +540,6 @@ x-pack/platform/plugins/private/index_lifecycle_management @elastic/kibana-manag
x-pack/platform/plugins/shared/index_management @elastic/kibana-management
x-pack/platform/packages/shared/index-management/index_management_shared_types @elastic/kibana-management
test/plugin_functional/plugins/index_patterns @elastic/kibana-data-discovery
x-pack/platform/packages/private/ml/inference_integration_flyout @elastic/ml-ui
x-pack/platform/packages/shared/ai-infra/inference-common @elastic/appex-ai-infra
x-pack/platform/plugins/shared/inference_endpoint @elastic/ml-ui
x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common @elastic/response-ops @elastic/appex-ai-infra @elastic/obs-ai-assistant @elastic/security-generative-ai

View file

@ -588,7 +588,6 @@
"@kbn/inference-endpoint-ui-common": "link:x-pack/platform/packages/shared/kbn-inference-endpoint-ui-common",
"@kbn/inference-langchain": "link:x-pack/platform/packages/shared/ai-infra/inference-langchain",
"@kbn/inference-plugin": "link:x-pack/platform/plugins/shared/inference",
"@kbn/inference_integration_flyout": "link:x-pack/platform/packages/private/ml/inference_integration_flyout",
"@kbn/infra-forge": "link:x-pack/platform/packages/private/kbn-infra-forge",
"@kbn/infra-plugin": "link:x-pack/solutions/observability/plugins/infra",
"@kbn/ingest-pipelines-plugin": "link:x-pack/platform/plugins/shared/ingest_pipelines",

View file

@ -1074,8 +1074,6 @@
"@kbn/index-management-shared-types/*": ["x-pack/platform/packages/shared/index-management/index_management_shared_types/*"],
"@kbn/index-patterns-test-plugin": ["test/plugin_functional/plugins/index_patterns"],
"@kbn/index-patterns-test-plugin/*": ["test/plugin_functional/plugins/index_patterns/*"],
"@kbn/inference_integration_flyout": ["x-pack/platform/packages/private/ml/inference_integration_flyout"],
"@kbn/inference_integration_flyout/*": ["x-pack/platform/packages/private/ml/inference_integration_flyout/*"],
"@kbn/inference-common": ["x-pack/platform/packages/shared/ai-infra/inference-common"],
"@kbn/inference-common/*": ["x-pack/platform/packages/shared/ai-infra/inference-common/*"],
"@kbn/inference-endpoint-plugin": ["x-pack/platform/plugins/shared/inference_endpoint"],

View file

@ -90,7 +90,6 @@
"platform/packages/private/ml/data_view_utils",
"platform/packages/private/ml/date_picker",
"platform/packages/private/ml/field_stats_flyout",
"platform/packages/private/ml/inference_integration_flyout",
"platform/packages/shared/ml/trained_models_utils",
"platform/packages/private/ml/category_validator",
"platform/packages/private/ml/ui_actions",

View file

@ -1,3 +0,0 @@
# @kbn/inference_integration_flyout
Empty package generated by @kbn/generate

View file

@ -1,182 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { EuiSuperSelect } from '@elastic/eui';
import React, { useMemo, useState } from 'react';
import { connectToApiOptions, isEmpty, setModalConfigResponse } from '../lib/shared_values';
import type { ModelConfig } from '../types';
import { Service } from '../types';
import { InferenceFlyout } from './flyout_layout';
import type { SaveMappingOnClick } from './inference_flyout_wrapper';
import { CohereForm } from './service_forms/cohere_form';
import { HuggingFaceForm } from './service_forms/huggingface_form';
import { OpenaiForm } from './service_forms/openai_form';
/**
 * Props for the "Connect to API" tab of the add-inference-endpoint flyout.
 * Inherits the save callback and loading flag from SaveMappingOnClick.
 */
interface Props extends SaveMappingOnClick {
description: string;
onInferenceEndpointChange: (inferenceId: string) => void;
inferenceEndpointError?: string;
}
/**
 * "Connect to API" tab content: lets the user configure an inference endpoint
 * backed by a third-party service (HuggingFace, Cohere, or OpenAI) and save it
 * through the shared <InferenceFlyout /> layout.
 */
export const ConnectToApi: React.FC<Props> = ({
description,
onSaveInferenceEndpoint,
isCreateInferenceApiLoading,
onInferenceEndpointChange,
inferenceEndpointError,
}) => {
// Defaults used when the corresponding optional fields are left empty.
const defaultOpenaiUrl = 'https://api.openai.com/v1/embeddings';
const defaultCohereModelId = 'embed-english-v2.0';
// Currently selected service; connectToApiOptions supplies the choices.
const [selectedModelType, setSelectedModelType] = useState(connectToApiOptions[0].value);
// Per-service form state; only the selected service's fields are rendered.
const [huggingFaceApiKey, setHuggingFaceApiKey] = useState('');
const [huggingFaceModelUrl, setHuggingFaceModelUrl] = useState('');
const [cohereApiKey, setCohereApiKey] = useState('');
const [cohereModelId, setCohereModelId] = useState(defaultCohereModelId);
const [openaiApiKey, setOpenaiApiKey] = useState('');
// NOTE(review): 'openaiEndpointlUrl' is misspelled (extra 'l') — local only.
const [openaiEndpointlUrl, setOpenaiEndpointUrl] = useState(defaultOpenaiUrl);
// NOTE(review): setter name 'openaiSetOrganizationId' breaks the setX convention.
const [openaiOrganizationId, openaiSetOrganizationId] = useState('');
const [openaiModelId, setOpenaiModelId] = useState('');
// disable save button if required fields are empty
const areRequiredFieldsEmpty = useMemo(() => {
if (selectedModelType === Service.huggingFace) {
return isEmpty(huggingFaceModelUrl) || isEmpty(huggingFaceApiKey);
} else if (selectedModelType === Service.cohere) {
// Cohere's model id falls back to a default, so only the API key is required.
return isEmpty(cohereApiKey);
} else {
// open ai
return isEmpty(openaiApiKey) || isEmpty(openaiModelId);
}
}, [
selectedModelType,
huggingFaceModelUrl,
huggingFaceApiKey,
cohereApiKey,
openaiApiKey,
openaiModelId,
]);
// reset form values
// Clears the *previous* service's fields (switch is on the old selectedModelType)
// so stale credentials are not kept around after changing service.
const onChangeModelType = (newSelectedServiceType: Service) => {
switch (selectedModelType) {
case Service.huggingFace:
setHuggingFaceApiKey('');
setHuggingFaceModelUrl('');
break;
case Service.cohere:
setCohereApiKey('');
setCohereModelId(defaultCohereModelId);
break;
case Service.openai:
setOpenaiApiKey('');
setOpenaiEndpointUrl(defaultOpenaiUrl);
openaiSetOrganizationId('');
setOpenaiModelId('');
break;
}
setSelectedModelType(newSelectedServiceType);
};
// Builds the service_settings payload for the selected service; empty optional
// fields are substituted with their defaults (or omitted, for the org id).
const modelConfig: ModelConfig = useMemo(() => {
if (selectedModelType === Service.huggingFace) {
return setModalConfigResponse(Service.huggingFace, {
api_key: huggingFaceApiKey,
url: huggingFaceModelUrl,
});
} else if (selectedModelType === Service.cohere) {
return setModalConfigResponse(Service.cohere, {
api_key: cohereApiKey,
model_id: isEmpty(cohereModelId) ? defaultCohereModelId : cohereModelId,
});
} else {
return setModalConfigResponse(Service.openai, {
api_key: openaiApiKey,
model_id: openaiModelId,
organization_id: isEmpty(openaiOrganizationId) ? undefined : openaiOrganizationId,
url: isEmpty(openaiEndpointlUrl) ? defaultOpenaiUrl : openaiEndpointlUrl,
});
}
}, [
selectedModelType,
huggingFaceApiKey,
huggingFaceModelUrl,
cohereApiKey,
cohereModelId,
openaiApiKey,
openaiModelId,
openaiOrganizationId,
openaiEndpointlUrl,
]);
// Renders the credential form matching the selected service.
const renderForm = () => {
if (selectedModelType === Service.huggingFace)
return (
<HuggingFaceForm
apiKey={huggingFaceApiKey}
setApiKey={setHuggingFaceApiKey}
url={huggingFaceModelUrl}
setUrl={setHuggingFaceModelUrl}
/>
);
else if (selectedModelType === Service.cohere)
return (
<CohereForm
apiKey={cohereApiKey}
setApiKey={setCohereApiKey}
modelId={cohereModelId}
setModelId={setCohereModelId}
/>
);
else
return (
<OpenaiForm
apiKey={openaiApiKey}
setApiKey={setOpenaiApiKey}
endpointUrl={openaiEndpointlUrl}
setEndpointUrl={setOpenaiEndpointUrl}
organizationId={openaiOrganizationId}
setOrganizationId={openaiSetOrganizationId}
modelId={openaiModelId}
setModelId={setOpenaiModelId}
/>
);
};
// Service picker plus the matching form, slotted into the shared flyout layout.
const InferenceSpecificComponent = (
<>
<EuiSuperSelect
fullWidth
data-test-subj="modelTypeSelect"
options={connectToApiOptions}
valueOfSelected={selectedModelType}
onChange={(value) => onChangeModelType(value)}
/>
{renderForm()}
</>
);
return (
<>
<InferenceFlyout
description={description}
onSaveInferenceEndpoint={onSaveInferenceEndpoint}
inferenceComponent={InferenceSpecificComponent}
service={selectedModelType}
modelConfig={modelConfig}
areRequiredFieldsEmpty={areRequiredFieldsEmpty}
isCreateInferenceApiLoading={isCreateInferenceApiLoading}
onInferenceEndpointChange={onInferenceEndpointChange}
inferenceEndpointError={inferenceEndpointError}
/>
</>
);
};

View file

@ -1,207 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import type { EuiSuperSelectOption } from '@elastic/eui';
import {
EuiFlexItem,
EuiTitle,
EuiSuperSelect,
EuiText,
EuiLink,
EuiSpacer,
useGeneratedHtmlId,
EuiHorizontalRule,
} from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import { FormattedMessage } from '@kbn/i18n-react';
import React, { useMemo, useState, useEffect } from 'react';
import type { ElasticsearchModelDescriptions, ElasticsearchService, ModelConfig } from '../types';
import { ElasticsearchModelDefaultOptions, Service } from '../types';
import type { DocumentationProps, SaveMappingOnClick } from './inference_flyout_wrapper';
import { elasticsearchModelsOptions, setModalConfigResponse } from '../lib/shared_values';
import { ServiceOptions } from './service_options';
import { InferenceFlyout } from './flyout_layout';
/**
 * Props for the "Elasticsearch models" tab. Documentation URLs come from
 * DocumentationProps (minus the NLP-import ones, which belong to the Eland tab).
 */
interface ElasticsearchModelsProps
  extends Omit<DocumentationProps, 'supportedNlpModels' | 'nlpImportModel'>,
    SaveMappingOnClick {
  description: string;
  trainedModels: string[];
  onInferenceEndpointChange: (inferenceId: string) => void;
  inferenceEndpointError?: string;
}

/**
 * "Elasticsearch models" tab content: lets the user pick a built-in model
 * (ELSER / E5) or one of their own trained models, tune allocations/threads,
 * and save the endpoint through the shared <InferenceFlyout /> layout.
 */
export const ElasticsearchModels: React.FC<ElasticsearchModelsProps> = ({
  description,
  elserv2documentationUrl = '',
  e5documentationUrl = '',
  onSaveInferenceEndpoint,
  trainedModels,
  isCreateInferenceApiLoading,
  onInferenceEndpointChange,
  inferenceEndpointError,
}) => {
  // Select options: the built-in defaults plus any user trained models (see effect below).
  const [options, setOptions] = useState(elasticsearchModelsOptions);
  const [selectedModelType, setSelectedModelType] = useState(elasticsearchModelsOptions[0].value);
  const [numberOfAllocations, setNumberOfAllocations] = useState<number>(1);
  const [numberOfThreads, setNumberOfThreads] = useState<number>(1);
  // ELSER maps to its dedicated service; everything else uses the generic
  // elasticsearch service.
  const serviceType: Service = useMemo(() => {
    return selectedModelType === ElasticsearchModelDefaultOptions.elser
      ? Service.elser
      : Service.elasticsearch;
  }, [selectedModelType]);
  // service_settings payload: allocations/threads always; E5 additionally
  // pins its model id.
  const modelConfig: ModelConfig = useMemo(() => {
    const modelAllocationsAndThreads = {
      num_allocations: numberOfAllocations,
      num_threads: numberOfThreads,
    };
    if (serviceType === Service.elser)
      return setModalConfigResponse(serviceType, {
        ...modelAllocationsAndThreads,
      });
    else {
      return setModalConfigResponse(serviceType, {
        ...modelAllocationsAndThreads,
        model_id: ElasticsearchModelDefaultOptions.e5,
      } as ElasticsearchService);
    }
  }, [numberOfAllocations, numberOfThreads, serviceType]);
  // Title/description/doc-link copy for the built-in models; user trained
  // models have no entry here and render without the description section.
  const elasticSearchModelTypesDescriptions: Record<string, ElasticsearchModelDescriptions> = {
    [ElasticsearchModelDefaultOptions.elser]: {
      description: i18n.translate(
        'xpack.ml.addInferenceEndpoint.elasticsearchModels.elser.description',
        {
          defaultMessage:
            "ELSER is Elastic's NLP model for English semantic search, utilizing sparse vectors. It prioritizes intent and contextual meaning over literal term matching, optimized specifically for English documents and queries on the Elastic platform.",
        }
      ),
      documentation: elserv2documentationUrl,
      title: i18n.translate('xpack.ml.addInferenceEndpoint.elasticsearchModels.elser.title', {
        defaultMessage: 'Elastic Learned Sparse Encoder v2',
      }),
    },
    [ElasticsearchModelDefaultOptions.e5]: {
      description: i18n.translate(
        'xpack.ml.addInferenceEndpoint.elasticsearchModels.e5Model.description',
        {
          defaultMessage:
            'E5 is a third party NLP model that enables you to perform multi-lingual semantic search by using dense vector representations. This model performs best for non-English language documents and queries.',
        }
      ),
      documentation: e5documentationUrl,
      title: i18n.translate('xpack.ml.addInferenceEndpoint.e5Model.title', {
        defaultMessage: 'Multilingual E5 (Embeddings from bidirectional encoder representations)',
      }),
    },
  };
  // Merge the caller's trained models into the default options whenever the
  // list changes, and reset the selection to the first entry.
  useEffect(() => {
    const defaultOptions: string[] = Object.values(ElasticsearchModelDefaultOptions);
    // filter + map instead of a side-effecting .map(): build options only for
    // models not already covered by the defaults.
    const elasticsearchModelsOptionsList: Array<
      EuiSuperSelectOption<ElasticsearchModelDefaultOptions | string>
    > = trainedModels
      .filter((model) => !defaultOptions.includes(model))
      .map((model) => ({
        value: model,
        inputDisplay: model,
        'data-test-subj': `serviceType-${model}`,
      }));
    const modelOptionsList = elasticsearchModelsOptions.concat(elasticsearchModelsOptionsList);
    setOptions(modelOptionsList);
    setSelectedModelType(modelOptionsList[0].value);
  }, [trainedModels]);
  const serviceOptionsId = useGeneratedHtmlId({ prefix: 'serviceOptions' });
  // Model picker, optional description/doc-link section (built-ins only), and
  // the allocations/threads controls.
  const inferenceComponent = (
    <>
      <EuiFlexItem grow={false}>
        <EuiSuperSelect
          fullWidth
          options={options}
          valueOfSelected={selectedModelType}
          onChange={(value) => setSelectedModelType(value)}
        />
      </EuiFlexItem>
      <EuiSpacer />
      {Object.keys(elasticSearchModelTypesDescriptions).includes(selectedModelType) ? (
        <>
          <EuiFlexItem grow={false}>
            <EuiTitle size="xs">
              <h6>
                <FormattedMessage
                  id="xpack.ml.addInferenceEndpoint.elasticsearchModels.modelTitle"
                  defaultMessage="{title}"
                  values={{
                    title: elasticSearchModelTypesDescriptions[selectedModelType].title,
                  }}
                />
              </h6>
            </EuiTitle>
          </EuiFlexItem>
          <EuiSpacer />
          <EuiFlexItem>
            <EuiText color="subdued">
              <FormattedMessage
                id="xpack.ml.addInferenceEndpoint.elasticsearchModels.modelDescription"
                defaultMessage="{description}"
                values={{
                  description: elasticSearchModelTypesDescriptions[selectedModelType].description,
                }}
              />
            </EuiText>
          </EuiFlexItem>
          <EuiSpacer />
          <EuiFlexItem>
            <p>
              <EuiLink
                href={elasticSearchModelTypesDescriptions[selectedModelType].documentation}
                external
                target={'_blank'}
              >
                <FormattedMessage
                  id="xpack.ml.addInferenceEndpoint.elasticsearchModels.modelDocumentation"
                  defaultMessage="View documentation"
                />
              </EuiLink>
            </p>
          </EuiFlexItem>
          <EuiSpacer />
          <EuiFlexItem>
            <ServiceOptions
              id={serviceOptionsId}
              numberOfAllocations={numberOfAllocations}
              setNumberOfAllocations={setNumberOfAllocations}
              setNumberOfThreads={setNumberOfThreads}
              numberOfThreads={numberOfThreads}
            />
          </EuiFlexItem>
        </>
      ) : (
        <EuiHorizontalRule margin="none" />
      )}
    </>
  );
  return (
    <>
      <InferenceFlyout
        description={description}
        service={serviceType}
        onSaveInferenceEndpoint={onSaveInferenceEndpoint}
        inferenceComponent={inferenceComponent}
        modelConfig={modelConfig}
        areRequiredFieldsEmpty={false}
        isCreateInferenceApiLoading={isCreateInferenceApiLoading}
        onInferenceEndpointChange={onInferenceEndpointChange}
        inferenceEndpointError={inferenceEndpointError}
      />
    </>
  );
};

View file

@ -1,141 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import {
EuiFieldText,
EuiFlexGroup,
EuiFlexItem,
EuiForm,
EuiFormRow,
EuiLink,
EuiTitle,
} from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import { FormattedMessage } from '@kbn/i18n-react';
import React, { useCallback, useMemo, useState } from 'react';
import { isEmpty, serviceTypeMap } from '../lib/shared_values';
import type { Service, ModelConfig } from '../types';
import type { SaveMappingOnClick } from './inference_flyout_wrapper';
import { SaveInferenceEndpoint } from './save_inference_mappings_button';
/**
 * Props for the shared flyout-tab layout; each tab supplies its own
 * `inferenceComponent` plus the model config to persist on save.
 */
interface GenericInferenceFlyoutProps extends SaveMappingOnClick {
  inferenceComponent: React.ReactNode;
  description: string;
  service: Service;
  areRequiredFieldsEmpty: boolean;
  modelConfig: ModelConfig;
  onInferenceEndpointChange: (inferenceId: string) => void;
  inferenceEndpointError?: string;
}

/**
 * Shared layout used by every tab of the "Add inference endpoint" flyout:
 * renders the tab-specific form, the endpoint-id input, and the save button.
 */
export const InferenceFlyout: React.FC<GenericInferenceFlyoutProps> = ({
  inferenceComponent,
  description,
  modelConfig,
  onSaveInferenceEndpoint,
  areRequiredFieldsEmpty = false,
  service,
  isCreateInferenceApiLoading,
  onInferenceEndpointChange,
  inferenceEndpointError,
}) => {
  const [inferenceEndpointId, setInferenceEndpointId] = useState<string>('');
  // An empty-string error means "no error"; only a non-empty message is shown.
  const hasError: boolean = useMemo(() => {
    if (inferenceEndpointError !== undefined) {
      return !isEmpty(inferenceEndpointError);
    }
    return false;
  }, [inferenceEndpointError]);
  // EuiFieldText always yields a string value, so type the callback
  // accordingly (was `any`, which disabled type checking for the handler).
  const onChangingInferenceEndpoint = useCallback(
    (value: string) => {
      setInferenceEndpointId(value);
      onInferenceEndpointChange(value);
    },
    [setInferenceEndpointId, onInferenceEndpointChange]
  );
  // Save is blocked when the id is missing, required service fields are empty,
  // or a validation error is currently shown.
  const isSaveButtonDisabled = useMemo(() => {
    return (
      isEmpty(inferenceEndpointId) ||
      areRequiredFieldsEmpty ||
      (inferenceEndpointError !== undefined && !isEmpty(inferenceEndpointError))
    );
  }, [inferenceEndpointId, areRequiredFieldsEmpty, inferenceEndpointError]);
  return (
    <EuiFlexGroup direction="column" justifyContent="spaceEvenly">
      <EuiFlexItem grow={false}>
        <EuiTitle size="s">
          <>{description}</>
        </EuiTitle>
      </EuiFlexItem>
      <EuiFlexItem>{inferenceComponent}</EuiFlexItem>
      <EuiFlexItem>
        <EuiForm component="form" fullWidth>
          <EuiFormRow
            label={i18n.translate(
              'xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.label',
              {
                defaultMessage: 'Inference Endpoint ID:',
              }
            )}
            hasChildLabel={false}
            labelAppend={
              // NOTE(review): href is a "TODO" placeholder — docs link never wired up.
              <EuiLink href="TODO" external target={'_blank'}>
                <FormattedMessage
                  id="xpack.ml.inferenceFlyoutWrapper.elasticsearchModels.inferenceEndpointIdForm.inferenceEnpointDocumentation"
                  defaultMessage="What's this?"
                />
              </EuiLink>
            }
            helpText={i18n.translate(
              'xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.helpText',
              {
                defaultMessage: 'Must be unique. Only letters and underscores are allowed.',
              }
            )}
            isInvalid={hasError}
            error={
              <FormattedMessage
                id="xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.error"
                defaultMessage="{formError}"
                values={{
                  formError: inferenceEndpointError,
                }}
              />
            }
          >
            <EuiFieldText
              data-test-subj="inferenceEndpointId"
              placeholder={i18n.translate(
                'xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.placeholder',
                {
                  defaultMessage: 'Inference endpoint id',
                }
              )}
              isInvalid={hasError}
              value={inferenceEndpointId}
              onChange={(e) => onChangingInferenceEndpoint(e.target.value)}
            />
          </EuiFormRow>
        </EuiForm>
      </EuiFlexItem>
      <EuiFlexGroup justifyContent="flexEnd">
        <EuiFlexItem grow={false}>
          <SaveInferenceEndpoint
            isSaveButtonDisabled={isSaveButtonDisabled}
            inferenceId={inferenceEndpointId}
            taskType={serviceTypeMap[service]}
            modelConfig={modelConfig}
            onSaveInferenceEndpoint={onSaveInferenceEndpoint}
            isCreateInferenceApiLoading={isCreateInferenceApiLoading}
          />
        </EuiFlexItem>
      </EuiFlexGroup>
    </EuiFlexGroup>
  );
};

View file

@ -1,70 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import React from 'react';
import type { InferenceFlyoutProps } from './inference_flyout_wrapper';
import { InferenceFlyoutWrapper } from './inference_flyout_wrapper';
import { fireEvent, screen } from '@testing-library/react';
import { renderReactTestingLibraryWithI18n as render } from '@kbn/test-jest-helpers';
import '@testing-library/jest-dom';
// Baseline props for rendering <InferenceFlyoutWrapper /> in tests: empty
// strings/arrays for content, jest mocks for every callback so interactions
// can be asserted on.
export const DEFAULT_VALUES: InferenceFlyoutProps = {
errorCallout: undefined,
nlpImportModel: '',
supportedNlpModels: '',
elserv2documentationUrl: '',
e5documentationUrl: '',
isInferenceFlyoutVisible: false,
onFlyoutClose: jest.fn(),
onSaveInferenceEndpoint: jest.fn(),
trainedModels: [],
isCreateInferenceApiLoading: false,
onInferenceEndpointChange: jest.fn(),
setInferenceEndpointError: jest.fn(),
};
describe('<InferenceFlyoutWrapper />', () => {
// Fresh render before each test; DEFAULT_VALUES supplies jest-mock callbacks.
beforeEach(() => {
render(<InferenceFlyoutWrapper {...DEFAULT_VALUES} />);
});
test('inference Flyout page is loaded', async () => {
expect(screen.getByTestId('addInferenceEndpointTitle')).toBeInTheDocument();
expect(screen.getAllByTestId('elasticsearch_modelsTab')).toHaveLength(1);
expect(screen.getAllByTestId('euiFlyoutCloseButton')).toHaveLength(1);
});
test('can close Flyout', async () => {
const closeButton = screen.getByTestId('closeInferenceFlyout');
fireEvent.click(closeButton);
expect(DEFAULT_VALUES.onFlyoutClose).toHaveBeenCalled();
});
test('can change tab', async () => {
// Switch to "Connect to API", then to the Eland tab, asserting each tab's content.
const connectToApi = screen.getByTestId('connect_to_apiTab');
fireEvent.click(connectToApi);
expect(
screen.getByText('Connect to your preferred model service endpoints.')
).toBeInTheDocument();
const eland = screen.getByTestId('eland_python_clientTab');
fireEvent.click(eland);
expect(screen.getByTestId('mlElandPipInstallCodeBlock')).toBeInTheDocument();
});
test('Can change super select value in connect to api', async () => {
// Default service is HuggingFace; opening the select exposes the others,
// and choosing Open AI swaps in that service's form.
const connectToApi = screen.getByTestId('connect_to_apiTab');
fireEvent.click(connectToApi);
expect(screen.getAllByTestId('huggingFaceUrl')).toHaveLength(1);
expect(screen.getAllByTestId('huggingFaceUrlApiKey')).toHaveLength(1);
const superSelectButton = screen.getByText('HuggingFace');
fireEvent.click(superSelectButton);
expect(screen.getAllByTestId('serviceType-cohere')).toHaveLength(1);
expect(screen.getAllByTestId('serviceType-openai')).toHaveLength(1);
const superSelectChangeModelType = screen.getByText('Open AI');
fireEvent.click(superSelectChangeModelType);
expect(screen.getAllByTestId('openaiApiKey')).toHaveLength(1);
});
});
// Empty export keeps this file a module under isolatedModules.
export {};

View file

@ -1,183 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import React, { useState } from 'react';
import {
EuiFlyout,
EuiFlyoutHeader,
EuiTab,
EuiTitle,
EuiTabs,
EuiFlyoutBody,
EuiSpacer,
EuiFlyoutFooter,
EuiButtonEmpty,
} from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import { FormattedMessage } from '@kbn/i18n-react';
import type { InferenceTaskType } from '@elastic/elasticsearch/lib/api/types';
import type { ModelConfig, Tab } from '../types';
import { TabType } from '../types';
import { ElandPythonClient } from './eland_python_client';
import { ConnectToApi } from './connect_to_api';
import { ElasticsearchModels } from './elasticsearch_models';
import { flyoutHeaderDescriptions } from '../lib/shared_values';
// The three tabs of the flyout; ids double as test-subject prefixes
// (`${id}Tab`) and as keys into the tab-content map in the wrapper.
const tabs: Tab[] = [
{
id: TabType.elasticsearch_models,
name: i18n.translate('xpack.ml.inferenceFlyoutWrapper.elasticsearchModelsTabTitle', {
defaultMessage: 'Elasticsearch models',
}),
},
{
id: TabType.connect_to_api,
name: i18n.translate('xpack.ml.inferenceFlyoutWrapper.connectToAPITabTitle', {
defaultMessage: 'Connect to API',
}),
},
{
id: TabType.eland_python_client,
name: i18n.translate('xpack.ml.inferenceFlyoutWrapper.elandPythonClientTabTitle', {
defaultMessage: 'Eland Python Client',
}),
},
];
// Props for the memoized tab strip below.
interface TabProps {
setActiveTab: (id: TabType) => void;
activeTab: TabType;
// Clears any endpoint-id validation error when switching tabs.
setInferenceEndpointError: (error: string | undefined) => void;
}
/**
 * Tab strip for the flyout. Memoized so it only re-renders when the active
 * tab or one of the callbacks changes; switching tabs also clears any
 * endpoint-id validation error carried over from the previous tab.
 */
const InferenceEndpointFlyoutTabs: React.FunctionComponent<TabProps> = React.memo(
  ({ setActiveTab, activeTab, setInferenceEndpointError }) => {
    return (
      <EuiTabs>
        {tabs.map((tab) => (
          <EuiTab
            onClick={() => {
              setActiveTab(tab.id);
              setInferenceEndpointError(undefined);
            }}
            isSelected={tab.id === activeTab}
            key={tab.id}
            data-test-subj={`${tab.id}Tab`}
          >
            {tab.name}
          </EuiTab>
        ))}
      </EuiTabs>
    );
  }
);
// React.memo around an anonymous arrow loses the component name in DevTools
// and error stacks; set it explicitly.
InferenceEndpointFlyoutTabs.displayName = 'InferenceEndpointFlyoutTabs';
/** Save callback and loading flag shared by every tab's save button. */
export interface SaveMappingOnClick {
onSaveInferenceEndpoint: (
inferenceId: string,
taskType: InferenceTaskType,
modelConfig: ModelConfig
) => void;
isCreateInferenceApiLoading?: boolean;
}
/** Documentation URLs and NLP-import snippets consumed by the tabs. */
export interface DocumentationProps {
elserv2documentationUrl?: string;
e5documentationUrl?: string;
supportedNlpModels?: string;
nlpImportModel?: string;
}
/** Public props of <InferenceFlyoutWrapper />. */
export interface InferenceFlyoutProps extends SaveMappingOnClick, DocumentationProps {
// Called with the toggled visibility value when the flyout is dismissed.
onFlyoutClose: (value: boolean) => void;
isInferenceFlyoutVisible: boolean;
// Optional error banner rendered above the tabs.
errorCallout?: JSX.Element | '';
trainedModels: string[];
onInferenceEndpointChange: (inferenceId: string) => void;
inferenceEndpointError?: string;
setInferenceEndpointError: (error: string | undefined) => void;
}
/**
 * Top-level "Add inference endpoint" flyout: header, tab strip, the active
 * tab's content, and a cancel footer.
 */
export const InferenceFlyoutWrapper: React.FC<InferenceFlyoutProps> = ({
onSaveInferenceEndpoint,
onFlyoutClose,
isInferenceFlyoutVisible,
e5documentationUrl = '',
elserv2documentationUrl = '',
supportedNlpModels = '',
nlpImportModel = '',
errorCallout,
trainedModels = [],
isCreateInferenceApiLoading,
onInferenceEndpointChange,
inferenceEndpointError = undefined,
setInferenceEndpointError,
}) => {
const [activeTab, setActiveTab] = useState<TabType>(TabType.elasticsearch_models);
// All three tab contents are constructed on every render; only the active
// one is mounted (selected below via tabToInferenceContentMap[activeTab]).
const tabToInferenceContentMap: Record<TabType, React.ReactNode> = {
elasticsearch_models: (
<ElasticsearchModels
description={flyoutHeaderDescriptions[activeTab].description}
e5documentationUrl={e5documentationUrl}
elserv2documentationUrl={elserv2documentationUrl}
onSaveInferenceEndpoint={onSaveInferenceEndpoint}
isCreateInferenceApiLoading={isCreateInferenceApiLoading}
trainedModels={trainedModels}
onInferenceEndpointChange={onInferenceEndpointChange}
inferenceEndpointError={inferenceEndpointError}
/>
),
connect_to_api: (
<ConnectToApi
description={flyoutHeaderDescriptions[activeTab].description}
onSaveInferenceEndpoint={onSaveInferenceEndpoint}
isCreateInferenceApiLoading={isCreateInferenceApiLoading}
onInferenceEndpointChange={onInferenceEndpointChange}
inferenceEndpointError={inferenceEndpointError}
/>
),
eland_python_client: (
<ElandPythonClient supportedNlpModels={supportedNlpModels} nlpImportModel={nlpImportModel} />
),
};
const tabContent = tabToInferenceContentMap[activeTab];
// Error banner (if any) above the tab strip, then the active tab's body.
const content: React.ReactNode = (
<>
{errorCallout}
<InferenceEndpointFlyoutTabs
activeTab={activeTab}
setActiveTab={setActiveTab}
setInferenceEndpointError={setInferenceEndpointError}
/>
<EuiSpacer size="l" />
{tabContent}
</>
);
return (
<EuiFlyout
onClose={() => onFlyoutClose(!isInferenceFlyoutVisible)}
data-test-subj="addInferenceEndpoint"
ownFocus
>
<EuiFlyoutHeader>
<EuiTitle size="m" data-test-subj="addInferenceEndpointTitle">
<h2>
<FormattedMessage
id="xpack.ml.addInferenceEndpoint.header.title"
defaultMessage="Add inference endpoint"
/>
</h2>
</EuiTitle>
</EuiFlyoutHeader>
<EuiFlyoutBody data-test-subj="inference_endpoint_content">{content}</EuiFlyoutBody>
<EuiFlyoutFooter>
<EuiButtonEmpty
onClick={() => onFlyoutClose(!isInferenceFlyoutVisible)}
data-test-subj="closeInferenceFlyout"
>
{i18n.translate('xpack.ml.addInferenceEndpoint.footer.cancel', {
defaultMessage: 'Cancel',
})}
</EuiButtonEmpty>
</EuiFlyoutFooter>
</EuiFlyout>
);
};

View file

@ -1,44 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import type { InferenceTaskType } from '@elastic/elasticsearch/lib/api/types';
import { EuiButton } from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import React from 'react';
import type { ModelConfig } from '../types';
import type { SaveMappingOnClick } from './inference_flyout_wrapper';
/** Everything the save button needs to persist one inference endpoint. */
interface SaveInferenceEndpointProps extends SaveMappingOnClick {
  inferenceId: string;
  taskType: InferenceTaskType;
  modelConfig: ModelConfig;
  isSaveButtonDisabled?: boolean;
}

/**
 * Submit button for the flyout: forwards the endpoint id, task type, and
 * model config to the caller-supplied save handler.
 */
export const SaveInferenceEndpoint: React.FC<SaveInferenceEndpointProps> = (props) => {
  const {
    inferenceId,
    taskType,
    modelConfig,
    onSaveInferenceEndpoint,
    isSaveButtonDisabled,
    isCreateInferenceApiLoading,
  } = props;
  const saveLabel = i18n.translate('xpack.ml.addInferenceEndpoint.saveInference', {
    defaultMessage: 'Save Inference Endpoint',
  });
  const handleSaveClick = () => onSaveInferenceEndpoint(inferenceId, taskType, modelConfig);
  return (
    <EuiButton
      type="submit"
      fill
      isDisabled={isSaveButtonDisabled}
      isLoading={isCreateInferenceApiLoading}
      onClick={handleSaveClick}
    >
      {saveLabel}
    </EuiButton>
  );
};

View file

@ -1,53 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { EuiFieldPassword, EuiFieldText, EuiForm, EuiFormRow, EuiSpacer } from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import React from 'react';
interface CohereFormProps {
  modelId: string;
  setModelId: (modelId: string) => void;
  apiKey: string;
  setApiKey: (apiKey: string) => void;
}

/**
 * Credential form for a Cohere-backed inference endpoint: model ID and API
 * key. This is a controlled form — all state lives in the parent and flows
 * back through the setter callbacks.
 */
export const CohereForm: React.FC<CohereFormProps> = (props) => {
  const { modelId, setModelId, apiKey, setApiKey } = props;

  const modelIdLabel = i18n.translate(
    'xpack.ml.addInferenceEndpoint.connectToApi.cohereModelID.label',
    {
      defaultMessage: 'Model ID',
    }
  );
  const apiKeyLabel = i18n.translate(
    'xpack.ml.addInferenceEndpoint.connectToApi.cohereApiKey.label',
    {
      defaultMessage: 'Api Key',
    }
  );

  return (
    <EuiForm component="form" fullWidth>
      <EuiSpacer />
      <EuiFormRow label={modelIdLabel} hasChildLabel={false}>
        <EuiFieldText
          data-test-subj="cohereModelId"
          value={modelId}
          onChange={(event) => setModelId(event.target.value)}
        />
      </EuiFormRow>
      <EuiSpacer />
      <EuiFormRow label={apiKeyLabel}>
        {/* "dual" lets the user toggle the key between masked and plain text. */}
        <EuiFieldPassword
          type="dual"
          data-test-subj="cohereApiKey"
          value={apiKey}
          onChange={(event) => setApiKey(event.target.value)}
        />
      </EuiFormRow>
    </EuiForm>
  );
};

View file

@ -1,53 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import React from 'react';
import { EuiFieldPassword, EuiFieldText, EuiForm, EuiFormRow, EuiSpacer } from '@elastic/eui';
import { i18n } from '@kbn/i18n';
interface HuggingFaceProps {
  apiKey: string;
  url: string;
  setApiKey: (apiKey: string) => void;
  setUrl: (url: string) => void;
}

/**
 * Credential form for a HuggingFace-backed inference endpoint: model URL and
 * API key. This is a controlled form — state is owned by the parent and
 * updated via the setter callbacks.
 */
export const HuggingFaceForm: React.FC<HuggingFaceProps> = (props) => {
  const { apiKey, url, setApiKey, setUrl } = props;

  const modelUrlLabel = i18n.translate(
    'xpack.ml.addInferenceEndpoint.connectToApi.huggingFaceModelUrl.label',
    {
      defaultMessage: 'HuggingFace Model URL',
    }
  );
  const apiKeyLabel = i18n.translate(
    'xpack.ml.addInferenceEndpoint.connectToApi.huggingFaceModelApiKey.label',
    {
      defaultMessage: 'API Key',
    }
  );

  return (
    <EuiForm component="form" fullWidth>
      <EuiSpacer />
      <EuiFormRow label={modelUrlLabel} hasChildLabel={false}>
        <EuiFieldText
          data-test-subj="huggingFaceUrl"
          value={url}
          onChange={(event) => setUrl(event.target.value)}
        />
      </EuiFormRow>
      <EuiSpacer />
      <EuiFormRow label={apiKeyLabel}>
        {/* "dual" lets the user toggle the key between masked and plain text. */}
        <EuiFieldPassword
          type="dual"
          data-test-subj="huggingFaceUrlApiKey"
          value={apiKey}
          onChange={(event) => setApiKey(event.target.value)}
        />
      </EuiFormRow>
    </EuiForm>
  );
};

View file

@ -1,91 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { EuiFieldPassword, EuiFieldText, EuiForm, EuiFormRow, EuiSpacer } from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import React from 'react';
interface OpenaiFormProps {
  organizationId: string;
  setOrganizationId: (organizationId: string) => void;
  apiKey: string;
  setApiKey: (apiKey: string) => void;
  endpointUrl: string;
  setEndpointUrl: (endpointUrl: string) => void;
  modelId: string;
  setModelId: (modelId: string) => void;
}

/**
 * Credential form for an OpenAI-backed inference endpoint: organization ID,
 * API key, endpoint URL, and model. This is a controlled form — all state is
 * lifted to the parent and flows back through the setter callbacks.
 */
export const OpenaiForm: React.FC<OpenaiFormProps> = (props) => {
  const {
    organizationId,
    setOrganizationId,
    apiKey,
    setApiKey,
    endpointUrl,
    setEndpointUrl,
    modelId,
    setModelId,
  } = props;

  const organizationIdLabel = i18n.translate(
    'xpack.ml.addInferenceEndpoint.connectToApi.openaiOrganizationID.label',
    {
      defaultMessage: 'Organization ID',
    }
  );
  const apiKeyLabel = i18n.translate(
    'xpack.ml.addInferenceEndpoint.connectToApi.openaiApiKey.label',
    {
      defaultMessage: 'API Key',
    }
  );
  const endpointUrlLabel = i18n.translate(
    'xpack.ml.addInferenceEndpoint.connectToApi.openaiEndpointUrl.label',
    {
      defaultMessage: 'Endpoint URL',
    }
  );
  const modelLabel = i18n.translate(
    'xpack.ml.addInferenceEndpoint.connectToApi.openaiModel.label',
    {
      defaultMessage: 'Model',
    }
  );

  return (
    <EuiForm component="form" fullWidth>
      <EuiSpacer />
      <EuiFormRow label={organizationIdLabel} hasChildLabel={false}>
        <EuiFieldText
          data-test-subj="openaiOrganizationId"
          value={organizationId}
          onChange={(event) => setOrganizationId(event.target.value)}
        />
      </EuiFormRow>
      <EuiSpacer />
      <EuiFormRow label={apiKeyLabel}>
        {/* "dual" lets the user toggle the key between masked and plain text. */}
        <EuiFieldPassword
          type="dual"
          data-test-subj="openaiApiKey"
          value={apiKey}
          onChange={(event) => setApiKey(event.target.value)}
        />
      </EuiFormRow>
      {/* No spacer between the API key and endpoint URL rows (matches original layout). */}
      <EuiFormRow label={endpointUrlLabel}>
        <EuiFieldText
          data-test-subj="openaiModelEndpointUrl"
          value={endpointUrl}
          onChange={(event) => setEndpointUrl(event.target.value)}
        />
      </EuiFormRow>
      <EuiSpacer />
      <EuiFormRow label={modelLabel}>
        <EuiFieldText
          data-test-subj="openaiModel"
          value={modelId}
          onChange={(event) => setModelId(event.target.value)}
        />
      </EuiFormRow>
    </EuiForm>
  );
};

View file

@ -1,130 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import {
EuiAccordion,
EuiDescribedFormGroup,
EuiFieldNumber,
EuiFormRow,
EuiSpacer,
EuiText,
EuiTitle,
} from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import { FormattedMessage } from '@kbn/i18n-react';
import React from 'react';
/**
 * Collapsible "Service Options" section for an Elasticsearch-hosted model:
 * two number inputs controlling how many model allocations to create and how
 * many threads each allocation uses. Both values are controlled by the parent
 * through the setter callbacks.
 */
export const ServiceOptions: React.FC<{
  id: string;
  setNumberOfAllocations: (value: number) => void;
  numberOfAllocations: number;
  setNumberOfThreads: (value: number) => void;
  numberOfThreads: number;
}> = (props) => {
  const { id, setNumberOfAllocations, numberOfAllocations, setNumberOfThreads, numberOfThreads } =
    props;

  const accordionAriaLabel = i18n.translate(
    'xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.accordion.ariaLabel',
    {
      defaultMessage: 'Service Options',
    }
  );

  const accordionButton = (
    <EuiTitle size="xxs">
      <h6>
        {i18n.translate(
          'xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.accordion.title',
          {
            defaultMessage: 'Service Options',
          }
        )}
      </h6>
    </EuiTitle>
  );

  const allocationsTitle = (
    <EuiTitle size="xxxs">
      <h6>
        {i18n.translate(
          'xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationTitle',
          {
            defaultMessage: 'Allocations:',
          }
        )}
      </h6>
    </EuiTitle>
  );

  const allocationsDescription = (
    <EuiText color="subdued" size="s">
      <FormattedMessage
        id="xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationDescription"
        defaultMessage="The number of model allocations to create."
      />
    </EuiText>
  );

  const threadsTitle = (
    <EuiTitle size="xxxs">
      <h6>
        {i18n.translate(
          'xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsTitle',
          {
            defaultMessage: 'Threads:',
          }
        )}
      </h6>
    </EuiTitle>
  );

  const threadsDescription = (
    <EuiText color="subdued" size="s">
      <FormattedMessage
        id="xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsDescription"
        defaultMessage="The number of threads to use by each model allocation."
      />
    </EuiText>
  );

  return (
    <EuiAccordion
      id={id}
      initialIsOpen
      data-test-subj="serviceOptions"
      aria-label={accordionAriaLabel}
      buttonContent={accordionButton}
    >
      <EuiSpacer size="m" />
      <EuiDescribedFormGroup
        title={allocationsTitle}
        description={allocationsDescription}
        fullWidth
      >
        <EuiFormRow>
          {/* At least one allocation is required; no upper bound here. */}
          <EuiFieldNumber
            value={numberOfAllocations}
            min={1}
            onChange={(event) => setNumberOfAllocations(event.target.valueAsNumber)}
            aria-label={i18n.translate(
              'xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationNumberField.ariaLabel',
              {
                defaultMessage: 'Number of allocation',
              }
            )}
          />
        </EuiFormRow>
      </EuiDescribedFormGroup>
      <EuiSpacer size="m" />
      <EuiDescribedFormGroup title={threadsTitle} description={threadsDescription} fullWidth>
        <EuiFormRow>
          {/* Threads per allocation are capped at 32 by this input. */}
          <EuiFieldNumber
            value={numberOfThreads}
            min={1}
            max={32}
            onChange={(event) => setNumberOfThreads(event.target.valueAsNumber)}
            aria-label={i18n.translate(
              'xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsNumberField.ariaLabel',
              {
                defaultMessage: 'Number of Threads',
              }
            )}
          />
        </EuiFormRow>
      </EuiDescribedFormGroup>
    </EuiAccordion>
  );
};

View file

@ -1,10 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
export { InferenceFlyoutWrapper } from './components/inference_flyout_wrapper';
export { ElandPythonClient } from './components/eland_python_client';
export { type ModelConfig } from './types';

View file

@ -1,12 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
// Jest configuration for the @kbn/inference_integration_flyout package.
module.exports = {
  // Shared Kibana Jest preset.
  preset: '@kbn/test',
  // Repository root, resolved relative to this package directory.
  rootDir: '../../../../../..',
  // Restrict test discovery to this package's directory.
  roots: ['<rootDir>/x-pack/platform/packages/private/ml/inference_integration_flyout'],
};

View file

@ -1,9 +0,0 @@
{
"type": "shared-common",
"id": "@kbn/inference_integration_flyout",
"owner": [
"@elastic/ml-ui"
],
"group": "platform",
"visibility": "private"
}

View file

@ -1,103 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import type { InferenceTaskType } from '@elastic/elasticsearch/lib/api/types';
import type { EuiSuperSelectOption } from '@elastic/eui';
import { i18n } from '@kbn/i18n';
import type {
TabType,
ElserServiceSettings,
ModelConfig,
HuggingFaceServiceSettings,
OpenaiServiceSettings,
CohereServiceSettings,
} from '../types';
import { Service, ElasticsearchModelDefaultOptions } from '../types';
/**
 * Super-select options for the Elastic-hosted model picker (ELSER and E5),
 * in that order. Each option's display text and test subject are derived
 * from the model id.
 */
export const elasticsearchModelsOptions: Array<
  EuiSuperSelectOption<ElasticsearchModelDefaultOptions | string>
> = [ElasticsearchModelDefaultOptions.elser, ElasticsearchModelDefaultOptions.e5].map((model) => ({
  value: model,
  inputDisplay: model,
  'data-test-subj': `serviceType-${model}`,
}));
// [service, display label, test subject] for each third-party API service,
// in the order they appear in the "Connect to API" picker.
const API_SERVICE_OPTION_ROWS: ReadonlyArray<[Service, string, string]> = [
  [Service.huggingFace, 'HuggingFace', 'serviceType-huggingFace'],
  [Service.cohere, 'Cohere', 'serviceType-cohere'],
  [Service.openai, 'Open AI', 'serviceType-openai'],
];

/** Super-select options for the third-party "Connect to API" service picker. */
export const connectToApiOptions: Array<EuiSuperSelectOption<Service>> =
  API_SERVICE_OPTION_ROWS.map(([value, inputDisplay, testSubj]) => ({
    value,
    inputDisplay,
    'data-test-subj': testSubj,
  }));
/**
 * Translated description shown in the flyout header for each tab,
 * keyed by the TabType enum values.
 */
export const flyoutHeaderDescriptions: Record<TabType, { description: string }> = {
  elasticsearch_models: {
    description: i18n.translate(
      'xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.elasticsearchModels.FlyoutHeaderdescription',
      {
        defaultMessage:
          'Connect to Elastic preferred models and models hosted on your elasticsearch nodes.',
      }
    ),
  },
  connect_to_api: {
    description: i18n.translate(
      'xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.connect_to_api.FlyoutHeaderdescription',
      {
        defaultMessage: 'Connect to your preferred model service endpoints.',
      }
    ),
  },
  eland_python_client: {
    description: i18n.translate(
      'xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.eland_python_client.FlyoutHeaderdescription',
      {
        defaultMessage: 'Import custom models through the Elastic python client.',
      }
    ),
  },
};
/**
 * Inference task type used for each service when creating an endpoint:
 * ELSER maps to sparse embeddings, every other service to dense text
 * embeddings.
 */
export const serviceTypeMap: Record<Service, InferenceTaskType> = {
  [Service.cohere]: 'text_embedding',
  [Service.huggingFace]: 'text_embedding',
  [Service.openai]: 'text_embedding',
  [Service.elasticsearch]: 'text_embedding',
  [Service.elser]: 'sparse_embedding',
};
/**
 * Builds the inference endpoint creation payload from a service identifier
 * and that service's settings object. Pure pass-through — no validation.
 */
export const setModalConfigResponse = (
  serviceType: Service,
  serviceSettings:
    | ElserServiceSettings
    | HuggingFaceServiceSettings
    | OpenaiServiceSettings
    | CohereServiceSettings
): ModelConfig => ({
  service: serviceType,
  service_settings: serviceSettings,
});
/** True when the given string is empty or contains only whitespace. */
export const isEmpty = (field: string) => field.trim().length === 0;

View file

@ -1,7 +0,0 @@
{
"name": "@kbn/inference_integration_flyout",
"private": true,
"version": "1.0.0",
"license": "Elastic License 2.0",
"sideEffects": false
}

View file

@ -1,23 +0,0 @@
{
"extends": "../../../../../../tsconfig.base.json",
"compilerOptions": {
"outDir": "target/types",
"types": [
"jest",
"node",
"react"
]
},
"include": [
"**/*.ts",
"**/*.tsx",
],
"exclude": [
"target/**/*"
],
"kbn_references": [
"@kbn/i18n",
"@kbn/i18n-react",
"@kbn/test-jest-helpers",
]
}

View file

@ -1,64 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import type { InferenceServiceSettings } from '@elastic/elasticsearch/lib/api/types';
/** Identifiers for the tabs rendered inside the inference flyout. */
export enum TabType {
  elasticsearch_models = 'elasticsearch_models',
  connect_to_api = 'connect_to_api',
  eland_python_client = 'eland_python_client',
}

/** A flyout tab: its TabType id plus the human-readable tab name. */
export interface Tab {
  id: TabType;
  name: string;
}

/** Model IDs of the Elastic-supplied default models (ELSER v2 and multilingual E5). */
export enum ElasticsearchModelDefaultOptions {
  elser = '.elser_model_2',
  e5 = '.multilingual-e5-small',
}

/** Display copy for an Elastic-hosted model: title, description, and docs link. */
export interface ElasticsearchModelDescriptions {
  title: string;
  description: string;
  documentation: string;
}

/** `service_settings` payload for a Cohere endpoint. */
export interface CohereServiceSettings {
  api_key: string;
  embedding_type?: string;
  model_id?: string;
}

/** `service_settings` payload for an ELSER endpoint. */
export interface ElserServiceSettings {
  num_allocations: number;
  num_threads: number;
}

/** `service_settings` payload for a HuggingFace endpoint. */
export interface HuggingFaceServiceSettings {
  api_key: string;
  url: string;
}

/** `service_settings` payload for an OpenAI endpoint. */
export interface OpenaiServiceSettings {
  api_key: string;
  model_id?: string;
  organization_id?: string;
  url?: string;
}

// for E5 or a text embedding model uploaded by Eland
export interface ElasticsearchService {
  model_id: string;
  num_allocations: number;
  num_threads: number;
}

/** Supported inference service identifiers. */
export enum Service {
  cohere = 'cohere',
  elser = 'elser',
  huggingFace = 'hugging_face',
  openai = 'openai',
  elasticsearch = 'elasticsearch',
}

/** Request body for creating an inference endpoint: service name plus its settings. */
export interface ModelConfig {
  service: string;
  service_settings: InferenceServiceSettings;
}

View file

@ -28330,47 +28330,6 @@
"xpack.ml.actions.openInSingleMetricViewerTitle": "Ouvrir dans Single Metric Viewer (Visionneuse d'indicateur unique)",
"xpack.ml.actions.runPatternAnalysis.description": "Déclenché lorsque l'utilisateur souhaite effectuer une analyse du modèle sur un champ.",
"xpack.ml.actions.runPatternAnalysis.title": "Exécuter l'analyse du modèle",
"xpack.ml.addInferenceEndpoint.connectToApi.cohereApiKey.label": "Clé d'API",
"xpack.ml.addInferenceEndpoint.connectToApi.cohereModelID.label": "ID du modèle",
"xpack.ml.addInferenceEndpoint.connectToApi.huggingFaceModelApiKey.label": "Clé d'API",
"xpack.ml.addInferenceEndpoint.connectToApi.huggingFaceModelUrl.label": "URL de modèle HuggingFace",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiApiKey.label": "Clé d'API",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiEndpointUrl.label": "URL du point de terminaison",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiModel.label": "Modèle",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiOrganizationID.label": "ID d'organisation",
"xpack.ml.addInferenceEndpoint.e5Model.title": "E5 multilingue (Embeddings from bidirectional encoder representations)",
"xpack.ml.addInferenceEndpoint.elandPythonClient.compatibleModelsButtonLabel": "Modèles NLP compatibles",
"xpack.ml.addInferenceEndpoint.elandPythonClient.condaInstallLabel": "une installation est également possible avec {condaLink} depuis {condaForgeLink} :",
"xpack.ml.addInferenceEndpoint.elandPythonClient.importModelButtonLabel": "Importer des modèles avec Eland",
"xpack.ml.addInferenceEndpoint.elandPythonClient.pipInstallLabel": "Eland peut être installé avec {pipLink} depuis {pypiLink} :",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step1Title": "Installer le client Python Eland",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step2Body": "Suivre les instructions sur l'importation de modèles tiers compatibles",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step2ExampleTitle": "Exemple d'importation",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step2Title": "Importation de vos modèles tiers",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step3Body": "Remarque : La liste des modèles entraînés se réactualise automatiquement avec les modèles importés les plus actuels dans votre cluster. Si la liste ne s'actualise pas, cliquez sur le bouton \"Actualiser\" dans le coin en haut à droite. Dans le cas contraire, consultez à nouveau les instruction ci-dessus pour procéder au dépannage.",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step4Body": "Cliquez sur \"Démarrer le déploiement\" dans la ligne du tableau contenant votre nouveau modèle pour le déployer et l'utiliser.",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step4Title": "Déployer votre modèle",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.e5Model.description": "E5 est un modèle NLP tiers qui vous permet de réaliser des recherches sémantiques multilingues en utilisant des représentations vectorielles denses. Ce modèle fonctionne mieux pour les documents et les requêtes qui ne sont pas en anglais.",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.elser.description": "ELSER est le modèle NLP d'Elastic pour la recherche sémantique en anglais, utilisant des vecteurs creux. Il donne la priorité à l'intention et à la signification contextuelle plutôt qu'à la correspondance littérale des termes. Il est optimisé spécifiquement pour les documents et les recherches en anglais sur la plateforme Elastic.",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.elser.title": "Elastic Learned Sparse Encoder v2",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.error": "{formError}",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.helpText": "Doit être unique. Seuls les lettres et les traits de soulignement sont autorisés.",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.label": "ID de point de terminaison d'inférence :",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.placeholder": "ID de point de terminaison d'inférence",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.modelDescription": "{description}",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.modelDocumentation": "Afficher la documentation",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.modelTitle": "{title}",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.accordion.ariaLabel": "Options de services",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.accordion.title": "Options de services",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationDescription": "Le nombre d'allocations de modèle à créer.",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationNumberField.ariaLabel": "Nombre dallocations",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationTitle": "Allocations :",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsDescription": "Nombre de threads à utiliser pour chaque allocation de modèle.",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsNumberField.ariaLabel": "Nombre de threads",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsTitle": "Threads :",
"xpack.ml.addInferenceEndpoint.footer.cancel": "Annuler",
"xpack.ml.addInferenceEndpoint.header.title": "Ajouter un point de terminaison d'inférence",
"xpack.ml.addInferenceEndpoint.saveInference": "Enregistrer le point de terminaison d'inférence",
"xpack.ml.advancedSettings.anomalyDetectionDefaultTimeRangeDesc": "Sélection du filtre temporel à utiliser lors de l'affichage des résultats de tâche de détection des anomalies.",
"xpack.ml.advancedSettings.anomalyDetectionDefaultTimeRangeName": "Valeurs par défaut du filtre temporel pour les résultats de détection des anomalies",
"xpack.ml.advancedSettings.enableAnomalyDetectionDefaultTimeRangeDesc": "Utilisez le filtre temporel par défaut dans Single Metric Viewer et Anomaly Explorer. Si l'option n'est pas activée, les résultats sont affichés pour la plage temporelle entière de la tâche.",
@ -29685,13 +29644,6 @@
"xpack.ml.inference.modelsList.stopModelDeploymentActionLabel": "Arrêter le déploiement",
"xpack.ml.inference.modelsList.testModelActionLabel": "Modèle de test",
"xpack.ml.inference.modelsList.updateModelDeploymentActionLabel": "Mettre à jour le déploiement",
"xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.connect_to_api.FlyoutHeaderdescription": "Connectez-vous à vos points de terminaison de service de modèle préférés.",
"xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.eland_python_client.FlyoutHeaderdescription": "Importez des modèles personnalisés via le client Elastic Python.",
"xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.elasticsearchModels.FlyoutHeaderdescription": "Connectez-vous aux modèles préférés d'Elastic ainsi quaux modèles hébergés sur vos nœuds elasticsearch.",
"xpack.ml.inferenceFlyoutWrapper.connectToAPITabTitle": "Se connecter à lAPI",
"xpack.ml.inferenceFlyoutWrapper.elandPythonClientTabTitle": "Client Python Eland",
"xpack.ml.inferenceFlyoutWrapper.elasticsearchModels.inferenceEndpointIdForm.inferenceEnpointDocumentation": "Qu'est-ce que c'est ?",
"xpack.ml.inferenceFlyoutWrapper.elasticsearchModelsTabTitle": "Modèles Elasticsearch",
"xpack.ml.inferencePipeline.content.indices.transforms.addInferencePipelineModal.steps.configureProcessor.title": "Configurer le processeur",
"xpack.ml.inferencePipeline.content.indices.transforms.addInferencePipelineModal.steps.create.title": "Créer",
"xpack.ml.inferencePipeline.content.indices.transforms.addInferencePipelineModal.steps.details.title": "Détails",

View file

@ -28192,47 +28192,6 @@
"xpack.ml.actions.openInSingleMetricViewerTitle": "シングルメトリックビューアーで開く",
"xpack.ml.actions.runPatternAnalysis.description": "ユーザーがフィールドに対してパターン分析を実行する場合にトリガーされます。",
"xpack.ml.actions.runPatternAnalysis.title": "パターン分析を実行",
"xpack.ml.addInferenceEndpoint.connectToApi.cohereApiKey.label": "APIキー",
"xpack.ml.addInferenceEndpoint.connectToApi.cohereModelID.label": "モデルID",
"xpack.ml.addInferenceEndpoint.connectToApi.huggingFaceModelApiKey.label": "API キー",
"xpack.ml.addInferenceEndpoint.connectToApi.huggingFaceModelUrl.label": "HuggingFaceモデルURL",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiApiKey.label": "API キー",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiEndpointUrl.label": "エンドポイントURL",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiModel.label": "モデル",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiOrganizationID.label": "組織 ID",
"xpack.ml.addInferenceEndpoint.e5Model.title": "多言語E5Embeddings from bidirectional encoder representations",
"xpack.ml.addInferenceEndpoint.elandPythonClient.compatibleModelsButtonLabel": "互換性があるNLPモデル",
"xpack.ml.addInferenceEndpoint.elandPythonClient.condaInstallLabel": "あるいは、{condaForgeLink}から{condaLink}を使用してインストールすることもできます。",
"xpack.ml.addInferenceEndpoint.elandPythonClient.importModelButtonLabel": "Elandでモデルをインポート",
"xpack.ml.addInferenceEndpoint.elandPythonClient.pipInstallLabel": "Elandは{pypiLink}から{pipLink}を使用してインストールできます。",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step1Title": "Eland Pythonクライアントをインストール",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step2Body": "互換性のあるサードパーティ製モデルのインポートに関する指示に従ってください",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step2ExampleTitle": "インポートの例",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step2Title": "サードパーティ製モデルのインポート",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step3Body": "注:学習済みモデルリストは、クラスターにインポートされた最新のモデルで自動的に更新されます。リストが更新されない場合は、右上の[更新]ボタンをクリックしてください。そうでない場合は、上記の説明を見直して、トラブルシューティングを行ってください。",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step4Body": "新しいモデルをデプロイして使用するには、新しいモデルを含むテーブル行の[デプロイを開始]をクリックします。",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step4Title": "モデルをデプロイ",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.e5Model.description": "E5は、密ベクトル表現を使用して、多言語のセマンティック検索を可能にするサードパーティNLPモデルです。このモデルは、英語以外の言語によるドキュメントやクエリーに最適です。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.elser.description": "ELSERは、疎ベクトルを利用した英語のセマンティック検索のためのElasticのNLPモデルです。Elasticプラットフォームの英語ドキュメントやクエリー向けに特別に最適化されており、文字通りの用語一致よりも意図や文脈上の意味を優先します。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.elser.title": "Elastic Learned Sparse Encoder v2",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.error": "{formError}",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.helpText": "一意でなければなりません。文字、数字のみを使用できます。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.label": "推論エンドポイントID",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.placeholder": "推論エンドポイントID",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.modelDescription": "{description}",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.modelDocumentation": "ドキュメンテーションを表示",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.modelTitle": "{title}",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.accordion.ariaLabel": "サービスのオプション",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.accordion.title": "サービスのオプション",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationDescription": "作成するモデル割り当ての数。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationNumberField.ariaLabel": "割り当て数",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationTitle": "割り当て:",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsDescription": "各モデル割り当てで使用するスレッドの数。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsNumberField.ariaLabel": "スレッド数",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsTitle": "スレッド:",
"xpack.ml.addInferenceEndpoint.footer.cancel": "キャンセル",
"xpack.ml.addInferenceEndpoint.header.title": "推論エンドポイントを追加",
"xpack.ml.addInferenceEndpoint.saveInference": "推論エンドポイントを保存",
"xpack.ml.advancedSettings.anomalyDetectionDefaultTimeRangeDesc": "異常検知ジョブ結果を表示するときに使用する時間フィルター選択。",
"xpack.ml.advancedSettings.anomalyDetectionDefaultTimeRangeName": "異常検知結果の時間フィルターデフォルト",
"xpack.ml.advancedSettings.enableAnomalyDetectionDefaultTimeRangeDesc": "シングルメトリックビューアーと異常エクスプローラーでデフォルト時間フィルターを使用します。有効ではない場合、ジョブの全時間範囲の結果が表示されます。",
@ -29548,13 +29507,6 @@
"xpack.ml.inference.modelsList.stopModelDeploymentActionLabel": "デプロイを停止",
"xpack.ml.inference.modelsList.testModelActionLabel": "モデルの学習",
"xpack.ml.inference.modelsList.updateModelDeploymentActionLabel": "デプロイを更新",
"xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.connect_to_api.FlyoutHeaderdescription": "任意のモデルサービスエンドポイントに接続します。",
"xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.eland_python_client.FlyoutHeaderdescription": "Elastic Pythonクライアント経由でカスタムモデルをインポートします。",
"xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.elasticsearchModels.FlyoutHeaderdescription": "Elastic優先モデルとElasticsearchードでホスティングされたモデルに接続します。",
"xpack.ml.inferenceFlyoutWrapper.connectToAPITabTitle": "APIに接続",
"xpack.ml.inferenceFlyoutWrapper.elandPythonClientTabTitle": "Eland Pythonクライアント",
"xpack.ml.inferenceFlyoutWrapper.elasticsearchModels.inferenceEndpointIdForm.inferenceEnpointDocumentation": "概要",
"xpack.ml.inferenceFlyoutWrapper.elasticsearchModelsTabTitle": "Elasticsearchモデル",
"xpack.ml.inferencePipeline.content.indices.transforms.addInferencePipelineModal.steps.configureProcessor.title": "プロセッサーの構成",
"xpack.ml.inferencePipeline.content.indices.transforms.addInferencePipelineModal.steps.create.title": "作成",
"xpack.ml.inferencePipeline.content.indices.transforms.addInferencePipelineModal.steps.details.title": "詳細",

View file

@ -28276,47 +28276,6 @@
"xpack.ml.actions.openInSingleMetricViewerTitle": "在 Single Metric Viewer 中打开",
"xpack.ml.actions.runPatternAnalysis.description": "在用户希望对字段运行模式分析时触发。",
"xpack.ml.actions.runPatternAnalysis.title": "运行模式分析",
"xpack.ml.addInferenceEndpoint.connectToApi.cohereApiKey.label": "API 密钥",
"xpack.ml.addInferenceEndpoint.connectToApi.cohereModelID.label": "模型 ID",
"xpack.ml.addInferenceEndpoint.connectToApi.huggingFaceModelApiKey.label": "API 密钥",
"xpack.ml.addInferenceEndpoint.connectToApi.huggingFaceModelUrl.label": "HuggingFace 模型 URL",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiApiKey.label": "API 密钥",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiEndpointUrl.label": "终端 URL",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiModel.label": "模型",
"xpack.ml.addInferenceEndpoint.connectToApi.openaiOrganizationID.label": "组织 ID",
"xpack.ml.addInferenceEndpoint.e5Model.title": "多语言 E5 (Embeddings from bidirectional encoder representations)",
"xpack.ml.addInferenceEndpoint.elandPythonClient.compatibleModelsButtonLabel": "兼容的 NLP 模型",
"xpack.ml.addInferenceEndpoint.elandPythonClient.condaInstallLabel": "或者,也可以使用 {condaForgeLink} 中的 {condaLink} 进行安装:",
"xpack.ml.addInferenceEndpoint.elandPythonClient.importModelButtonLabel": "使用 Eland 导入模型",
"xpack.ml.addInferenceEndpoint.elandPythonClient.pipInstallLabel": "Eland 可以使用 {pypiLink} 中的 {pipLink} 进行安装:",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step1Title": "安装 Eland Python 客户端",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step2Body": "按照有关导入兼容的第三方模型的说明操作",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step2ExampleTitle": "导入示例",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step2Title": "正在导入第三方模型",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step3Body": "注意:将通过您集群中最新导入的模型自动刷新已训练模型列表。如果该列表未更新,请单击右上角的“刷新”按钮。否则,请重新查阅上面的说明以排除故障。",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step4Body": "在包含新模型的表行中单击“开始部署”,以部署并使用该模型。",
"xpack.ml.addInferenceEndpoint.elandPythonClient.step4Title": "部署您的模型",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.e5Model.description": "E5 是一个第三方 NLP 模型,它允许您通过使用密集向量表示方法来执行多语言语义搜索。此模型在处理非英语语言文档和查询时性能最佳。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.elser.description": "ELSER 是 Elastic 的利用稀疏向量执行英语语义搜索的 NLP 模型。与字面值匹配相比,它优先处理意图和上下文含义,对 Elastic 平台上的英语文档和查询专门进行了优化。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.elser.title": "Elastic Learned Sparse Encoder v2",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.error": "{formError}",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.helpText": "必须唯一。只允许使用字母和下划线。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.label": "推理终端 ID",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.inferenceEndpointIdForm.placeholder": "推理终端 ID",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.modelDescription": "{description}",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.modelDocumentation": "查看文档",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.modelTitle": "{title}",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.accordion.ariaLabel": "服务选项",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.accordion.title": "服务选项",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationDescription": "要创建的模型分配的次数。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationNumberField.ariaLabel": "分配次数",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.allocationTitle": "分配:",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsDescription": "由每次模型分配要使用的线程数。",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsNumberField.ariaLabel": "线程数",
"xpack.ml.addInferenceEndpoint.elasticsearchModels.serviceOptions.threadsTitle": "线程:",
"xpack.ml.addInferenceEndpoint.footer.cancel": "取消",
"xpack.ml.addInferenceEndpoint.header.title": "添加推理终端",
"xpack.ml.addInferenceEndpoint.saveInference": "保存推理终端",
"xpack.ml.advancedSettings.anomalyDetectionDefaultTimeRangeDesc": "在查看异常检测作业结果时要使用的时间筛选选项。",
"xpack.ml.advancedSettings.anomalyDetectionDefaultTimeRangeName": "异常检测结果的时间筛选默认值",
"xpack.ml.advancedSettings.enableAnomalyDetectionDefaultTimeRangeDesc": "使用 Single Metric Viewer 和 Anomaly Explorer 中的默认时间筛选。如果未启用,则将显示作业的整个时间范围的结果。",
@ -29635,13 +29594,6 @@
"xpack.ml.inference.modelsList.stopModelDeploymentActionLabel": "停止部署",
"xpack.ml.inference.modelsList.testModelActionLabel": "测试模型",
"xpack.ml.inference.modelsList.updateModelDeploymentActionLabel": "更新部署",
"xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.connect_to_api.FlyoutHeaderdescription": "连接到首选模型服务终端。",
"xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.eland_python_client.FlyoutHeaderdescription": "通过 Elastic python 客户端导入定制模型。",
"xpack.ml.inferenceFlyoutWrapper.addInferenceEndpoint.elasticsearchModels.FlyoutHeaderdescription": "连接到 Elastic 首选模型和 Elasticsearch 节点上托管的模型。",
"xpack.ml.inferenceFlyoutWrapper.connectToAPITabTitle": "连接到 API",
"xpack.ml.inferenceFlyoutWrapper.elandPythonClientTabTitle": "Eland Python 客户端",
"xpack.ml.inferenceFlyoutWrapper.elasticsearchModels.inferenceEndpointIdForm.inferenceEnpointDocumentation": "这是什么?",
"xpack.ml.inferenceFlyoutWrapper.elasticsearchModelsTabTitle": "Elasticsearch 模型",
"xpack.ml.inferencePipeline.content.indices.transforms.addInferencePipelineModal.steps.configureProcessor.title": "配置处理器",
"xpack.ml.inferencePipeline.content.indices.transforms.addInferencePipelineModal.steps.create.title": "创建",
"xpack.ml.inferencePipeline.content.indices.transforms.addInferencePipelineModal.steps.details.title": "详情",

View file

@ -7,8 +7,6 @@
import { ReactNode } from 'react';
import { InferenceTaskType } from '@elastic/elasticsearch/lib/api/types';
import { ModelConfig } from '@kbn/inference_integration_flyout';
import { GenericObject } from './mappings_editor';
import { PARAMETERS_DEFINITION } from '../constants';
@ -254,12 +252,3 @@ export enum DefaultInferenceModels {
elser_model_2 = 'elser_model_2',
e5 = 'e5',
}
export enum DeploymentState {
'DEPLOYED' = 'deployed',
'NOT_DEPLOYED' = 'not_deployed',
}
export interface CustomInferenceEndpointConfig {
taskType: InferenceTaskType;
modelConfig: ModelConfig;
}

View file

@ -5,7 +5,6 @@
* 2.0.
*/
import { Service } from '@kbn/inference_integration_flyout/types';
import { ModelDownloadState, TrainedModelStat } from '@kbn/ml-plugin/common/types/trained_models';
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import {
@ -33,7 +32,7 @@ const getCustomInferenceIdMap = (
const inferenceEntry = isLocalModel(model)
? {
trainedModelId: model.service_settings.model_id,
isDeployable: model.service === Service.elasticsearch,
isDeployable: model.service === 'elasticsearch',
isDeployed: modelStatsById[model.inference_id]?.state === 'started',
isDownloading: Boolean(downloadStates[model.service_settings.model_id]),
modelStats: modelStatsById[model.inference_id],

View file

@ -47,7 +47,6 @@
"@kbn/shared-ux-utility",
"@kbn/index-management-shared-types",
"@kbn/utility-types",
"@kbn/inference_integration_flyout",
"@kbn/ml-plugin",
"@kbn/react-kibana-context-render",
"@kbn/react-kibana-mount",

View file

@ -29,10 +29,10 @@ import {
import { FormattedMessage } from '@kbn/i18n-react';
import React, { type FC, useMemo, useState } from 'react';
import { groupBy } from 'lodash';
import { ElandPythonClient } from '@kbn/inference_integration_flyout';
import type { ModelDownloadItem } from '../../../common/types/trained_models';
import { usePermissionCheck } from '../capabilities/check_capabilities';
import { useMlKibana } from '../contexts/kibana';
import { ElandPythonClient } from './eland_python_client';
export interface AddModelFlyoutProps {
modelDownloads: ModelDownloadItem[];

View file

@ -27,14 +27,17 @@ export const ElandPythonClient: React.FC<{
<EuiSteps
steps={[
{
title: i18n.translate('xpack.ml.addInferenceEndpoint.elandPythonClient.step1Title', {
defaultMessage: 'Install the Eland Python Client',
}),
title: i18n.translate(
'xpack.ml.trainedModels.addModelFlyout.elandPythonClient.step1Title',
{
defaultMessage: 'Install the Eland Python Client',
}
),
children: (
<EuiText>
<EuiText size={'s'} color={'subdued'}>
<FormattedMessage
id="xpack.ml.addInferenceEndpoint.elandPythonClient.pipInstallLabel"
id="xpack.ml.trainedModels.addModelFlyout.elandPythonClient.pipInstallLabel"
defaultMessage="Eland can be installed with {pipLink} from {pypiLink}:"
values={{
pipLink: (
@ -62,7 +65,7 @@ export const ElandPythonClient: React.FC<{
<EuiText size={'s'} color={'subdued'}>
<FormattedMessage
id="xpack.ml.addInferenceEndpoint.elandPythonClient.condaInstallLabel"
id="xpack.ml.trainedModels.addModelFlyout.elandPythonClient.condaInstallLabel"
defaultMessage="or it can also be installed with {condaLink} from {condaForgeLink}:"
values={{
condaLink: (
@ -91,15 +94,18 @@ export const ElandPythonClient: React.FC<{
),
},
{
title: i18n.translate('xpack.ml.addInferenceEndpoint.elandPythonClient.step2Title', {
defaultMessage: 'Importing your third-party model',
}),
title: i18n.translate(
'xpack.ml.trainedModels.addModelFlyout.elandPythonClient.step2Title',
{
defaultMessage: 'Importing your third-party model',
}
),
children: (
<EuiText>
<p>
<EuiText size={'s'} color={'subdued'}>
<FormattedMessage
id="xpack.ml.addInferenceEndpoint.elandPythonClient.step2Body"
id="xpack.ml.trainedModels.addModelFlyout.elandPythonClient.step2Body"
defaultMessage="Follow the instructions on importing compatible third-party models"
/>
</EuiText>
@ -108,7 +114,7 @@ export const ElandPythonClient: React.FC<{
<p>
<b>
<FormattedMessage
id="xpack.ml.addInferenceEndpoint.elandPythonClient.step2ExampleTitle"
id="xpack.ml.trainedModels.addModelFlyout.elandPythonClient.step2ExampleTitle"
defaultMessage="Example import"
/>
</b>
@ -131,7 +137,7 @@ export const ElandPythonClient: React.FC<{
<EuiFlexItem grow={false}>
<EuiButtonEmpty href={nlpImportModel} target={'_blank'} iconType={'help'}>
<FormattedMessage
id="xpack.ml.addInferenceEndpoint.elandPythonClient.importModelButtonLabel"
id="xpack.ml.trainedModels.addModelFlyout.elandPythonClient.importModelButtonLabel"
defaultMessage="Import models with Eland"
/>
</EuiButtonEmpty>
@ -139,7 +145,7 @@ export const ElandPythonClient: React.FC<{
<EuiFlexItem grow={false}>
<EuiButtonEmpty href={supportedNlpModels} target={'_blank'} iconType={'help'}>
<FormattedMessage
id="xpack.ml.addInferenceEndpoint.elandPythonClient.compatibleModelsButtonLabel"
id="xpack.ml.trainedModels.addModelFlyout.elandPythonClient.compatibleModelsButtonLabel"
defaultMessage="Compatible NLP models"
/>
</EuiButtonEmpty>
@ -149,15 +155,18 @@ export const ElandPythonClient: React.FC<{
),
},
{
title: i18n.translate('xpack.ml.addInferenceEndpoint.elandPythonClient.step4Title', {
defaultMessage: 'Deploy your model',
}),
title: i18n.translate(
'xpack.ml.trainedModels.addModelFlyout.elandPythonClient.step4Title',
{
defaultMessage: 'Deploy your model',
}
),
children: (
<>
<EuiText size={'s'} color={'subdued'}>
<p>
<FormattedMessage
id="xpack.ml.addInferenceEndpoint.elandPythonClient.step4Body"
id="xpack.ml.trainedModels.addModelFlyout.elandPythonClient.step4Body"
defaultMessage="Click “Start deployment” in the table row containing your new model to deploy and use it."
/>
</p>
@ -166,7 +175,7 @@ export const ElandPythonClient: React.FC<{
<EuiText size="s" color={'subdued'}>
<p>
<FormattedMessage
id="xpack.ml.addInferenceEndpoint.elandPythonClient.step3Body"
id="xpack.ml.trainedModels.addModelFlyout.elandPythonClient.step3Body"
defaultMessage="Note: The trained model list automatically refreshes with the most current imported models in your cluster. If the list is not updated, click the 'Refresh' button in the top right corner. Otherwise, revisit the instructions above to troubleshoot."
/>
</p>

View file

@ -6,32 +6,11 @@
*/
import type { estypes } from '@elastic/elasticsearch';
import type { InferenceTaskType } from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import type { ModelConfig } from '@kbn/inference_integration_flyout/types';
import type { HttpService } from '../http_service';
import { ML_INTERNAL_BASE_PATH } from '../../../../common/constants/app';
export function inferenceModelsApiProvider(httpService: HttpService) {
return {
/**
* creates inference endpoint id
* @param inferenceId - Inference Endpoint Id
* @param taskType - Inference Task type. Either sparse_embedding or text_embedding
* @param modelConfig - Model configuration based on service type
*/
async createInferenceEndpoint(
inferenceId: string,
taskType: InferenceTaskType,
modelConfig: ModelConfig
) {
const result = await httpService.http<estypes.InferencePutResponse>({
path: `${ML_INTERNAL_BASE_PATH}/_inference/${taskType}/${inferenceId}`,
method: 'PUT',
body: JSON.stringify(modelConfig),
version: '1',
});
return result;
},
/**
* Gets all inference endpoints
*/

View file

@ -5,70 +5,14 @@
* 2.0.
*/
import type { CloudSetup } from '@kbn/cloud-plugin/server';
import { schema } from '@kbn/config-schema';
import type {
InferenceInferenceEndpoint,
InferenceTaskType,
} from '@elastic/elasticsearch/lib/api/types';
import type { RouteInitialization } from '../types';
import { createInferenceSchema } from './schemas/inference_schema';
import { modelsProvider } from '../models/model_management';
import { wrapError } from '../client/error_wrapper';
import { ML_INTERNAL_BASE_PATH } from '../../common/constants/app';
import { syncSavedObjectsFactory } from '../saved_objects';
export function inferenceModelRoutes(
{ router, routeGuard, getEnabledFeatures }: RouteInitialization,
cloud: CloudSetup
) {
router.versioned
.put({
path: `${ML_INTERNAL_BASE_PATH}/_inference/{taskType}/{inferenceId}`,
access: 'internal',
security: {
authz: {
requiredPrivileges: ['ml:canCreateInferenceEndpoint'],
},
},
summary: 'Create an inference endpoint',
description: 'Create an inference endpoint',
})
.addVersion(
{
version: '1',
validate: {
request: {
params: createInferenceSchema,
body: schema.maybe(schema.object({}, { unknowns: 'allow' })),
},
},
},
routeGuard.fullLicenseAPIGuard(
async ({ client, mlClient, request, response, mlSavedObjectService }) => {
try {
const { inferenceId, taskType } = request.params;
const body = await modelsProvider(
client,
mlClient,
cloud,
getEnabledFeatures()
).createInferenceEndpoint(
inferenceId,
taskType as InferenceTaskType,
request.body as InferenceInferenceEndpoint
);
const { syncSavedObjects } = syncSavedObjectsFactory(client, mlSavedObjectService);
await syncSavedObjects(false);
return response.ok({
body,
});
} catch (e) {
return response.customError(wrapError(e));
}
}
)
);
router.versioned
.get({
path: `${ML_INTERNAL_BASE_PATH}/_inference/all`,

View file

@ -62,7 +62,6 @@
"@kbn/home-plugin",
"@kbn/i18n-react",
"@kbn/i18n",
"@kbn/inference_integration_flyout",
"@kbn/inspector-plugin",
"@kbn/json-schemas",
"@kbn/kibana-react-plugin",

View file

@ -15,7 +15,6 @@ export default function ({ loadTestFile }: FtrProviderContext) {
loadTestFile(require.resolve('./stats'));
loadTestFile(require.resolve('./data_streams'));
loadTestFile(require.resolve('./templates'));
loadTestFile(require.resolve('./inference_endpoints'));
loadTestFile(require.resolve('./component_templates'));
loadTestFile(require.resolve('./cluster_nodes'));
loadTestFile(require.resolve('./index_details'));

View file

@ -1,65 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import expect from '@kbn/expect';
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { FtrProviderContext } from '../../../ftr_provider_context';
const API_BASE_PATH = '/api/index_management';
export default function ({ getService }: FtrProviderContext) {
const supertest = getService('supertest');
const log = getService('log');
const ml = getService('ml');
const inferenceId = 'my-elser-model';
const taskType = 'sparse_embedding';
const service = 'elasticsearch';
const modelId = '.elser_model_2';
describe('Inference endpoints', function () {
after(async () => {
try {
log.debug(`Deleting underlying trained model`);
await ml.api.deleteTrainedModelES(modelId);
await ml.testResources.cleanMLSavedObjects();
} catch (err) {
log.debug('[Cleanup error] Error deleting trained model or saved ml objects');
throw err;
}
});
it('create inference endpoint', async () => {
log.debug(`create inference endpoint`);
await ml.api.createInferenceEndpoint(inferenceId, taskType, {
service,
service_settings: {
num_allocations: 1,
num_threads: 1,
model_id: modelId,
},
});
});
it('get all inference endpoints and confirm inference endpoint exist', async () => {
const { body: inferenceEndpoints } = await supertest
.get(`${API_BASE_PATH}/inference/all`)
.set('kbn-xsrf', 'xxx')
.set('x-elastic-internal-origin', 'xxx')
.expect(200);
expect(inferenceEndpoints).to.be.ok();
expect(
inferenceEndpoints.some(
(endpoint: InferenceAPIConfigResponse) => endpoint.inference_id === inferenceId
)
).to.eql(true, `${inferenceId} not found in the GET _inference/_all response`);
});
it('can delete inference endpoint', async () => {
log.debug(`Deleting inference endpoint`);
await ml.api.deleteInferenceEndpoint(inferenceId, taskType);
log.debug('> Inference endpoint deleted');
});
});
}

View file

@ -167,9 +167,7 @@
"@kbn/alerting-state-types",
"@kbn/reporting-server",
"@kbn/data-quality-plugin",
"@kbn/ml-trained-models-utils",
"@kbn/observability-synthetics-test-data",
"@kbn/ml-trained-models-utils",
"@kbn/openapi-common",
"@kbn/securitysolution-lists-common",
"@kbn/securitysolution-exceptions-common",

View file

@ -13,7 +13,6 @@ export default function ({ loadTestFile }: FtrProviderContext) {
loadTestFile(require.resolve('./index_templates'));
loadTestFile(require.resolve('./indices'));
loadTestFile(require.resolve('./inference_endpoints'));
loadTestFile(require.resolve('./enrich_policies'));
loadTestFile(require.resolve('./create_enrich_policies'));
loadTestFile(require.resolve('./index_component_templates'));

View file

@ -1,77 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import expect from '@kbn/expect';
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { InternalRequestHeader, RoleCredentials } from '../../../../shared/services';
import { FtrProviderContext } from '../../../ftr_provider_context';
const API_BASE_PATH = '/api/index_management';
export default function ({ getService }: FtrProviderContext) {
const log = getService('log');
const ml = getService('ml');
const inferenceId = 'my-elser-model';
const taskType = 'sparse_embedding';
const service = 'elasticsearch';
const modelId = '.elser_model_2';
const svlCommonApi = getService('svlCommonApi');
const svlUserManager = getService('svlUserManager');
const supertestWithoutAuth = getService('supertestWithoutAuth');
let roleAuthc: RoleCredentials;
let internalReqHeader: InternalRequestHeader;
describe('Inference endpoints', function () {
before(async () => {
roleAuthc = await svlUserManager.createM2mApiKeyWithRoleScope('admin');
internalReqHeader = svlCommonApi.getInternalRequestHeader();
});
after(async () => {
try {
log.debug(`Deleting underlying trained model`);
await ml.api.deleteTrainedModelES(modelId);
await ml.testResources.cleanMLSavedObjects();
} catch (err) {
log.debug('[Cleanup error] Error deleting trained model and saved ml objects');
throw err;
}
await svlUserManager.invalidateM2mApiKeyWithRoleScope(roleAuthc);
});
it('create inference endpoint', async () => {
log.debug(`create inference endpoint`);
await ml.api.createInferenceEndpoint(inferenceId, taskType, {
service,
service_settings: {
num_allocations: 1,
num_threads: 1,
model_id: modelId,
},
});
});
it('get all inference endpoints and confirm inference endpoint exist', async () => {
const { body: inferenceEndpoints } = await supertestWithoutAuth
.get(`${API_BASE_PATH}/inference/all`)
.set(internalReqHeader)
.set(roleAuthc.apiKeyHeader)
.expect(200);
expect(inferenceEndpoints).to.be.ok();
expect(
inferenceEndpoints.some(
(endpoint: InferenceAPIConfigResponse) => endpoint.inference_id === inferenceId
)
).to.eql(true, `${inferenceId} not found in the GET _inference/_all response`);
});
it('can delete inference endpoint', async () => {
log.debug(`Deleting inference endpoint`);
await ml.api.deleteInferenceEndpoint(inferenceId, taskType);
log.debug('> Inference endpoint deleted');
});
});
}

View file

@ -4,9 +4,6 @@
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import type * as estypes from '@elastic/elasticsearch/lib/api/types';
import { testHasEmbeddedConsole } from './embedded_console';
import { FtrProviderContext } from '../../ftr_provider_context';
@ -19,20 +16,10 @@ export default function ({ getPageObjects, getService }: FtrProviderContext) {
'header',
]);
const svlSearchNavigation = getService('svlSearchNavigation');
const browser = getService('browser');
const ml = getService('ml');
describe('Serverless Inference Management UI', function () {
const endpoint = 'endpoint-1';
const taskType = 'sparse_embedding';
const modelConfig = {
service: 'elasticsearch',
service_settings: {
num_allocations: 1,
num_threads: 1,
model_id: '.elser_model_2',
},
};
// see details: https://github.com/elastic/kibana/issues/204539
this.tags(['failsOnMKI']);
before(async () => {
await pageObjects.svlCommonPage.loginWithRole('developer');
@ -59,42 +46,6 @@ export default function ({ getPageObjects, getService }: FtrProviderContext) {
});
});
describe('delete action', () => {
const usageIndex = 'elser_index';
beforeEach(async () => {
await ml.api.createInferenceEndpoint(endpoint, taskType, modelConfig);
await browser.refresh();
await pageObjects.header.waitUntilLoadingHasFinished();
});
after(async () => {
await ml.api.deleteIndices(usageIndex);
await ml.api.deleteIngestPipeline(endpoint);
});
it('deletes modal successfully without any usage', async () => {
await pageObjects.svlSearchInferenceManagementPage.InferenceTabularPage.expectEndpointWithoutUsageTobeDelete();
});
it('deletes modal successfully with usage', async () => {
const indexMapping: estypes.MappingTypeMapping = {
properties: {
content: {
type: 'text',
},
content_embedding: {
type: 'semantic_text',
inference_id: endpoint,
},
},
};
await ml.api.createIngestPipeline(endpoint);
await ml.api.createIndex(usageIndex, indexMapping);
await pageObjects.svlSearchInferenceManagementPage.InferenceTabularPage.expectEndpointWithUsageTobeDelete();
});
});
describe('create inference flyout', () => {
it('renders successfully', async () => {
await pageObjects.svlSearchInferenceManagementPage.AddInferenceFlyout.expectInferenceEndpointToBeVisible();

View file

@ -85,7 +85,6 @@
"@kbn/config-schema",
"@kbn/features-plugin",
"@kbn/observability-ai-assistant-plugin",
"@kbn/ml-trained-models-utils",
"@kbn/test-suites-src",
"@kbn/console-plugin",
"@kbn/cloud-security-posture-common",

View file

@ -5979,10 +5979,6 @@
version "0.0.0"
uid ""
"@kbn/inference_integration_flyout@link:x-pack/platform/packages/private/ml/inference_integration_flyout":
version "0.0.0"
uid ""
"@kbn/infra-forge@link:x-pack/platform/packages/private/kbn-infra-forge":
version "0.0.0"
uid ""