# [Search] Fix inference endpoints ignoring different deployments (#191223)

## Summary

Inference endpoints were ignoring separate deployments of the same
trained model and only looking at the model's first deployment. This is
wrong: we should check the specific deployment that the inference
endpoint's trained model reference points to, i.e. the deployment whose
`deployment_id` matches the endpoint's `inference_id`. This PR fixes
that.
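
At its core, the fix changes the trained-model-stats lookup so that it matches not just on `model_id` but also on the deployment: the stats entry's `deployment_id` must equal the endpoint's `inference_id`. A minimal before/after sketch — the interfaces below are simplified stand-ins for the real types in `@kbn/ml-trained-models-utils`, assumed here for illustration:

```typescript
// Simplified shapes, assumed for illustration only.
interface DeploymentStats {
  deployment_id: string;
  state: 'starting' | 'started' | 'stopping';
}
interface TrainedModelStat {
  model_id: string;
  deployment_stats?: DeploymentStats;
}

// Before: matching on model_id alone resolves every endpoint to the first
// deployment of the model, even when the endpoint has its own deployment.
const findStatsBefore = (stats: TrainedModelStat[], modelId: string) =>
  stats.find((s) => s.model_id === modelId);

// After: also match deployment_id against the endpoint's inference_id,
// so each endpoint reports the state of its own deployment.
const findStatsAfter = (stats: TrainedModelStat[], modelId: string, inferenceId: string) =>
  stats.find(
    (s) => s.model_id === modelId && s.deployment_stats?.deployment_id === inferenceId
  );
```

This is also why the error map and pending-deployment list below are now keyed by `inference_id` rather than `trainedModelId`: one model can back several endpoints, each with its own deployment.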
Authored by Sander Philipse on 2024-08-29 15:13:10 +02:00; committed by GitHub
parent d06b063eb5
commit 3de8133a5a
22 changed files with 200 additions and 137 deletions


@@ -36,4 +36,5 @@ export {
LATEST_ELSER_MODEL_ID,
LATEST_E5_MODEL_ID,
ElserModels,
isLocalModel,
} from './src/constants/trained_models';


@@ -284,3 +284,9 @@ export type InferenceAPIConfigResponse = {
model?: string;
};
} & InferenceServiceSettings;
export function isLocalModel(
model: InferenceServiceSettings
): model is LocalInferenceServiceSettings {
return ['elser', 'elasticsearch'].includes((model as LocalInferenceServiceSettings).service);
}
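
The new `isLocalModel` helper is a TypeScript type guard: it narrows `InferenceServiceSettings` to the locally hosted Elastic services (`elser`, `elasticsearch`), so callers can safely read `service_settings.model_id`. A minimal usage sketch, with simplified stand-in types assumed for illustration:

```typescript
// Simplified stand-ins for the real Kibana types, assumed for illustration.
interface InferenceServiceSettings {
  service: string;
  service_settings: Record<string, unknown>;
}
interface LocalInferenceServiceSettings extends InferenceServiceSettings {
  service: 'elser' | 'elasticsearch';
  service_settings: { model_id: string };
}

function isLocalModel(
  model: InferenceServiceSettings
): model is LocalInferenceServiceSettings {
  return ['elser', 'elasticsearch'].includes(model.service);
}

declare const endpoint: InferenceServiceSettings;
if (isLocalModel(endpoint)) {
  // Narrowed: locally hosted models are guaranteed to carry a model_id.
  console.log(endpoint.service_settings.model_id);
}
```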


@@ -118,7 +118,9 @@ export const IndexError: React.FC<IndexErrorProps> = ({ indexName }) => {
if (isLocalModel(model)) {
const modelId = model.service_settings.model_id;
const modelStats = trainedModelStats?.trained_model_stats.find(
(value) => value.model_id === modelId
(value) =>
value.model_id === modelId &&
value.deployment_stats?.deployment_id === field.source.inference_id
);
if (!modelStats || modelStats.deployment_stats?.state !== 'started') {
return {


@@ -217,7 +217,7 @@ describe('When semantic_text is enabled', () => {
},
} as any);
const { find } = setup({
errorsInTrainedModelDeployment: { '.elser_model_2': 'Error' },
errorsInTrainedModelDeployment: { elser_model_2: 'Error' },
saveMappings,
forceSaveMappings,
setErrorsInTrainedModelDeployment,
@@ -228,7 +228,7 @@
});
it('should display only the errored deployment', () => {
expect(find('trainedModelsDeploymentModal').text()).toContain('.elser_model_2');
expect(find('trainedModelsDeploymentModal').text()).toContain('elser_model_2');
expect(find('trainedModelsDeploymentModal').text()).not.toContain('valid-model');
});


@@ -32,14 +32,16 @@ import { getRequiredParametersFormForType } from './required_parameters_forms';
import { useSemanticText } from './semantic_text/use_semantic_text';
const formWrapper = (props: any) => <form {...props} />;
export interface ModelIdMapEntry {
trainedModelId: string;
isDeployed: boolean;
isDeployable: boolean;
isDownloading: boolean;
modelStats?: TrainedModelStat; // third-party models don't have model stats
}
export interface InferenceToModelIdMap {
[key: string]: {
trainedModelId: string;
isDeployed: boolean;
isDeployable: boolean;
isDownloading: boolean;
modelStats?: TrainedModelStat; // third-party models don't have model stats
};
[key: string]: ModelIdMapEntry;
}
export interface SemanticTextInfo {


@@ -36,8 +36,7 @@ export function useSemanticText(props: UseSemanticTextProps) {
const { fields, mappingViewFields } = useMappingsState();
const { fetchInferenceToModelIdMap } = useDetailsPageMappingsModelManagement();
const dispatch = useDispatch();
const { showSuccessToasts, showErrorToasts, showSuccessfullyDeployedToast } =
useMLModelNotificationToasts();
const { showSuccessToasts, showErrorToasts } = useMLModelNotificationToasts();
const fieldTypeValue = form.getFormData()?.type;
useEffect(() => {
@@ -127,28 +126,27 @@
}
try {
// Only show toast if it's an internal Elastic model that hasn't been deployed yet
if (trainedModelId && inferenceData.isDeployable && !inferenceData.isDeployed) {
showSuccessToasts(trainedModelId);
}
await createInferenceEndpoint(
trainedModelId,
data.inference_id,
customInferenceEndpointConfig
);
if (trainedModelId) {
if (inferenceData.isDeployable && !inferenceData.isDeployed) {
showSuccessToasts(trainedModelId);
}
// clear error because we've succeeded here
setErrorsInTrainedModelDeployment?.((prevItems) => ({
...prevItems,
[trainedModelId]: undefined,
[data.inference_id]: undefined,
}));
}
showSuccessfullyDeployedToast(trainedModelId);
} catch (error) {
// trainedModelId is empty string when it's a third party model
if (trainedModelId) {
setErrorsInTrainedModelDeployment?.((prevItems) => ({
...prevItems,
[trainedModelId]: error,
[data.inference_id]: error,
}));
}
showErrorToasts(error);


@@ -59,7 +59,9 @@ export const useIndexErrors = (
if (isLocalModel(model)) {
const modelId = model.service_settings.model_id;
const modelStats = trainedModelStats?.trained_model_stats.find(
(value) => value.model_id === modelId
(value) =>
value.model_id === modelId &&
value.deployment_stats?.deployment_id === field.source.inference_id
);
if (!modelStats || modelStats.deployment_stats?.state !== 'started') {
return {
@@ -68,8 +70,9 @@
'xpack.idxMgmt.indexOverview.indexErrors.modelNotStartedError',
{
defaultMessage:
'Model {modelId} for inference endpoint {inferenceId} in field {fieldName} has not been started',
'Deployment {deploymentId} of model {modelId} for inference endpoint {inferenceId} in field {fieldName} has not been started',
values: {
deploymentId: field.source.inference_id as string,
inferenceId: field.source.inference_id as string,
fieldName: field.path.join('.'),
modelId,


@@ -217,10 +217,11 @@ export const DetailsPageMappingsContent: FunctionComponent<{
const updateMappings = useCallback(
async (forceSaveMappings?: boolean) => {
const hasSemanticText = hasSemanticTextField(state.fields);
let inferenceToModelIdMap = state.inferenceToModelIdMap;
setIsUpdatingMappings(true);
try {
if (isSemanticTextEnabled && hasMLPermissions && hasSemanticText && !forceSaveMappings) {
await fetchInferenceToModelIdMap();
inferenceToModelIdMap = await fetchInferenceToModelIdMap();
}
const fields = hasSemanticText ? getStateWithCopyToFields(state).fields : state.fields;
const denormalizedFields = deNormalize(fields);
@@ -231,8 +232,8 @@
.map((field) => field.inference_id)
.filter(
(inferenceId: string) =>
state.inferenceToModelIdMap?.[inferenceId].trainedModelId && // third-party inference models don't have trainedModelId
!state.inferenceToModelIdMap?.[inferenceId].isDeployed
inferenceToModelIdMap?.[inferenceId].trainedModelId && // third-party inference models don't have trainedModelId
!inferenceToModelIdMap?.[inferenceId].isDeployed
);
setHasSavedFields(true);
if (inferenceIdsInPendingList.length === 0) {


@@ -26,6 +26,7 @@ import React from 'react';
import { EuiLink } from '@elastic/eui';
import { useEffect, useMemo, useState } from 'react';
import { i18n } from '@kbn/i18n';
import { ModelIdMapEntry } from '../../../../components/mappings_editor/components/document_fields/fields';
import { isSemanticTextField } from '../../../../components/mappings_editor/lib/utils';
import { deNormalize } from '../../../../components/mappings_editor/lib';
import { useMLModelNotificationToasts } from '../../../../../hooks/use_ml_model_status_toasts';
@@ -62,7 +63,7 @@ export function TrainedModelsDeploymentModal({
const closeModal = () => setIsModalVisible(false);
const [mlManagementPageUrl, setMlManagementPageUrl] = useState<string>('');
const [allowForceSaveMappings, setAllowForceSaveMappings] = useState<boolean>(false);
const { showErrorToasts } = useMLModelNotificationToasts();
const { showErrorToasts, showSuccessfullyDeployedToast } = useMLModelNotificationToasts();
useEffect(() => {
const mlLocator = url?.locators.get(ML_APP_LOCATOR);
@@ -85,13 +86,19 @@
const [pendingDeployments, setPendingDeployments] = useState<string[]>([]);
const startModelAllocation = async (trainedModelId: string) => {
const startModelAllocation = async (entry: ModelIdMapEntry & { inferenceId: string }) => {
try {
await ml?.mlApi?.trainedModels.startModelAllocation(trainedModelId);
await ml?.mlApi?.trainedModels.startModelAllocation(entry.trainedModelId, {
number_of_allocations: 1,
threads_per_allocation: 1,
priority: 'normal',
deployment_id: entry.inferenceId,
});
showSuccessfullyDeployedToast(entry.trainedModelId);
} catch (error) {
setErrorsInTrainedModelDeployment((previousState) => ({
...previousState,
[trainedModelId]: error.message,
[entry.inferenceId]: error.message,
}));
showErrorToasts(error);
setIsModalVisible(true);
@@ -99,8 +106,13 @@
};
useEffect(() => {
const models = inferenceIdsInPendingList.map(
(inferenceId) => inferenceToModelIdMap?.[inferenceId]
const models = inferenceIdsInPendingList.map((inferenceId) =>
inferenceToModelIdMap?.[inferenceId]
? {
inferenceId,
...inferenceToModelIdMap?.[inferenceId],
}
: undefined
); // filter out third-party models
for (const model of models) {
if (
@@ -110,16 +122,17 @@
!model.isDeployed
) {
// Sometimes the model gets stuck in a ready to deploy state, so we need to trigger deployment manually
startModelAllocation(model.trainedModelId);
// This is currently the only way to surface a specific error message to the user
startModelAllocation(model);
}
}
const pendingModels = models
const allPendingDeployments = models
.map((model) => {
return model?.trainedModelId && !model?.isDeployed ? model?.trainedModelId : '';
return model?.trainedModelId && !model?.isDeployed ? model?.inferenceId : '';
})
.filter((trainedModelId) => !!trainedModelId);
const uniqueDeployments = pendingModels.filter(
(deployment, index) => pendingModels.indexOf(deployment) === index
.filter((id) => !!id);
const uniqueDeployments = allPendingDeployments.filter(
(deployment, index) => allPendingDeployments.indexOf(deployment) === index
);
setPendingDeployments(uniqueDeployments);
// eslint-disable-next-line react-hooks/exhaustive-deps
@@ -132,6 +145,8 @@
useEffect(() => {
if (erroredDeployments.length > 0 || pendingDeployments.length > 0) {
setIsModalVisible(true);
} else {
setIsModalVisible(false);
}
}, [erroredDeployments.length, pendingDeployments.length]);
return isModalVisible ? (


@@ -34,9 +34,9 @@ const getCustomInferenceIdMap = (
? {
trainedModelId: model.service_settings.model_id,
isDeployable: model.service === Service.elser || model.service === Service.elasticsearch,
isDeployed: modelStatsById[model.service_settings.model_id]?.state === 'started',
isDeployed: modelStatsById[model.inference_id]?.state === 'started',
isDownloading: Boolean(downloadStates[model.service_settings.model_id]),
modelStats: modelStatsById[model.service_settings.model_id],
modelStats: modelStatsById[model.inference_id],
}
: {
trainedModelId: '',
@@ -104,7 +104,7 @@ export const useDetailsPageMappingsModelManagement = () => {
Record<string, TrainedModelStat['deployment_stats'] | undefined>
>((acc, { model_id: modelId, deployment_stats: stats }) => {
if (modelId && stats) {
acc[modelId] = stats;
acc[stats.deployment_id] = stats;
}
return acc;
}, {}) || {};


@@ -8,20 +8,26 @@
import { render, screen } from '@testing-library/react';
import React from 'react';
import { DeploymentStatus } from './deployment_status';
import { DeploymentStatusEnum } from '../../types';
import { DeploymentState } from '@kbn/ml-trained-models-utils';
describe('DeploymentStatus component', () => {
it.each([[DeploymentStatusEnum.deployed, DeploymentStatusEnum.notDeployed]])(
'renders with %s status, expects %s color, and correct data-test-subj attribute',
(status) => {
render(<DeploymentStatus status={status} />);
const healthComponent = screen.getByTestId(`table-column-deployment-${status}`);
expect(healthComponent).toBeInTheDocument();
}
);
it('starting renders with warning status', () => {
render(<DeploymentStatus status={'starting' as DeploymentState} />);
const healthComponent = screen.getByTestId(`table-column-deployment-starting`);
expect(healthComponent).toBeInTheDocument();
expect(healthComponent).toHaveAttribute('color', 'warning');
});
it('stopping renders with danger status', () => {
render(<DeploymentStatus status={'stopping' as DeploymentState} />);
const healthComponent = screen.getByTestId(`table-column-deployment-stopping`);
expect(healthComponent).toBeInTheDocument();
expect(healthComponent).toHaveAttribute('color', 'danger');
});
it('does not render when status is notApplicable', () => {
const { container } = render(<DeploymentStatus status={DeploymentStatusEnum.notApplicable} />);
expect(container).toBeEmptyDOMElement();
it('started renders with success status', () => {
render(<DeploymentStatus status={'started' as DeploymentState} />);
const healthComponent = screen.getByTestId(`table-column-deployment-started`);
expect(healthComponent).toBeInTheDocument();
expect(healthComponent).toHaveAttribute('color', 'success');
});
});


@@ -7,42 +7,52 @@
import React from 'react';
import { EuiIcon, EuiToolTip } from '@elastic/eui';
import { DeploymentStatusEnum } from '../../types';
import { DeploymentState } from '@kbn/ml-trained-models-utils';
import * as i18n from './translations';
interface DeploymentStatusProps {
status: DeploymentStatusEnum;
status: DeploymentState | undefined;
}
function getStatus(status: DeploymentState | undefined) {
switch (status) {
case 'started':
return {
statusColor: 'success',
type: 'dot',
tooltip: i18n.MODEL_DEPLOYED,
};
case 'starting':
return {
statusColor: 'warning',
type: 'warning',
tooltip: i18n.MODEL_STARTING,
};
case 'stopping':
return {
statusColor: 'danger',
type: 'dot',
tooltip: i18n.MODEL_STOPPING,
};
case undefined:
return {
statusColor: 'danger',
type: 'dot',
tooltip: i18n.MODEL_NOT_DEPLOYED,
};
}
}
export const DeploymentStatus: React.FC<DeploymentStatusProps> = ({ status }) => {
if (status === DeploymentStatusEnum.notApplicable || !status) {
return null;
}
let statusColor: string;
let type: string;
let tooltip: string;
switch (status) {
case DeploymentStatusEnum.deployed:
statusColor = 'success';
type = 'dot';
tooltip = i18n.MODEL_DEPLOYED;
break;
case DeploymentStatusEnum.notDeployed:
statusColor = 'warning';
type = 'warning';
tooltip = i18n.MODEL_NOT_DEPLOYED;
break;
case DeploymentStatusEnum.notDeployable:
statusColor = 'danger';
type = 'dot';
tooltip = i18n.MODEL_FAILED_TO_BE_DEPLOYED;
}
const { statusColor, type, tooltip } = getStatus(status);
return (
<EuiToolTip content={tooltip}>
<EuiIcon
aria-label={tooltip}
type={type}
data-test-subj={`table-column-deployment-${status}`}
color={statusColor}


@@ -14,6 +14,13 @@ export const MODEL_DEPLOYED = i18n.translate(
}
);
export const MODEL_STARTING = i18n.translate(
'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelStarting',
{
defaultMessage: 'Model starting',
}
);
export const MODEL_NOT_DEPLOYED = i18n.translate(
'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed',
{
@@ -21,9 +28,9 @@ export const MODEL_NOT_DEPLOYED = i18n.translate(
}
);
export const MODEL_FAILED_TO_BE_DEPLOYED = i18n.translate(
'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelFailedToBeDeployed',
export const MODEL_STOPPING = i18n.translate(
'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelStopped',
{
defaultMessage: 'Model can not be deployed',
defaultMessage: 'Model stopping',
}
);


@@ -26,13 +26,24 @@ const inferenceEndpoints = [
task_settings: {},
},
{
inference_id: 'my-elser-model-04',
inference_id: 'local-model',
task_type: 'sparse_embedding',
service: 'elser',
service: 'elasticsearch',
service_settings: {
num_allocations: 1,
num_threads: 1,
model_id: '.elser_model_2',
model_id: '.own_model',
},
task_settings: {},
},
{
inference_id: 'third-party-model',
task_type: 'sparse_embedding',
service: 'openai',
service_settings: {
num_allocations: 1,
num_threads: 1,
model_id: '.own_model',
},
task_settings: {},
},
@@ -45,20 +56,39 @@ jest.mock('../../hooks/use_delete_endpoint', () => ({
}));
describe('When the tabular page is loaded', () => {
const queryClient = new QueryClient();
queryClient.setQueryData([TRAINED_MODEL_STATS_QUERY_KEY], {
trained_model_stats: [{ model_id: '.elser_model_2', deployment_stats: { state: 'started' } }],
});
const wrapper = ({ children }: { children: React.ReactNode }) => {
return <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>;
};
beforeEach(() => {
const queryClient = new QueryClient();
queryClient.setQueryData([TRAINED_MODEL_STATS_QUERY_KEY], {
trained_model_stats: [
{
model_id: '.elser_model_2',
deployment_stats: { deployment_id: 'my-elser-model-05', state: 'started' },
},
{
model_id: '.own_model',
deployment_stats: { deployment_id: 'local-model', state: 'started' },
},
],
});
const wrapper = ({ children }: { children: React.ReactNode }) => {
return <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>;
};
render(wrapper({ children: <TabularPage inferenceEndpoints={inferenceEndpoints} /> }));
});
it('should display all model_ids in the table', () => {
const rows = screen.getAllByRole('row');
expect(rows[1]).toHaveTextContent('my-elser-model-04');
expect(rows[1]).toHaveTextContent('local-model');
expect(rows[2]).toHaveTextContent('my-elser-model-05');
expect(rows[3]).toHaveTextContent('third-party-model');
});
it('should render deployment status for inference endpoints with local trained models', () => {
const deploymentStatusStarted = screen.getAllByTestId('table-column-deployment-started');
expect(deploymentStatusStarted).toHaveLength(2);
});
it('should not render deployment status for third-party endpoints', () => {
expect(screen.queryByTestId('table-column-deployment-undefined')).not.toBeInTheDocument();
expect(screen.queryByTestId('table-column-deployment-starting')).not.toBeInTheDocument();
expect(screen.queryByTestId('table-column-deployment-stopping')).not.toBeInTheDocument();
});
});


@@ -15,14 +15,13 @@ import {
HorizontalAlignment,
} from '@elastic/eui';
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { isLocalModel } from '@kbn/ml-trained-models-utils/src/constants/trained_models';
import { TaskTypes } from '../../../common/types';
import * as i18n from '../../../common/translations';
import { useTableData } from '../../hooks/use_table_data';
import { FilterOptions, InferenceEndpointUI, ServiceProviderKeys } from './types';
import { DeploymentStatusEnum } from './types';
import { useAllInferenceEndpointsState } from '../../hooks/use_all_inference_endpoints_state';
import { ServiceProviderFilter } from './filter/service_provider_filter';
import { TaskTypeFilter } from './filter/task_type_filter';
@@ -59,9 +58,9 @@
const tableColumns: Array<EuiBasicTableColumn<InferenceEndpointUI>> = [
{
field: 'deployment',
name: '',
render: (deployment: DeploymentStatusEnum) => <DeploymentStatus status={deployment} />,
render: ({ endpoint, deployment }: InferenceEndpointUI) =>
isLocalModel(endpoint) ? <DeploymentStatus status={deployment} /> : null,
align: 'center' as HorizontalAlignment,
width: '64px',
},


@@ -5,7 +5,7 @@
* 2.0.
*/
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { DeploymentState, InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { TaskTypes } from '../../types';
export const INFERENCE_ENDPOINTS_TABLE_PER_PAGE_VALUES = [10, 25, 50, 100];
@@ -55,15 +55,8 @@ export interface EuiBasicTableSortTypes {
field: string;
}
export enum DeploymentStatusEnum {
deployed = 'deployed',
notDeployed = 'not_deployed',
notDeployable = 'not_deployable',
notApplicable = 'not_applicable',
}
export interface InferenceEndpointUI {
deployment: DeploymentStatusEnum;
deployment: DeploymentState | undefined;
endpoint: InferenceAPIConfigResponse;
provider: string;
type: string;


@@ -20,17 +20,15 @@ export const InferenceEndpointsHeader: React.FC = () => {
pageTitle={i18n.INFERENCE_ENDPOINT_LABEL}
description={
<EuiText>
<p>
{i18n.MANAGE_INFERENCE_ENDPOINTS_LABEL}
<EuiSpacer size="s" />
<EuiLink
href={docLinks.createInferenceEndpoint}
target="_blank"
data-test-subj="learn-how-to-create-inference-endpoints"
>
{i18n.LEARN_HOW_TO_CREATE_INFERENCE_ENDPOINTS_LINK}
</EuiLink>
</p>
{i18n.MANAGE_INFERENCE_ENDPOINTS_LABEL}
<EuiSpacer size="s" />
<EuiLink
href={docLinks.createInferenceEndpoint}
target="_blank"
data-test-subj="learn-how-to-create-inference-endpoints"
>
{i18n.LEARN_HOW_TO_CREATE_INFERENCE_ENDPOINTS_LINK}
</EuiLink>
</EuiText>
}
bottomBorder={true}


@@ -72,7 +72,12 @@ describe('useTableData', () => {
beforeEach(() => {
queryClient.setQueryData([TRAINED_MODEL_STATS_QUERY_KEY], {
trained_model_stats: [{ model_id: '.elser_model_2', deployment_stats: { state: 'started' } }],
trained_model_stats: [
{
model_id: '.elser_model_2',
deployment_stats: { deployment_id: 'my-elser-model-01', state: 'started' },
},
],
});
});
it('should return correct pagination', () => {
@@ -160,6 +165,7 @@
);
const updatedData = result.current.sortedTableData;
expect(updatedData[0].deployment).toEqual('deployed');
expect(updatedData[2].deployment).toEqual('started');
});
});


@@ -7,7 +7,7 @@
import type { EuiTableSortingType } from '@elastic/eui';
import { Pagination } from '@elastic/eui';
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { DeploymentState, InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { useMemo } from 'react';
import { TaskTypes } from '../../common/types';
import { DEFAULT_TABLE_LIMIT } from '../components/all_inference_endpoints/constants';
@@ -19,7 +19,6 @@ import {
SortOrder,
ServiceProviderKeys,
} from '../components/all_inference_endpoints/types';
import { DeploymentStatusEnum } from '../components/all_inference_endpoints/types';
import { useTrainedModelStats } from './use_trained_model_stats';
interface UseTableDataReturn {
@@ -39,14 +38,11 @@
const { data: trainedModelStats } = useTrainedModelStats();
const deploymentStatus = trainedModelStats?.trained_model_stats.reduce((acc, modelStat) => {
if (modelStat.model_id) {
acc[modelStat.model_id] =
modelStat?.deployment_stats?.state === 'started'
? DeploymentStatusEnum.deployed
: DeploymentStatusEnum.notDeployed;
if (modelStat.deployment_stats?.deployment_id) {
acc[modelStat.deployment_stats.deployment_id] = modelStat?.deployment_stats?.state;
}
return acc;
}, {} as Record<string, DeploymentStatusEnum>);
}, {} as Record<string, DeploymentState | undefined>);
const tableData: InferenceEndpointUI[] = useMemo(() => {
let filteredEndpoints = inferenceEndpoints;
@@ -69,18 +65,11 @@
const isElasticService =
endpoint.service === ServiceProviderKeys.elasticsearch ||
endpoint.service === ServiceProviderKeys.elser;
let deploymentStatusValue = DeploymentStatusEnum.notApplicable;
if (isElasticService) {
const modelId = endpoint.service_settings?.model_id;
deploymentStatusValue =
modelId && deploymentStatus?.[modelId]
? deploymentStatus[modelId]
: DeploymentStatusEnum.notDeployable;
}
const deploymentId = isElasticService ? endpoint.inference_id : undefined;
const deployment = (deploymentId && deploymentStatus?.[deploymentId]) || undefined;
return {
deployment: deploymentStatusValue,
deployment,
endpoint,
provider: endpoint.service,
type: endpoint.task_type,


@@ -34445,7 +34445,6 @@
"xpack.searchInferenceEndpoints.deleteEndpoint.deleteSuccess": "Le point de terminaison dinférence a été supprimé avec succès.",
"xpack.searchInferenceEndpoints.deleteEndpoint.endpointDeletionFailed": "Échec de la suppression du point de terminaison",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed": "Le modèle est déployé",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelFailedToBeDeployed": "Le modèle ne peut être déployé",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed": "Le modèle nest pas déployé",
"xpack.searchInferenceEndpoints.elasticsearch.allocations": "Allocations : {numAllocations}",
"xpack.searchInferenceEndpoints.elasticsearch.threads": "Threads : {numThreads}",


@@ -34430,7 +34430,6 @@
"xpack.searchInferenceEndpoints.deleteEndpoint.deleteSuccess": "推論エンドポイントは正常に削除されました。",
"xpack.searchInferenceEndpoints.deleteEndpoint.endpointDeletionFailed": "エンドポイントの削除が失敗しました",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed": "モデルはデプロイされます",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelFailedToBeDeployed": "モデルをデプロイできません",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed": "モデルはデプロイされません",
"xpack.searchInferenceEndpoints.elasticsearch.allocations": "割り当て:{numAllocations}",
"xpack.searchInferenceEndpoints.elasticsearch.threads": "スレッド:{numThreads}",


@@ -34471,7 +34471,6 @@
"xpack.searchInferenceEndpoints.deleteEndpoint.deleteSuccess": "推理终端已成功删除。",
"xpack.searchInferenceEndpoints.deleteEndpoint.endpointDeletionFailed": "终端删除失败",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed": "已部署模型",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelFailedToBeDeployed": "无法部署模型",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed": "未部署模型",
"xpack.searchInferenceEndpoints.elasticsearch.allocations": "分配:{numAllocations}",
"xpack.searchInferenceEndpoints.elasticsearch.threads": "线程:{numThreads}",