Refactoring Inference Management UI (#193380)

## Summary

This PR includes:
- Added header links and text
- Restructured columns
- Increased the pagination limit

![Screenshot 2024-09-18 at 11:51:25 PM](https://github.com/user-attachments/assets/a4fc1324-4365-4f1c-a33e-3d247227bcfa)

### Checklist

Delete any items that are not applicable to this PR.

- [X] Any text added follows [EUI's writing
guidelines](https://elastic.github.io/eui/#/guidelines/writing), uses
sentence case text and includes [i18n
support](https://github.com/elastic/kibana/blob/main/packages/kbn-i18n/README.md)
- [X] [Unit or functional
tests](https://www.elastic.co/guide/en/kibana/master/development-tests.html)
were updated or added to match the most common scenarios

---------

Co-authored-by: Elastic Machine <elasticmachine@users.noreply.github.com>
This commit is contained in:
Samiul Monir 2024-09-19 17:33:28 -04:00 committed by GitHub
parent 885dfe3017
commit fd149a5ab2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 433 additions and 814 deletions

View file

@ -10,7 +10,7 @@ import { i18n } from '@kbn/i18n';
export const INFERENCE_ENDPOINT_LABEL = i18n.translate(
'xpack.searchInferenceEndpoints.inferenceEndpointsLabel',
{
defaultMessage: 'Inference Endpoints',
defaultMessage: 'Inference endpoints',
}
);
@ -21,7 +21,8 @@ export const CANCEL = i18n.translate('xpack.searchInferenceEndpoints.cancel', {
export const MANAGE_INFERENCE_ENDPOINTS_LABEL = i18n.translate(
'xpack.searchInferenceEndpoints.allInferenceEndpoints.description',
{
defaultMessage: 'View and manage your deployed inference endpoints.',
defaultMessage:
'Inference endpoints streamline the deployment and management of machine\nlearning models in Elasticsearch. Set up and manage NLP tasks using unique\nendpoints, to build AI-powered search.',
}
);
@ -69,9 +70,16 @@ export const SEMANTIC_SEARCH_WITH_E5_LINK = i18n.translate(
);
export const VIEW_YOUR_MODELS_LINK = i18n.translate(
'xpack.searchInferenceEndpoints.addEmptyPrompt.viewYourModels',
'xpack.searchInferenceEndpoints.viewYourModels',
{
defaultMessage: 'View your models',
defaultMessage: 'ML Trained Models',
}
);
export const API_DOCUMENTATION_LINK = i18n.translate(
'xpack.searchInferenceEndpoints.apiDocumentationLink',
{
defaultMessage: 'API Documentation',
}
);

View file

@ -14,7 +14,7 @@ import {
} from './types';
export const DEFAULT_TABLE_ACTIVE_PAGE = 1;
export const DEFAULT_TABLE_LIMIT = 10;
export const DEFAULT_TABLE_LIMIT = 25;
export const DEFAULT_QUERY_PARAMS: QueryParams = {
page: DEFAULT_TABLE_ACTIVE_PAGE,

View file

@ -28,7 +28,7 @@ export const DeleteAction: React.FC<DeleteActionProps> = ({ selectedEndpoint })
deleteEndpoint({
type: selectedEndpoint.type,
id: selectedEndpoint.endpoint.inference_id,
id: selectedEndpoint.endpoint,
});
};
@ -37,7 +37,7 @@ export const DeleteAction: React.FC<DeleteActionProps> = ({ selectedEndpoint })
<EuiButtonIcon
aria-label={i18n.translate('xpack.searchInferenceEndpoints.actions.deleteEndpoint', {
defaultMessage: 'Delete inference endpoint {selectedEndpointName}',
values: { selectedEndpointName: selectedEndpoint?.endpoint.inference_id },
values: { selectedEndpointName: selectedEndpoint?.endpoint },
})}
key="delete"
iconType="trash"

View file

@ -1,33 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { render, screen } from '@testing-library/react';
import React from 'react';
import { DeploymentStatus } from './deployment_status';
import { DeploymentState } from '@kbn/ml-trained-models-utils';
describe('DeploymentStatus component', () => {
it('starting renders with warning status', () => {
render(<DeploymentStatus status={'starting' as DeploymentState} />);
const healthComponent = screen.getByTestId(`table-column-deployment-starting`);
expect(healthComponent).toBeInTheDocument();
expect(healthComponent).toHaveAttribute('color', 'warning');
});
it('stopping renders with danger status', () => {
render(<DeploymentStatus status={'stopping' as DeploymentState} />);
const healthComponent = screen.getByTestId(`table-column-deployment-stopping`);
expect(healthComponent).toBeInTheDocument();
expect(healthComponent).toHaveAttribute('color', 'danger');
});
it('started renders with success status', () => {
render(<DeploymentStatus status={'started' as DeploymentState} />);
const healthComponent = screen.getByTestId(`table-column-deployment-started`);
expect(healthComponent).toBeInTheDocument();
expect(healthComponent).toHaveAttribute('color', 'success');
});
});

View file

@ -1,62 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import React from 'react';
import { EuiIcon, EuiToolTip } from '@elastic/eui';
import { DeploymentState } from '@kbn/ml-trained-models-utils';
import * as i18n from './translations';
interface DeploymentStatusProps {
status: DeploymentState | undefined;
}
function getStatus(status: DeploymentState | undefined) {
switch (status) {
case 'started':
return {
statusColor: 'success',
type: 'dot',
tooltip: i18n.MODEL_DEPLOYED,
};
case 'starting':
return {
statusColor: 'warning',
type: 'warning',
tooltip: i18n.MODEL_STARTING,
};
case 'stopping':
return {
statusColor: 'danger',
type: 'dot',
tooltip: i18n.MODEL_STOPPING,
};
case undefined:
return {
statusColor: 'danger',
type: 'dot',
tooltip: i18n.MODEL_NOT_DEPLOYED,
};
}
}
export const DeploymentStatus: React.FC<DeploymentStatusProps> = ({ status }) => {
const { statusColor, type, tooltip } = getStatus(status);
return (
<EuiToolTip content={tooltip}>
<EuiIcon
aria-label={tooltip}
type={type}
data-test-subj={`table-column-deployment-${status}`}
color={statusColor}
/>
</EuiToolTip>
);
};

View file

@ -1,36 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { i18n } from '@kbn/i18n';
export const MODEL_DEPLOYED = i18n.translate(
'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed',
{
defaultMessage: 'Model is deployed',
}
);
export const MODEL_STARTING = i18n.translate(
'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed',
{
defaultMessage: 'Model starting',
}
);
export const MODEL_NOT_DEPLOYED = i18n.translate(
'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed',
{
defaultMessage: 'Model is not deployed',
}
);
export const MODEL_STOPPING = i18n.translate(
'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelStopped',
{
defaultMessage: 'Model stopping',
}
);

View file

@ -9,315 +9,10 @@ import { render, screen } from '@testing-library/react';
import React from 'react';
import { EndpointInfo } from './endpoint_info';
jest.mock('@kbn/ml-trained-models-utils', () => ({
...jest.requireActual('@kbn/ml-trained-models-utils'),
ELASTIC_MODEL_DEFINITIONS: {
'model-with-mit-license': {
license: 'MIT',
licenseUrl: 'https://abc.com',
},
},
}));
describe('RenderEndpoint component tests', () => {
describe('with cohere service', () => {
const mockEndpoint = {
inference_id: 'cohere-2',
service: 'cohere',
service_settings: {
similarity: 'cosine',
dimensions: 384,
model_id: 'embed-english-light-v3.0',
rate_limit: {
requests_per_minute: 10000,
},
embedding_type: 'byte',
},
task_settings: {},
} as any;
it('renders the component with inference id', () => {
render(<EndpointInfo inferenceId={'cohere-2'} />);
it('renders the component with endpoint details for Cohere service', () => {
render(<EndpointInfo endpoint={mockEndpoint} />);
expect(screen.getByText('cohere-2')).toBeInTheDocument();
expect(screen.getByText('byte')).toBeInTheDocument();
expect(screen.getByText('embed-english-light-v3.0')).toBeInTheDocument();
});
it('does not render model_id badge if serviceSettings.model_id is not provided for Cohere service', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: { ...mockEndpoint.service_settings, model_id: undefined },
};
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.queryByText('embed-english-light-v3.0')).not.toBeInTheDocument();
});
it('renders only model_id if other settings are not provided for Cohere service', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: { model_id: 'embed-english-light-v3.0' },
};
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.getByText('embed-english-light-v3.0')).toBeInTheDocument();
expect(screen.queryByText(',')).not.toBeInTheDocument();
});
});
describe('with elasticsearch service', () => {
const mockEndpoint = {
inference_id: 'model-123',
service: 'elasticsearch',
service_settings: {
num_allocations: 5,
num_threads: 10,
model_id: 'settings-model-123',
},
} as any;
it('renders the component with endpoint model_id and model settings', () => {
render(<EndpointInfo endpoint={mockEndpoint} />);
expect(screen.getByText('model-123')).toBeInTheDocument();
expect(screen.getByText('settings-model-123')).toBeInTheDocument();
expect(screen.getByText('Threads: 10 | Allocations: 5')).toBeInTheDocument();
});
it('renders the component with only model_id if num_threads and num_allocations are not provided', () => {
const modifiedSettings = {
...mockEndpoint.service_settings,
num_threads: undefined,
num_allocations: undefined,
};
const modifiedEndpoint = { ...mockEndpoint, service_settings: modifiedSettings };
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.getByText('model-123')).toBeInTheDocument();
expect(screen.getByText('settings-model-123')).toBeInTheDocument();
expect(screen.queryByText('Threads: 10 | Allocations: 5')).not.toBeInTheDocument();
});
});
describe('with azureaistudio service', () => {
const mockEndpoint = {
inference_id: 'azure-ai-1',
service: 'azureaistudio',
service_settings: {
target: 'westus',
provider: 'microsoft_phi',
endpoint_type: 'realtime',
},
} as any;
it('renders the component with endpoint details', () => {
render(<EndpointInfo endpoint={mockEndpoint} />);
expect(screen.getByText('azure-ai-1')).toBeInTheDocument();
expect(screen.getByText('microsoft_phi, realtime, westus')).toBeInTheDocument();
});
it('renders correctly when some service settings are missing', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: { target: 'westus', provider: 'microsoft_phi' },
};
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.getByText('microsoft_phi, westus')).toBeInTheDocument();
});
it('does not render a comma when only one service setting is provided', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: { target: 'westus' },
};
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.getByText('westus')).toBeInTheDocument();
expect(screen.queryByText(',')).not.toBeInTheDocument();
});
it('renders nothing related to service settings when all are missing', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: {},
};
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.getByText('azure-ai-1')).toBeInTheDocument();
expect(screen.queryByText('westus')).not.toBeInTheDocument();
expect(screen.queryByText('microsoft_phi')).not.toBeInTheDocument();
expect(screen.queryByText('realtime')).not.toBeInTheDocument();
});
});
describe('with azureopenai service', () => {
const mockEndpoint = {
inference_id: 'azure-openai-1',
service: 'azureopenai',
service_settings: {
resource_name: 'resource-xyz',
deployment_id: 'deployment-123',
api_version: 'v1',
},
} as any;
it('renders the component with all required endpoint details', () => {
render(<EndpointInfo endpoint={mockEndpoint} />);
expect(screen.getByText('azure-openai-1')).toBeInTheDocument();
expect(screen.getByText('resource-xyz, deployment-123, v1')).toBeInTheDocument();
});
});
describe('with mistral service', () => {
const mockEndpoint = {
inference_id: 'mistral-ai-1',
service: 'mistral',
service_settings: {
model: 'model-xyz',
max_input_tokens: 512,
rate_limit: {
requests_per_minute: 1000,
},
},
} as any;
it('renders the component with endpoint details', () => {
render(<EndpointInfo endpoint={mockEndpoint} />);
expect(screen.getByText('mistral-ai-1')).toBeInTheDocument();
expect(screen.getByText('model-xyz')).toBeInTheDocument();
expect(screen.getByText('max_input_tokens: 512, rate_limit: 1000')).toBeInTheDocument();
});
it('renders correctly when some service settings are missing', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: {
model: 'model-xyz',
max_input_tokens: 512,
},
};
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.getByText('max_input_tokens: 512')).toBeInTheDocument();
});
it('does not render a comma when only one service setting is provided', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: { model: 'model-xyz' },
};
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.getByText('model-xyz')).toBeInTheDocument();
expect(screen.queryByText(',')).not.toBeInTheDocument();
});
it('renders nothing related to service settings when all are missing', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: {},
};
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.getByText('mistral-ai-1')).toBeInTheDocument();
expect(screen.queryByText('model-xyz')).not.toBeInTheDocument();
expect(screen.queryByText('max_input_tokens: 512')).not.toBeInTheDocument();
expect(screen.queryByText('rate_limit: 1000')).not.toBeInTheDocument();
});
});
describe('with googleaistudio service', () => {
const mockEndpoint = {
inference_id: 'google-ai-1',
service: 'googleaistudio',
service_settings: {
model_id: 'model-abc',
rate_limit: {
requests_per_minute: 500,
},
},
} as any;
it('renders the component with endpoint details', () => {
render(<EndpointInfo endpoint={mockEndpoint} />);
expect(screen.getByText('model-abc')).toBeInTheDocument();
expect(screen.getByText('rate_limit: 500')).toBeInTheDocument();
});
it('renders correctly when rate limit is missing', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: {
model_id: 'model-abc',
},
};
render(<EndpointInfo endpoint={modifiedEndpoint} />);
expect(screen.getByText('model-abc')).toBeInTheDocument();
expect(screen.queryByText('Rate limit:')).not.toBeInTheDocument();
});
});
describe('with amazonbedrock service', () => {
const mockEndpoint = {
inference_id: 'amazon-bedrock-1',
service: 'amazonbedrock',
service_settings: {
region: 'us-west-1',
provider: 'AMAZONTITAN',
model: 'model-bedrock-xyz',
},
} as any;
it('renders the component with endpoint details', () => {
render(<EndpointInfo endpoint={mockEndpoint} />);
expect(screen.getByText('amazon-bedrock-1')).toBeInTheDocument();
expect(screen.getByText('model-bedrock-xyz')).toBeInTheDocument();
expect(screen.getByText('region: us-west-1, provider: amazontitan')).toBeInTheDocument();
});
});
describe('for MIT licensed models', () => {
const mockEndpointWithMitLicensedModel = {
inference_id: 'model-123',
service: 'elasticsearch',
service_settings: {
num_allocations: 5,
num_threads: 10,
model_id: 'model-with-mit-license',
},
} as any;
it('renders the MIT license badge if the model is eligible', () => {
render(<EndpointInfo endpoint={mockEndpointWithMitLicensedModel} />);
const mitBadge = screen.getByTestId('mit-license-badge');
expect(mitBadge).toBeInTheDocument();
expect(mitBadge).toHaveAttribute('href', 'https://abc.com');
});
it('does not render the MIT license badge if the model is not eligible', () => {
const mockEndpointWithNonMitLicensedModel = {
inference_id: 'model-123',
service: 'elasticsearch',
service_settings: {
num_allocations: 5,
num_threads: 10,
model_id: 'model-without-mit-license',
},
} as any;
render(<EndpointInfo endpoint={mockEndpointWithNonMitLicensedModel} />);
expect(screen.queryByTestId('mit-license-badge')).not.toBeInTheDocument();
});
expect(screen.getByText('cohere-2')).toBeInTheDocument();
});
});

View file

@ -6,184 +6,13 @@
*/
import React from 'react';
import {
InferenceAPIConfigResponse,
ELASTIC_MODEL_DEFINITIONS,
} from '@kbn/ml-trained-models-utils';
import { EuiFlexGroup, EuiFlexItem, EuiText, EuiBadge } from '@elastic/eui';
import { ServiceProviderKeys } from '../../types';
import { ModelBadge } from './model_badge';
import * as i18n from './translations';
export interface EndpointInfoProps {
endpoint: InferenceAPIConfigResponse;
inferenceId: string;
}
export const EndpointInfo: React.FC<EndpointInfoProps> = ({ endpoint }) => {
return (
<EuiFlexGroup gutterSize="xs" direction="column">
<EuiFlexItem>
<strong>{endpoint.inference_id}</strong>
</EuiFlexItem>
<EuiFlexItem css={{ textWrap: 'wrap' }}>
<EndpointModelInfo endpoint={endpoint} />
</EuiFlexItem>
</EuiFlexGroup>
);
};
const EndpointModelInfo: React.FC<EndpointInfoProps> = ({ endpoint }) => {
const serviceSettings = endpoint.service_settings;
const modelId =
'model_id' in serviceSettings
? serviceSettings.model_id
: 'model' in serviceSettings
? serviceSettings.model
: undefined;
const isEligibleForMITBadge = modelId && ELASTIC_MODEL_DEFINITIONS[modelId]?.license === 'MIT';
return (
<>
<EuiText color="subdued" size="xs">
{modelId && <ModelBadge model={modelId} />}
{isEligibleForMITBadge ? (
<EuiBadge
color="hollow"
iconType="popout"
iconSide="right"
href={ELASTIC_MODEL_DEFINITIONS[modelId].licenseUrl ?? ''}
target="_blank"
data-test-subj={'mit-license-badge'}
>
{i18n.MIT_LICENSE}
</EuiBadge>
) : null}{' '}
{endpointModelAtrributes(endpoint)}
</EuiText>
</>
);
};
function endpointModelAtrributes(endpoint: InferenceAPIConfigResponse) {
switch (endpoint.service) {
case ServiceProviderKeys.elser:
case ServiceProviderKeys.elasticsearch:
return elasticsearchAttributes(endpoint);
case ServiceProviderKeys.cohere:
return cohereAttributes(endpoint);
case ServiceProviderKeys.hugging_face:
return huggingFaceAttributes(endpoint);
case ServiceProviderKeys.openai:
return openAIAttributes(endpoint);
case ServiceProviderKeys.azureaistudio:
return azureOpenAIStudioAttributes(endpoint);
case ServiceProviderKeys.azureopenai:
return azureOpenAIAttributes(endpoint);
case ServiceProviderKeys.mistral:
return mistralAttributes(endpoint);
case ServiceProviderKeys.googleaistudio:
return googleAIStudioAttributes(endpoint);
case ServiceProviderKeys.amazonbedrock:
return amazonBedrockAttributes(endpoint);
default:
return null;
}
}
function elasticsearchAttributes(endpoint: InferenceAPIConfigResponse) {
const serviceSettings = endpoint.service_settings;
const numAllocations =
'num_allocations' in serviceSettings ? serviceSettings.num_allocations : undefined;
const numThreads = 'num_threads' in serviceSettings ? serviceSettings.num_threads : undefined;
return `${numThreads ? i18n.THREADS(numThreads) : ''}${
numThreads && numAllocations ? ' | ' : ''
}${numAllocations ? i18n.ALLOCATIONS(numAllocations) : ''}`;
}
function cohereAttributes(endpoint: InferenceAPIConfigResponse) {
const serviceSettings = endpoint.service_settings;
const embeddingType =
'embedding_type' in serviceSettings ? serviceSettings.embedding_type : undefined;
const taskSettings = endpoint.task_settings;
const inputType = 'input_type' in taskSettings ? taskSettings.input_type : undefined;
const truncate = 'truncate' in taskSettings ? taskSettings.truncate : undefined;
return [embeddingType, inputType, truncate && `truncate: ${truncate}`].filter(Boolean).join(', ');
}
function huggingFaceAttributes(endpoint: InferenceAPIConfigResponse) {
const serviceSettings = endpoint.service_settings;
const url = 'url' in serviceSettings ? serviceSettings.url : null;
return url;
}
function openAIAttributes(endpoint: InferenceAPIConfigResponse) {
const serviceSettings = endpoint.service_settings;
const url = 'url' in serviceSettings ? serviceSettings.url : null;
return url;
}
function azureOpenAIStudioAttributes(endpoint: InferenceAPIConfigResponse) {
const serviceSettings = endpoint.service_settings;
const provider = 'provider' in serviceSettings ? serviceSettings?.provider : undefined;
const endpointType =
'endpoint_type' in serviceSettings ? serviceSettings.endpoint_type : undefined;
const target = 'target' in serviceSettings ? serviceSettings.target : undefined;
return [provider, endpointType, target].filter(Boolean).join(', ');
}
function azureOpenAIAttributes(endpoint: InferenceAPIConfigResponse) {
const serviceSettings = endpoint.service_settings;
const resourceName =
'resource_name' in serviceSettings ? serviceSettings.resource_name : undefined;
const deploymentId =
'deployment_id' in serviceSettings ? serviceSettings.deployment_id : undefined;
const apiVersion = 'api_version' in serviceSettings ? serviceSettings.api_version : undefined;
return [resourceName, deploymentId, apiVersion].filter(Boolean).join(', ');
}
function mistralAttributes(endpoint: InferenceAPIConfigResponse) {
const serviceSettings = endpoint.service_settings;
const maxInputTokens =
'max_input_tokens' in serviceSettings ? serviceSettings.max_input_tokens : undefined;
const rateLimit =
'rate_limit' in serviceSettings ? serviceSettings.rate_limit.requests_per_minute : undefined;
return [
maxInputTokens && `max_input_tokens: ${maxInputTokens}`,
rateLimit && `rate_limit: ${rateLimit}`,
]
.filter(Boolean)
.join(', ');
}
function amazonBedrockAttributes(endpoint: InferenceAPIConfigResponse) {
const serviceSettings = endpoint.service_settings;
const region = 'region' in serviceSettings ? serviceSettings.region : undefined;
const provider =
'provider' in serviceSettings ? serviceSettings.provider.toLocaleLowerCase() : undefined;
return [region && `region: ${region}`, provider && `provider: ${provider}`]
.filter(Boolean)
.join(', ');
}
function googleAIStudioAttributes(endpoint: InferenceAPIConfigResponse) {
const serviceSettings = endpoint.service_settings;
const rateLimit =
'rate_limit' in serviceSettings ? serviceSettings.rate_limit.requests_per_minute : undefined;
return rateLimit && `rate_limit: ${rateLimit}`;
}
export const EndpointInfo: React.FC<EndpointInfoProps> = ({ inferenceId }) => (
<span>
<strong>{inferenceId}</strong>
</span>
);

View file

@ -1,21 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import React from 'react';
import { EuiBadge, useEuiTheme } from '@elastic/eui';
interface ModelBadgeProps {
model?: string;
}
export const ModelBadge: React.FC<ModelBadgeProps> = ({ model }) => {
const { euiTheme } = useEuiTheme();
if (!model) return null;
return <EuiBadge color={euiTheme.colors.body}>{model}</EuiBadge>;
};

View file

@ -8,25 +8,264 @@
import { render, screen } from '@testing-library/react';
import React from 'react';
import { ServiceProvider } from './service_provider';
import { ServiceProviderKeys } from '../../types';
jest.mock('../../../../assets/images/providers/elastic.svg', () => 'elasticIcon.svg');
jest.mock('../../../../assets/images/providers/hugging_face.svg', () => 'huggingFaceIcon.svg');
jest.mock('../../../../assets/images/providers/cohere.svg', () => 'cohereIcon.svg');
jest.mock('../../../../assets/images/providers/open_ai.svg', () => 'openAIIcon.svg');
jest.mock('@kbn/ml-trained-models-utils', () => ({
...jest.requireActual('@kbn/ml-trained-models-utils'),
ELASTIC_MODEL_DEFINITIONS: {
'model-with-mit-license': {
license: 'MIT',
licenseUrl: 'https://abc.com',
},
},
}));
describe('ServiceProvider component', () => {
it('renders Hugging Face icon and name when providerKey is hugging_face', () => {
render(<ServiceProvider providerKey={ServiceProviderKeys.hugging_face} />);
expect(screen.getByText('Hugging Face')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-hugging_face');
expect(icon).toBeInTheDocument();
describe('with HuggingFace service', () => {
const mockEndpoint = {
inference_id: 'my-hugging-face',
service: 'hugging_face',
service_settings: {
api_key: 'aaaa',
url: 'https://dummy.huggingface.com',
},
task_settings: {},
} as any;
it('renders the component with service and model details', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.getByText('Hugging Face')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-hugging_face');
expect(icon).toBeInTheDocument();
expect(screen.getByText('https://dummy.huggingface.com')).toBeInTheDocument();
});
});
it('renders Open AI icon and name when providerKey is openai', () => {
render(<ServiceProvider providerKey={ServiceProviderKeys.openai} />);
expect(screen.getByText('OpenAI')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-openai');
expect(icon).toBeInTheDocument();
describe('with openai service', () => {
const mockEndpoint = {
inference_id: 'my-openai-endpoint',
service: 'openai',
service_settings: {
api_key: 'aaaa',
model_id: 'text-embedding-3-small',
},
task_settings: {},
} as any;
it('renders the component with service and model details', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.getByText('OpenAI')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-openai');
expect(icon).toBeInTheDocument();
expect(screen.getByText('text-embedding-3-small')).toBeInTheDocument();
});
});
describe('with cohere service', () => {
const mockEndpoint = {
inference_id: 'cohere-2',
service: 'cohere',
service_settings: {
similarity: 'cosine',
dimensions: 384,
model_id: 'embed-english-light-v3.0',
rate_limit: {
requests_per_minute: 10000,
},
embedding_type: 'byte',
},
task_settings: {},
} as any;
it('renders the component with service and model details', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.getByText('Cohere')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-cohere');
expect(icon).toBeInTheDocument();
expect(screen.getByText('embed-english-light-v3.0')).toBeInTheDocument();
});
it('does not render model_id badge if serviceSettings.model_id is not provided', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: { ...mockEndpoint.service_settings, model_id: undefined },
};
render(<ServiceProvider providerEndpoint={modifiedEndpoint} />);
expect(screen.queryByText('embed-english-light-v3.0')).not.toBeInTheDocument();
});
});
describe('with azureaistudio service', () => {
const mockEndpoint = {
inference_id: 'azure-ai-1',
service: 'azureaistudio',
service_settings: {
target: 'westus',
provider: 'microsoft_phi',
endpoint_type: 'realtime',
},
} as any;
it('renders the component with endpoint details', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.getByText('Azure AI Studio')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-azureaistudio');
expect(icon).toBeInTheDocument();
expect(screen.getByText('microsoft_phi')).toBeInTheDocument();
});
it('renders nothing related to service settings when all are missing', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: {},
};
render(<ServiceProvider providerEndpoint={modifiedEndpoint} />);
expect(screen.getByText('Azure AI Studio')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-azureaistudio');
expect(icon).toBeInTheDocument();
expect(screen.queryByText('microsoft_phi')).not.toBeInTheDocument();
});
});
describe('with azureopenai service', () => {
const mockEndpoint = {
inference_id: 'azure-openai-1',
service: 'azureopenai',
service_settings: {
resource_name: 'resource-xyz',
deployment_id: 'deployment-123',
api_version: 'v1',
},
} as any;
it('renders the component with all required endpoint details', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.getByText('Azure OpenAI')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-azureopenai');
expect(icon).toBeInTheDocument();
expect(screen.getByText('resource-xyz')).toBeInTheDocument();
});
});
describe('with mistral service', () => {
const mockEndpoint = {
inference_id: 'mistral-ai-1',
service: 'mistral',
service_settings: {
model: 'model-xyz',
max_input_tokens: 512,
rate_limit: {
requests_per_minute: 1000,
},
},
} as any;
it('renders the component with endpoint details', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.getByText('Mistral')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-mistral');
expect(icon).toBeInTheDocument();
expect(screen.getByText('model-xyz')).toBeInTheDocument();
});
it('does not render model id if not provided', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: {},
};
render(<ServiceProvider providerEndpoint={modifiedEndpoint} />);
const icon = screen.getByTestId('table-column-service-provider-mistral');
expect(icon).toBeInTheDocument();
expect(screen.getByText('Mistral')).toBeInTheDocument();
expect(screen.queryByText('model-xyz')).not.toBeInTheDocument();
});
});
describe('with elasticsearch service', () => {
const mockEndpoint = {
inference_id: 'model-123',
service: 'elasticsearch',
service_settings: {
num_allocations: 5,
num_threads: 10,
model_id: 'settings-model-123',
},
} as any;
it('renders the component with endpoint model_id', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.getByText('Elasticsearch')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-elasticsearch');
expect(icon).toBeInTheDocument();
expect(screen.getByText('settings-model-123')).toBeInTheDocument();
});
it('renders the MIT license badge if the model is eligible', () => {
const modifiedEndpoint = {
...mockEndpoint,
service_settings: { ...mockEndpoint.service_settings, model_id: 'model-with-mit-license' },
};
render(<ServiceProvider providerEndpoint={modifiedEndpoint} />);
const mitBadge = screen.getByTestId('mit-license-badge');
expect(mitBadge).toBeInTheDocument();
expect(mitBadge).toHaveAttribute('href', 'https://abc.com');
});
it('does not render the MIT license badge if the model is not eligible', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.queryByTestId('mit-license-badge')).not.toBeInTheDocument();
});
});
describe('with googleaistudio service', () => {
const mockEndpoint = {
inference_id: 'google-ai-1',
service: 'googleaistudio',
service_settings: {
model_id: 'model-abc',
rate_limit: {
requests_per_minute: 500,
},
},
} as any;
it('renders the component with service and model details', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.getByText('Google AI Studio')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-googleaistudio');
expect(icon).toBeInTheDocument();
expect(screen.getByText('model-abc')).toBeInTheDocument();
});
});
describe('with amazonbedrock service', () => {
const mockEndpoint = {
inference_id: 'amazon-bedrock-1',
service: 'amazonbedrock',
service_settings: {
region: 'us-west-1',
provider: 'AMAZONTITAN',
model: 'model-bedrock-xyz',
},
} as any;
it('renders the component with model and service details', () => {
render(<ServiceProvider providerEndpoint={mockEndpoint} />);
expect(screen.getByText('Amazon Bedrock')).toBeInTheDocument();
const icon = screen.getByTestId('table-column-service-provider-amazonbedrock');
expect(icon).toBeInTheDocument();
expect(screen.getByText('model-bedrock-xyz')).toBeInTheDocument();
});
});
});

View file

@ -5,8 +5,12 @@
* 2.0.
*/
import { EuiIcon } from '@elastic/eui';
import { EuiBadge, EuiFlexGroup, EuiFlexItem, EuiIcon, EuiText } from '@elastic/eui';
import React from 'react';
import {
ELASTIC_MODEL_DEFINITIONS,
InferenceAPIConfigResponse,
} from '@kbn/ml-trained-models-utils';
import elasticIcon from '../../../../assets/images/providers/elastic.svg';
import huggingFaceIcon from '../../../../assets/images/providers/hugging_face.svg';
import cohereIcon from '../../../../assets/images/providers/cohere.svg';
@ -17,9 +21,10 @@ import googleAIStudioIcon from '../../../../assets/images/providers/google_ai_st
import mistralIcon from '../../../../assets/images/providers/mistral.svg';
import amazonBedrockIcon from '../../../../assets/images/providers/amazon_bedrock.svg';
import { ServiceProviderKeys } from '../../types';
import * as i18n from './translations';
interface ServiceProviderProps {
providerKey: ServiceProviderKeys;
providerEndpoint: InferenceAPIConfigResponse;
}
interface ServiceProviderRecord {
@ -50,7 +55,7 @@ export const SERVICE_PROVIDERS: Record<ServiceProviderKeys, ServiceProviderRecor
},
[ServiceProviderKeys.elser]: {
icon: elasticIcon,
name: 'ELSER',
name: 'Elasticsearch',
},
[ServiceProviderKeys.googleaistudio]: {
icon: googleAIStudioIcon,
@ -70,19 +75,107 @@ export const SERVICE_PROVIDERS: Record<ServiceProviderKeys, ServiceProviderRecor
},
};
export const ServiceProvider: React.FC<ServiceProviderProps> = ({ providerKey }) => {
const provider = SERVICE_PROVIDERS[providerKey];
export const ServiceProvider: React.FC<ServiceProviderProps> = ({ providerEndpoint }) => {
const { service } = providerEndpoint;
const provider = SERVICE_PROVIDERS[service];
return provider ? (
<>
<EuiIcon
data-test-subj={`table-column-service-provider-${providerKey}`}
type={provider.icon}
style={{ marginRight: '8px' }}
/>
<span>{provider.name}</span>
</>
<EuiFlexGroup gutterSize="xs" direction="row" alignItems="center">
<EuiFlexItem grow={0}>
<EuiIcon
data-test-subj={`table-column-service-provider-${service}`}
type={provider.icon}
style={{ marginRight: '8px' }}
/>
</EuiFlexItem>
<EuiFlexItem>
<EuiFlexGroup gutterSize="xs" direction="column">
<EuiFlexItem>
<EuiText size="s" color="subdued">
{provider.name}
</EuiText>
</EuiFlexItem>
<EuiFlexItem>
<EndpointModelInfo providerEndpoint={providerEndpoint} />
</EuiFlexItem>
</EuiFlexGroup>
</EuiFlexItem>
</EuiFlexGroup>
) : (
<span>{providerKey}</span>
<span>{service}</span>
);
};
// Renders the model details shown beneath the provider name: the model id
// (when present), an MIT license badge for eligible Elastic-curated models,
// and any service-specific attribute (URL / provider / resource name).
const EndpointModelInfo: React.FC<ServiceProviderProps> = ({ providerEndpoint }) => {
  const serviceSettings = providerEndpoint.service_settings;
  // Services disagree on the key for the model identifier: most use
  // `model_id`, Amazon Bedrock uses `model`; fall through to undefined.
  const modelId =
    'model_id' in serviceSettings
      ? serviceSettings.model_id
      : 'model' in serviceSettings
      ? serviceSettings.model
      : undefined;

  // Only models registered in ELASTIC_MODEL_DEFINITIONS with an MIT license
  // get the badge, which links out to the license text.
  const isEligibleForMITBadge = modelId && ELASTIC_MODEL_DEFINITIONS[modelId]?.license === 'MIT';

  return (
    <EuiFlexGroup gutterSize="xs" direction="column">
      <EuiFlexItem>
        <EuiFlexGroup gutterSize="xs" direction="row">
          <EuiFlexItem grow={0}>
            {modelId && (
              <EuiText size="s" color="subdued">
                {modelId}
              </EuiText>
            )}
          </EuiFlexItem>
          <EuiFlexItem grow={0}>
            {isEligibleForMITBadge ? (
              <EuiBadge
                color="hollow"
                iconType="popout"
                iconSide="right"
                href={ELASTIC_MODEL_DEFINITIONS[modelId].licenseUrl ?? ''}
                target="_blank"
                data-test-subj={'mit-license-badge'}
              >
                {i18n.MIT_LICENSE}
              </EuiBadge>
            ) : null}{' '}
          </EuiFlexItem>
        </EuiFlexGroup>
      </EuiFlexItem>
      {/* NOTE(review): the trailing {' '} above renders a stray space — looks unintentional; confirm and remove in a follow-up. */}
      <EuiFlexItem>{endpointModelAtrributes(providerEndpoint)}</EuiFlexItem>
    </EuiFlexGroup>
  );
};
// Resolves the service-specific attribute displayed under the model id
// (Hugging Face URL, Azure AI Studio provider, Azure OpenAI resource name);
// null for services with no extra attribute.
// NOTE(review): "Atrributes" misspells "Attributes"; the call site above
// uses the same spelling, so the name is kept to preserve the interface.
function endpointModelAtrributes(endpoint: InferenceAPIConfigResponse) {
  const { service } = endpoint;
  if (service === ServiceProviderKeys.hugging_face) {
    return huggingFaceAttributes(endpoint);
  }
  if (service === ServiceProviderKeys.azureaistudio) {
    return azureOpenAIStudioAttributes(endpoint);
  }
  if (service === ServiceProviderKeys.azureopenai) {
    return azureOpenAIAttributes(endpoint);
  }
  return null;
}
// Hugging Face endpoints display their inference URL; null when absent.
function huggingFaceAttributes(endpoint: InferenceAPIConfigResponse) {
  const settings = endpoint.service_settings;
  return 'url' in settings ? settings.url : null;
}
// Azure AI Studio endpoints display their upstream provider, if declared;
// undefined otherwise.
function azureOpenAIStudioAttributes(endpoint: InferenceAPIConfigResponse) {
  const settings = endpoint.service_settings;
  if ('provider' in settings) {
    return settings.provider;
  }
  return undefined;
}
// Azure OpenAI endpoints display their Azure resource name, if declared;
// undefined otherwise.
function azureOpenAIAttributes(endpoint: InferenceAPIConfigResponse) {
  if ('resource_name' in endpoint.service_settings) {
    return endpoint.service_settings.resource_name;
  }
  return undefined;
}

View file

@ -7,18 +7,6 @@
import { i18n } from '@kbn/i18n';
export const THREADS = (numThreads: number) =>
i18n.translate('xpack.searchInferenceEndpoints.elasticsearch.threads', {
defaultMessage: 'Threads: {numThreads}',
values: { numThreads },
});
export const ALLOCATIONS = (numAllocations: number) =>
i18n.translate('xpack.searchInferenceEndpoints.elasticsearch.allocations', {
defaultMessage: 'Allocations: {numAllocations}',
values: { numAllocations },
});
export const MIT_LICENSE = i18n.translate(
'xpack.searchInferenceEndpoints.elasticsearch.mitLicense',
{

View file

@ -10,8 +10,6 @@ import { screen } from '@testing-library/react';
import { render } from '@testing-library/react';
import { TabularPage } from './tabular_page';
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { TRAINED_MODEL_STATS_QUERY_KEY } from '../../../common/constants';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
const inferenceEndpoints = [
{
@ -56,39 +54,26 @@ jest.mock('../../hooks/use_delete_endpoint', () => ({
}));
describe('When the tabular page is loaded', () => {
beforeEach(() => {
const queryClient = new QueryClient();
queryClient.setQueryData([TRAINED_MODEL_STATS_QUERY_KEY], {
trained_model_stats: [
{
model_id: '.elser_model_2',
deployment_stats: { deployment_id: 'my-elser-model-05', state: 'started' },
},
{
model_id: '.own_model',
deployment_stats: { deployment_id: 'local-model', state: 'started' },
},
],
});
const wrapper = ({ children }: { children: React.ReactNode }) => {
return <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>;
};
render(wrapper({ children: <TabularPage inferenceEndpoints={inferenceEndpoints} /> }));
});
it('should display all inference ids in the table', () => {
render(<TabularPage inferenceEndpoints={inferenceEndpoints} />);
it('should display all model_ids in the table', () => {
const rows = screen.getAllByRole('row');
expect(rows[1]).toHaveTextContent('local-model');
expect(rows[2]).toHaveTextContent('my-elser-model-05');
expect(rows[3]).toHaveTextContent('third-party-model');
});
it('should render deployment status for inference endpoints with local trained models', () => {
const deploymentStatusStarted = screen.getAllByTestId('table-column-deployment-started');
expect(deploymentStatusStarted).toHaveLength(2);
});
it('should not render deployment status for third-party endpoints', () => {
expect(screen.queryByTestId('table-column-deployment-undefined')).not.toBeInTheDocument();
expect(screen.queryByTestId('table-column-deployment-starting')).not.toBeInTheDocument();
expect(screen.queryByTestId('table-column-deployment-stopping')).not.toBeInTheDocument();
// Each data row should show the human-readable service name alongside the
// model id from the fixture endpoints. Row 0 is the table header, so data
// rows start at index 1. The fixture data and its ordering come from the
// `inferenceEndpoints` array defined above this view — confirm row order
// against that fixture if this assertion changes.
it('should display all service and model ids in the table', () => {
  render(<TabularPage inferenceEndpoints={inferenceEndpoints} />);
  const rows = screen.getAllByRole('row');
  expect(rows[1]).toHaveTextContent('Elasticsearch');
  expect(rows[1]).toHaveTextContent('.own_model');
  expect(rows[2]).toHaveTextContent('Elasticsearch');
  expect(rows[2]).toHaveTextContent('.elser_model_2');
  expect(rows[3]).toHaveTextContent('OpenAI');
  expect(rows[3]).toHaveTextContent('.own_model');
});
});

View file

@ -7,26 +7,18 @@
import React, { useCallback } from 'react';
import {
EuiBasicTable,
EuiBasicTableColumn,
EuiFlexGroup,
EuiFlexItem,
HorizontalAlignment,
} from '@elastic/eui';
import { EuiBasicTable, EuiBasicTableColumn, EuiFlexGroup, EuiFlexItem } from '@elastic/eui';
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { isLocalModel } from '@kbn/ml-trained-models-utils/src/constants/trained_models';
import { TaskTypes } from '../../../common/types';
import * as i18n from '../../../common/translations';
import { useTableData } from '../../hooks/use_table_data';
import { FilterOptions, InferenceEndpointUI, ServiceProviderKeys } from './types';
import { FilterOptions, InferenceEndpointUI } from './types';
import { useAllInferenceEndpointsState } from '../../hooks/use_all_inference_endpoints_state';
import { ServiceProviderFilter } from './filter/service_provider_filter';
import { TaskTypeFilter } from './filter/task_type_filter';
import { TableSearch } from './search/table_search';
import { DeploymentStatus } from './render_table_columns/render_deployment_status/deployment_status';
import { EndpointInfo } from './render_table_columns/render_endpoint/endpoint_info';
import { ServiceProvider } from './render_table_columns/render_service_provider/service_provider';
import { TaskType } from './render_table_columns/render_task_type/task_type';
@ -57,38 +49,32 @@ export const TabularPage: React.FC<TabularPageProps> = ({ inferenceEndpoints })
);
const tableColumns: Array<EuiBasicTableColumn<InferenceEndpointUI>> = [
{
name: '',
render: ({ endpoint, deployment }: InferenceEndpointUI) =>
isLocalModel(endpoint) ? <DeploymentStatus status={deployment} /> : null,
align: 'center' as HorizontalAlignment,
width: '64px',
},
{
field: 'endpoint',
name: i18n.ENDPOINT,
render: (endpoint: InferenceAPIConfigResponse) => {
render: (endpoint: string) => {
if (endpoint) {
return <EndpointInfo endpoint={endpoint} />;
return <EndpointInfo inferenceId={endpoint} />;
}
return null;
},
sortable: true,
truncateText: true,
width: '400px',
},
{
field: 'provider',
name: i18n.SERVICE_PROVIDER,
render: (provider: ServiceProviderKeys) => {
render: (provider: InferenceAPIConfigResponse) => {
if (provider) {
return <ServiceProvider providerKey={provider} />;
return <ServiceProvider providerEndpoint={provider} />;
}
return null;
},
sortable: false,
width: '185px',
width: '592px',
},
{
field: 'type',
@ -107,7 +93,7 @@ export const TabularPage: React.FC<TabularPageProps> = ({ inferenceEndpoints })
actions: [
{
render: (inferenceEndpoint: InferenceEndpointUI) => (
<CopyIDAction inferenceId={inferenceEndpoint.endpoint.inference_id} />
<CopyIDAction inferenceId={inferenceEndpoint.endpoint} />
),
},
{

View file

@ -5,9 +5,9 @@
* 2.0.
*/
import { DeploymentState, InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { TaskTypes } from '../../types';
export const INFERENCE_ENDPOINTS_TABLE_PER_PAGE_VALUES = [10, 25, 50, 100];
export const INFERENCE_ENDPOINTS_TABLE_PER_PAGE_VALUES = [25, 50, 100];
export enum ServiceProviderKeys {
amazonbedrock = 'amazonbedrock',
@ -56,8 +56,7 @@ export interface EuiBasicTableSortTypes {
}
export interface InferenceEndpointUI {
deployment: DeploymentState | undefined;
endpoint: InferenceAPIConfigResponse;
provider: string;
endpoint: string;
provider: InferenceAPIConfigResponse;
type: string;
}

View file

@ -5,7 +5,7 @@
* 2.0.
*/
import { EuiPageTemplate, EuiLink, EuiText, EuiSpacer } from '@elastic/eui';
import { EuiPageTemplate, EuiLink } from '@elastic/eui';
import React from 'react';
import * as i18n from '../../common/translations';
import { docLinks } from '../../common/doc_links';
@ -18,21 +18,16 @@ export const InferenceEndpointsHeader: React.FC = () => {
<EuiPageTemplate.Header
data-test-subj="allInferenceEndpointsPage"
pageTitle={i18n.INFERENCE_ENDPOINT_LABEL}
description={
<EuiText>
{i18n.MANAGE_INFERENCE_ENDPOINTS_LABEL}
<EuiSpacer size="s" />
<EuiLink
href={docLinks.createInferenceEndpoint}
target="_blank"
data-test-subj="learn-how-to-create-inference-endpoints"
>
{i18n.LEARN_HOW_TO_CREATE_INFERENCE_ENDPOINTS_LINK}
</EuiLink>
</EuiText>
}
description={i18n.MANAGE_INFERENCE_ENDPOINTS_LABEL}
bottomBorder={true}
rightSideItems={[
<EuiLink
href={docLinks.createInferenceEndpoint}
target="_blank"
data-test-subj="api-documentation"
>
{i18n.API_DOCUMENTATION_LINK}
</EuiLink>,
<EuiLink href={trainedModelPageUrl} target="_blank" data-test-subj="view-your-models">
{i18n.VIEW_YOUR_MODELS_LINK}
</EuiLink>,

View file

@ -118,9 +118,7 @@ describe('useTableData', () => {
b.inference_id.localeCompare(a.inference_id)
);
const sortedEndpoints = result.current.sortedTableData.map(
(item) => item.endpoint.inference_id
);
const sortedEndpoints = result.current.sortedTableData.map((item) => item.endpoint);
const expectedModelIds = expectedSortedData.map((item) => item.inference_id);
expect(sortedEndpoints).toEqual(expectedModelIds);
@ -153,19 +151,6 @@ describe('useTableData', () => {
{ wrapper }
);
const filteredData = result.current.sortedTableData;
expect(
filteredData.every((item) => item.endpoint.inference_id.includes(searchKey))
).toBeTruthy();
});
it('should update deployment status based on deploymentStatus object', () => {
const { result } = renderHook(
() => useTableData(inferenceEndpoints, queryParams, filterOptions, searchKey),
{ wrapper }
);
const updatedData = result.current.sortedTableData;
expect(updatedData[2].deployment).toEqual('started');
expect(filteredData.every((item) => item.endpoint.includes(searchKey))).toBeTruthy();
});
});

View file

@ -7,7 +7,7 @@
import type { EuiTableSortingType } from '@elastic/eui';
import { Pagination } from '@elastic/eui';
import { DeploymentState, InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { useMemo } from 'react';
import { TaskTypes } from '../../common/types';
import { DEFAULT_TABLE_LIMIT } from '../components/all_inference_endpoints/constants';
@ -19,7 +19,6 @@ import {
SortOrder,
ServiceProviderKeys,
} from '../components/all_inference_endpoints/types';
import { useTrainedModelStats } from './use_trained_model_stats';
interface UseTableDataReturn {
tableData: InferenceEndpointUI[];
@ -35,15 +34,6 @@ export const useTableData = (
filterOptions: FilterOptions,
searchKey: string
): UseTableDataReturn => {
const { data: trainedModelStats } = useTrainedModelStats();
const deploymentStatus = trainedModelStats?.trained_model_stats.reduce((acc, modelStat) => {
if (modelStat.deployment_stats?.deployment_id) {
acc[modelStat.deployment_stats.deployment_id] = modelStat?.deployment_stats?.state;
}
return acc;
}, {} as Record<string, DeploymentState | undefined>);
const tableData: InferenceEndpointUI[] = useMemo(() => {
let filteredEndpoints = inferenceEndpoints;
@ -61,21 +51,12 @@ export const useTableData = (
return filteredEndpoints
.filter((endpoint) => endpoint.inference_id.includes(searchKey))
.map((endpoint) => {
const isElasticService =
endpoint.service === ServiceProviderKeys.elasticsearch ||
endpoint.service === ServiceProviderKeys.elser;
const deploymentId = isElasticService ? endpoint.inference_id : undefined;
const deployment = (deploymentId && deploymentStatus?.[deploymentId]) || undefined;
return {
deployment,
endpoint,
provider: endpoint.service,
type: endpoint.task_type,
};
});
}, [inferenceEndpoints, searchKey, filterOptions, deploymentStatus]);
.map((endpoint) => ({
endpoint: endpoint.inference_id,
provider: endpoint,
type: endpoint.task_type,
}));
}, [inferenceEndpoints, searchKey, filterOptions]);
const sortedTableData: InferenceEndpointUI[] = useMemo(() => {
return [...tableData].sort((a, b) => {
@ -83,9 +64,9 @@ export const useTableData = (
const bValue = b[queryParams.sortField];
if (queryParams.sortOrder === SortOrder.asc) {
return aValue.inference_id.localeCompare(bValue.inference_id);
return aValue.localeCompare(bValue);
} else {
return bValue.inference_id.localeCompare(aValue.inference_id);
return bValue.localeCompare(aValue);
}
});
}, [tableData, queryParams]);

View file

@ -34242,10 +34242,6 @@
"xpack.searchInferenceEndpoints.confirmDeleteEndpoint.title": "Supprimer le point de terminaison d'inférence",
"xpack.searchInferenceEndpoints.deleteEndpoint.deleteSuccess": "Le point de terminaison dinférence a été supprimé avec succès.",
"xpack.searchInferenceEndpoints.deleteEndpoint.endpointDeletionFailed": "Échec de la suppression du point de terminaison",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed": "Le modèle est déployé",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed": "Le modèle nest pas déployé",
"xpack.searchInferenceEndpoints.elasticsearch.allocations": "Allocations : {numAllocations}",
"xpack.searchInferenceEndpoints.elasticsearch.threads": "Threads : {numThreads}",
"xpack.searchInferenceEndpoints.endpoint": "Point de terminaison",
"xpack.searchInferenceEndpoints.filter.emptyMessage": "Aucune option",
"xpack.searchInferenceEndpoints.filter.options": "{totalCount, plural, one {# option} other {# options}}",

View file

@ -34226,10 +34226,6 @@
"xpack.searchInferenceEndpoints.confirmDeleteEndpoint.title": "推論エンドポイントを削除",
"xpack.searchInferenceEndpoints.deleteEndpoint.deleteSuccess": "推論エンドポイントは正常に削除されました。",
"xpack.searchInferenceEndpoints.deleteEndpoint.endpointDeletionFailed": "エンドポイントの削除が失敗しました",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed": "モデルはデプロイされます",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed": "モデルはデプロイされません",
"xpack.searchInferenceEndpoints.elasticsearch.allocations": "割り当て:{numAllocations}",
"xpack.searchInferenceEndpoints.elasticsearch.threads": "スレッド:{numThreads}",
"xpack.searchInferenceEndpoints.endpoint": "エンドポイント",
"xpack.searchInferenceEndpoints.filter.emptyMessage": "オプションなし",
"xpack.searchInferenceEndpoints.filter.options": "{totalCount, plural, other {# オプション}}",

View file

@ -34267,10 +34267,6 @@
"xpack.searchInferenceEndpoints.confirmDeleteEndpoint.title": "删除推理终端",
"xpack.searchInferenceEndpoints.deleteEndpoint.deleteSuccess": "推理终端已成功删除。",
"xpack.searchInferenceEndpoints.deleteEndpoint.endpointDeletionFailed": "终端删除失败",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed": "已部署模型",
"xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed": "未部署模型",
"xpack.searchInferenceEndpoints.elasticsearch.allocations": "分配:{numAllocations}",
"xpack.searchInferenceEndpoints.elasticsearch.threads": "线程:{numThreads}",
"xpack.searchInferenceEndpoints.endpoint": "终端",
"xpack.searchInferenceEndpoints.filter.emptyMessage": "无选项",
"xpack.searchInferenceEndpoints.filter.options": "{totalCount, plural, other {# 个选项}}",