Revert "Replace 'model_id' with 'inference_id' for inference endpoints" (#189732)

Reverts elastic/kibana#189545

To keep next week's serverless release on track, we need to revert this PR on the `main` branch only, not the `8.15` branch.

Co-authored-by: Elastic Machine <elasticmachine@users.noreply.github.com>
Saikat Sarkar authored 2024-08-01 19:51:42 -06:00; committed by GitHub
parent e13a1baf72
commit 7519285705
27 changed files with 67 additions and 71 deletions


@@ -273,7 +273,7 @@ export type InferenceServiceSettings =
 export type InferenceAPIConfigResponse = {
   // Refers to a deployment id
-  inference_id: string;
+  model_id: string;
   task_type: 'sparse_embedding' | 'text_embedding';
   task_settings: {
     model?: string;
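
For orientation, a minimal sketch of what this hunk means for consumers of the type once the revert lands, assuming `InferenceAPIConfigResponse` is exported from `@kbn/ml-trained-models-utils` (the helper below is hypothetical, not part of the diff):

```ts
import type { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';

// With the revert applied, the endpoint identifier lives on `model_id` again;
// `inference_id` is no longer a property of the type.
const getEndpointId = (endpoint: InferenceAPIConfigResponse): string => endpoint.model_id;
```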


@@ -731,7 +731,7 @@ describe('<IndexDetailsPage />', () => {
       httpRequestsMockHelpers.setInferenceModels({
         data: [
           {
-            inference_id: customInferenceModel,
+            model_id: customInferenceModel,
             task_type: 'sparse_embedding',
             service: 'elser',
             service_settings: {


@@ -71,9 +71,9 @@ jest.mock('../../../public/application/components/mappings_editor/mappings_state_context
 jest.mock('../../../public/application/services/api', () => ({
   useLoadInferenceEndpoints: jest.fn().mockReturnValue({
     data: [
-      { inference_id: 'endpoint-1', task_type: 'text_embedding' },
-      { inference_id: 'endpoint-2', task_type: 'sparse_embedding' },
-      { inference_id: 'endpoint-3', task_type: 'completion' },
+      { model_id: 'endpoint-1', task_type: 'text_embedding' },
+      { model_id: 'endpoint-2', task_type: 'sparse_embedding' },
+      { model_id: 'endpoint-3', task_type: 'completion' },
     ] as InferenceAPIConfigResponse[],
     isLoading: false,
     error: null,


@@ -54,10 +54,10 @@ type SelectInferenceIdContentProps = SelectInferenceIdProps & {
 const defaultEndpoints = [
   {
-    inference_id: 'elser_model_2',
+    model_id: 'elser_model_2',
   },
   {
-    inference_id: 'e5',
+    model_id: 'e5',
   },
 ];
@@ -135,15 +135,15 @@ const SelectInferenceIdContent: React.FC<SelectInferenceIdContentProps> = ({
   );
   const missingDefaultEndpoints = defaultEndpoints.filter(
-    (endpoint) => !(filteredEndpoints || []).find((e) => e.inference_id === endpoint.inference_id)
+    (endpoint) => !(filteredEndpoints || []).find((e) => e.model_id === endpoint.model_id)
   );
   const newOptions: EuiSelectableOption[] = [
     ...(filteredEndpoints || []),
     ...missingDefaultEndpoints,
   ].map((endpoint) => ({
-    label: endpoint.inference_id,
-    'data-test-subj': `custom-inference_${endpoint.inference_id}`,
-    checked: value === endpoint.inference_id ? 'on' : undefined,
+    label: endpoint.model_id,
+    'data-test-subj': `custom-inference_${endpoint.model_id}`,
+    checked: value === endpoint.model_id ? 'on' : undefined,
   }));
   if (value && !newOptions.find((option) => option.label === value)) {
     // Sometimes we create a new endpoint but the backend is slow in updating so we need to optimistically update


@@ -142,7 +142,7 @@ jest.mock('../../../../../../../services/api', () => ({
   getInferenceEndpoints: jest.fn().mockResolvedValue({
     data: [
       {
-        inference_id: 'e5',
+        model_id: 'e5',
         task_type: 'text_embedding',
         service: 'elasticsearch',
         service_settings: {


@@ -119,7 +119,7 @@ export function useSemanticText(props: UseSemanticTextProps) {
       dispatch({ type: 'field.add', value: data });
       const inferenceEndpoints = await getInferenceEndpoints();
       const hasInferenceEndpoint = inferenceEndpoints.data?.some(
-        (inference) => inference.inference_id === inferenceId
+        (inference) => inference.model_id === inferenceId
       );
       // if inference endpoint exists already, do not create new inference endpoint
       if (hasInferenceEndpoint) {


@@ -40,7 +40,7 @@ export const useIndexErrors = (
     const semanticTextFieldsWithErrors = semanticTextFields
       .map((field) => {
         const model = endpoints.find(
-          (endpoint) => endpoint.inference_id === field.source.inference_id
+          (endpoint) => endpoint.model_id === field.source.inference_id
         );
         if (!model) {
           return {


@@ -49,7 +49,7 @@ jest.mock('../application/services/api', () => ({
   getInferenceEndpoints: jest.fn().mockResolvedValue({
     data: [
       {
-        inference_id: 'e5',
+        model_id: 'e5',
         task_type: 'text_embedding',
         service: 'elasticsearch',
         service_settings: {


@@ -45,7 +45,7 @@ const getCustomInferenceIdMap = (
       isDownloading: false,
       modelStats: undefined,
     };
-    inferenceMap[model.inference_id] = inferenceEntry;
+    inferenceMap[model.model_id] = inferenceEntry;
     return inferenceMap;
   }, {});
   const defaultInferenceIds = {


@@ -49,7 +49,7 @@ export const DeleteModelsModal: FC<DeleteModelsModalProps> = ({ models, onClose
   const modelsWithInferenceAPIs = models.filter((m) => m.hasInferenceServices);
   const inferenceAPIsIDs: string[] = modelsWithInferenceAPIs.flatMap((model) => {
-    return (model.inference_apis ?? []).map((inference) => inference.inference_id);
+    return (model.inference_apis ?? []).map((inference) => inference.model_id);
   });
   const pipelinesCount = modelsWithPipelines.reduce((acc, curr) => {


@@ -41,7 +41,7 @@ export const StopModelDeploymentsConfirmDialog: FC<ForceStopModelConfirmDialogPr
         // Filter out deployments that are used by inference services
         .filter((deploymentId) => {
           if (!model.inference_apis) return true;
-          return !model.inference_apis.some((inference) => inference.inference_id === deploymentId);
+          return !model.inference_apis.some((inference) => inference.model_id === deploymentId);
         })
     );
   }, [model]);
@@ -110,7 +110,7 @@ export const StopModelDeploymentsConfirmDialog: FC<ForceStopModelConfirmDialogPr
   ]);
   const inferenceServiceIDs = useMemo<string[]>(() => {
-    return (model.inference_apis ?? []).map((inference) => inference.inference_id);
+    return (model.inference_apis ?? []).map((inference) => inference.model_id);
   }, [model]);
   return (


@@ -28,7 +28,7 @@ export const InferenceApi: FC<InferenceAPITabProps> = ({ inferenceApis }) => {
       {inferenceApis.map((inferenceApi, i) => {
         const initialIsOpen = i <= 2;
-        const modelId = inferenceApi.inference_id;
+        const modelId = inferenceApi.model_id;
         return (
           <React.Fragment key={modelId}>


@@ -332,7 +332,7 @@ export function useModelActions({
           item.deployment_ids.some(
             (dId) =>
               Array.isArray(item.inference_apis) &&
-              !item.inference_apis.some((inference) => inference.inference_id === dId)
+              !item.inference_apis.some((inference) => inference.model_id === dId)
           )),
       enabled: (item) => !isLoading,
       onClick: async (item) => {


@@ -33,7 +33,7 @@ describe('populateInferenceServicesProvider', () => {
     { model_id: 'model2' },
   ] as TrainedModelConfigResponse[];
-  client.asInternalUser.transport.request.mockResolvedValue({ endpoints: inferenceServices });
+  client.asInternalUser.transport.request.mockResolvedValue({ models: inferenceServices });
   jest.clearAllMocks();
 });
@@ -44,7 +44,7 @@ describe('populateInferenceServicesProvider', () => {
   describe('when the user has required privileges', () => {
     beforeEach(() => {
-      client.asCurrentUser.transport.request.mockResolvedValue({ endpoints: inferenceServices });
+      client.asCurrentUser.transport.request.mockResolvedValue({ models: inferenceServices });
     });
     test('should populate inference services for trained models', async () => {


@@ -69,16 +69,16 @@ export const populateInferenceServicesProvider = (client: IScopedClusterClient)
     try {
       // Check if model is used by an inference service
-      const { endpoints } = await esClient.transport.request<{
-        endpoints: InferenceAPIConfigResponse[];
+      const { models } = await esClient.transport.request<{
+        models: InferenceAPIConfigResponse[];
       }>({
         method: 'GET',
         path: `/_inference/_all`,
       });
       const inferenceAPIMap = groupBy(
-        endpoints,
-        (endpoint) => endpoint.service === 'elser' && endpoint.service_settings.model_id
+        models,
+        (model) => model.service === 'elser' && model.service_settings.model_id
       );
       for (const model of trainedModels) {
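
For reference, a standalone sketch of the call this hunk restores, assuming (as the reverted code does) that `GET /_inference/_all` responds with `{ models: [...] }`; the helper name and import paths are assumptions, not part of the diff:

```ts
import type { IScopedClusterClient } from '@kbn/core/server';
import type { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';

// Hypothetical helper mirroring the reverted code path: destructure `models`
// from the transport response rather than `endpoints`.
async function fetchAllInferenceEndpoints(
  client: IScopedClusterClient
): Promise<InferenceAPIConfigResponse[]> {
  const { models } = await client.asInternalUser.transport.request<{
    models: InferenceAPIConfigResponse[];
  }>({
    method: 'GET',
    path: '/_inference/_all',
  });
  return models;
}
```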


@@ -13,7 +13,7 @@ import { CopyIDAction } from './copy_id_action';
 const mockInferenceEndpoint = {
   deployment: 'not_applicable',
   endpoint: {
-    inference_id: 'hugging-face-embeddings',
+    model_id: 'hugging-face-embeddings',
     task_type: 'text_embedding',
     service: 'hugging_face',
     service_settings: {
@@ -58,7 +58,7 @@ describe('CopyIDAction', () => {
   it('renders the label with correct text', () => {
     const TestComponent = () => {
-      return <CopyIDAction inferenceId={mockInferenceEndpoint.endpoint.inference_id} />;
+      return <CopyIDAction modelId={mockInferenceEndpoint.endpoint.model_id} />;
     };
     const { getByTestId } = render(<TestComponent />);


@@ -11,22 +11,22 @@ import React from 'react';
 import { useKibana } from '../../../../../../hooks/use_kibana';
 interface CopyIDActionProps {
-  inferenceId: string;
+  modelId: string;
 }
-export const CopyIDAction = ({ inferenceId }: CopyIDActionProps) => {
+export const CopyIDAction = ({ modelId }: CopyIDActionProps) => {
   const {
     services: { notifications },
   } = useKibana();
   const toasts = notifications?.toasts;
   return (
-    <EuiCopy textToCopy={inferenceId}>
+    <EuiCopy textToCopy={modelId}>
       {(copy) => (
         <EuiButtonIcon
           aria-label={i18n.translate('xpack.searchInferenceEndpoints.actions.copyID', {
-            defaultMessage: 'Copy inference endpoint ID {inferenceId}',
-            values: { inferenceId },
+            defaultMessage: 'Copy inference endpoint ID {modelId}',
+            values: { modelId },
           })}
           data-test-subj="inference-endpoints-action-copy-id-label"
           iconType="copyClipboard"
@@ -34,8 +34,8 @@ export const CopyIDAction = ({ inferenceId }: CopyIDActionProps) => {
             copy();
             toasts?.addSuccess({
               title: i18n.translate('xpack.searchInferenceEndpoints.actions.copyIDSuccess', {
-                defaultMessage: 'Inference endpoint ID {inferenceId} copied',
-                values: { inferenceId },
+                defaultMessage: 'Inference endpoint ID {modelId} copied',
+                values: { modelId },
               }),
             });
           }}


@@ -28,7 +28,7 @@ export const DeleteAction: React.FC<DeleteActionProps> = ({ selectedEndpoint })
     deleteEndpoint({
       type: selectedEndpoint.type,
-      id: selectedEndpoint.endpoint.inference_id,
+      id: selectedEndpoint.endpoint.model_id,
     });
   };
@@ -37,7 +37,7 @@
     <EuiButtonIcon
       aria-label={i18n.translate('xpack.searchInferenceEndpoints.actions.deleteEndpoint', {
        defaultMessage: 'Delete inference endpoint {selectedEndpointName}',
-        values: { selectedEndpointName: selectedEndpoint?.endpoint.inference_id },
+        values: { selectedEndpointName: selectedEndpoint?.endpoint.model_id },
      })}
      key="delete"
      iconType="trash"


@@ -22,7 +22,7 @@ jest.mock('@kbn/ml-trained-models-utils', () => ({
 describe('RenderEndpoint component tests', () => {
   describe('with cohere service', () => {
     const mockEndpoint = {
-      inference_id: 'cohere-2',
+      model_id: 'cohere-2',
       service: 'cohere',
       service_settings: {
         similarity: 'cosine',
@@ -68,7 +68,7 @@ describe('RenderEndpoint component tests', () => {
   describe('with elasticsearch service', () => {
     const mockEndpoint = {
-      inference_id: 'model-123',
+      model_id: 'model-123',
       service: 'elasticsearch',
       service_settings: {
         num_allocations: 5,
@@ -102,7 +102,7 @@ describe('RenderEndpoint component tests', () => {
   describe('with azureaistudio service', () => {
     const mockEndpoint = {
-      inference_id: 'azure-ai-1',
+      model_id: 'azure-ai-1',
       service: 'azureaistudio',
       service_settings: {
         target: 'westus',
@@ -155,7 +155,7 @@ describe('RenderEndpoint component tests', () => {
   describe('with azureopenai service', () => {
     const mockEndpoint = {
-      inference_id: 'azure-openai-1',
+      model_id: 'azure-openai-1',
       service: 'azureopenai',
       service_settings: {
         resource_name: 'resource-xyz',
@@ -174,7 +174,7 @@ describe('RenderEndpoint component tests', () => {
   describe('with mistral service', () => {
     const mockEndpoint = {
-      inference_id: 'mistral-ai-1',
+      model_id: 'mistral-ai-1',
      service: 'mistral',
      service_settings: {
        model: 'model-xyz',
@@ -233,7 +233,7 @@ describe('RenderEndpoint component tests', () => {
   describe('with googleaistudio service', () => {
     const mockEndpoint = {
-      inference_id: 'google-ai-1',
+      model_id: 'google-ai-1',
       service: 'googleaistudio',
       service_settings: {
         model_id: 'model-abc',
@@ -267,7 +267,7 @@ describe('RenderEndpoint component tests', () => {
   describe('with amazonbedrock service', () => {
     const mockEndpoint = {
-      inference_id: 'amazon-bedrock-1',
+      model_id: 'amazon-bedrock-1',
       service: 'amazonbedrock',
       service_settings: {
         region: 'us-west-1',
@@ -287,7 +287,7 @@ describe('RenderEndpoint component tests', () => {
   describe('for MIT licensed models', () => {
     const mockEndpointWithMitLicensedModel = {
-      inference_id: 'model-123',
+      model_id: 'model-123',
       service: 'elasticsearch',
       service_settings: {
         num_allocations: 5,
@@ -306,7 +306,7 @@ describe('RenderEndpoint component tests', () => {
     it('does not render the MIT license badge if the model is not eligible', () => {
       const mockEndpointWithNonMitLicensedModel = {
-        inference_id: 'model-123',
+        model_id: 'model-123',
         service: 'elasticsearch',
         service_settings: {
           num_allocations: 5,


@@ -23,7 +23,7 @@ export const EndpointInfo: React.FC<EndpointInfoProps> = ({ endpoint }) => {
   return (
     <EuiFlexGroup gutterSize="xs" direction="column">
       <EuiFlexItem>
-        <strong>{endpoint.inference_id}</strong>
+        <strong>{endpoint.model_id}</strong>
       </EuiFlexItem>
       <EuiFlexItem css={{ textWrap: 'wrap' }}>
         <EndpointModelInfo endpoint={endpoint} />


@@ -15,7 +15,7 @@ import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
 const inferenceEndpoints = [
   {
-    inference_id: 'my-elser-model-05',
+    model_id: 'my-elser-model-05',
     task_type: 'sparse_embedding',
     service: 'elser',
     service_settings: {
@@ -26,7 +26,7 @@ const inferenceEndpoints = [
     task_settings: {},
   },
   {
-    inference_id: 'my-elser-model-04',
+    model_id: 'my-elser-model-04',
     task_type: 'sparse_embedding',
     service: 'elser',
     service_settings: {


@@ -108,7 +108,7 @@ export const TabularPage: React.FC<TabularPageProps> = ({ inferenceEndpoints })
       actions: [
         {
           render: (inferenceEndpoint: InferenceEndpointUI) => (
-            <CopyIDAction inferenceId={inferenceEndpoint.endpoint.inference_id} />
+            <CopyIDAction modelId={inferenceEndpoint.endpoint.model_id} />
           ),
         },
         {


@@ -16,7 +16,7 @@ import { TRAINED_MODEL_STATS_QUERY_KEY } from '../../common/constants';
 const inferenceEndpoints = [
   {
-    inference_id: 'my-elser-model-04',
+    model_id: 'my-elser-model-04',
     task_type: 'sparse_embedding',
     service: 'elser',
     service_settings: {
@@ -27,7 +27,7 @@ const inferenceEndpoints = [
     task_settings: {},
   },
   {
-    inference_id: 'my-elser-model-01',
+    model_id: 'my-elser-model-01',
     task_type: 'sparse_embedding',
     service: 'elser',
     service_settings: {
@@ -38,7 +38,7 @@ const inferenceEndpoints = [
     task_settings: {},
   },
   {
-    inference_id: 'my-elser-model-05',
+    model_id: 'my-elser-model-05',
     task_type: 'text_embedding',
     service: 'elasticsearch',
     service_settings: {
@@ -110,13 +110,11 @@ describe('useTableData', () => {
     );
     const expectedSortedData = [...inferenceEndpoints].sort((a, b) =>
-      b.inference_id.localeCompare(a.inference_id)
+      b.model_id.localeCompare(a.model_id)
     );
-    const sortedEndpoints = result.current.sortedTableData.map(
-      (item) => item.endpoint.inference_id
-    );
-    const expectedModelIds = expectedSortedData.map((item) => item.inference_id);
+    const sortedEndpoints = result.current.sortedTableData.map((item) => item.endpoint.model_id);
+    const expectedModelIds = expectedSortedData.map((item) => item.model_id);
     expect(sortedEndpoints).toEqual(expectedModelIds);
   });
@@ -148,9 +146,7 @@ describe('useTableData', () => {
       { wrapper }
     );
     const filteredData = result.current.sortedTableData;
-    expect(
-      filteredData.every((item) => item.endpoint.inference_id.includes(searchKey))
-    ).toBeTruthy();
+    expect(filteredData.every((item) => item.endpoint.model_id.includes(searchKey))).toBeTruthy();
   });
   it('should update deployment status based on deploymentStatus object', () => {


@@ -64,7 +64,7 @@ export const useTableData = (
     }
     return filteredEndpoints
-      .filter((endpoint) => endpoint.inference_id.includes(searchKey))
+      .filter((endpoint) => endpoint.model_id.includes(searchKey))
       .map((endpoint) => {
         const isElasticService =
           endpoint.service === ServiceProviderKeys.elasticsearch ||
@@ -94,9 +94,9 @@
       const bValue = b[queryParams.sortField];
       if (queryParams.sortOrder === SortOrder.asc) {
-        return aValue.inference_id.localeCompare(bValue.inference_id);
+        return aValue.model_id.localeCompare(bValue.model_id);
       } else {
-        return bValue.inference_id.localeCompare(aValue.inference_id);
+        return bValue.model_id.localeCompare(aValue.model_id);
       }
     });
   }, [tableData, queryParams]);


@@ -12,28 +12,28 @@ import { fetchInferenceEndpoints } from './fetch_inference_endpoints';
 describe('fetch indices', () => {
   const mockInferenceEndpointsResponse = [
     {
-      inference_id: 'my-elser-model-03',
+      model_id: 'my-elser-model-03',
       task_type: 'sparse_embedding',
       service: 'elser',
       service_settings: { num_allocations: 1, num_threads: 1, model_id: '.elser_model_2' },
       task_settings: {},
     },
     {
-      inference_id: 'my-elser-model-04',
+      model_id: 'my-elser-model-04',
       task_type: 'sparse_embedding',
       service: 'elser',
       service_settings: { num_allocations: 1, num_threads: 1, model_id: '.elser_model_2' },
       task_settings: {},
     },
     {
-      inference_id: 'my-elser-model-05',
+      model_id: 'my-elser-model-05',
       task_type: 'sparse_embedding',
       service: 'elser',
       service_settings: { num_allocations: 1, num_threads: 1, model_id: '.elser_model_2' },
       task_settings: {},
     },
     {
-      inference_id: 'my-elser-model-06',
+      model_id: 'my-elser-model-06',
       task_type: 'sparse_embedding',
       service: 'elser',
       service_settings: { num_allocations: 1, num_threads: 1, model_id: '.elser_model_2' },


@@ -54,7 +54,7 @@ export default function ({ getService }: FtrProviderContext) {
       expect(inferenceEndpoints).to.be.ok();
       expect(
         inferenceEndpoints.some(
-          (endpoint: InferenceAPIConfigResponse) => endpoint.inference_id === inferenceId
+          (endpoint: InferenceAPIConfigResponse) => endpoint.model_id === inferenceId
         )
       ).to.eql(true, `${inferenceId} not found in the GET _inference/_all response`);
     });


@@ -65,7 +65,7 @@ export default function ({ getService }: FtrProviderContext) {
       expect(inferenceEndpoints).to.be.ok();
       expect(
         inferenceEndpoints.some(
-          (endpoint: InferenceAPIConfigResponse) => endpoint.inference_id === inferenceId
+          (endpoint: InferenceAPIConfigResponse) => endpoint.model_id === inferenceId
        )
      ).to.eql(true, `${inferenceId} not found in the GET _inference/_all response`);
    });