[Inference] Replace transport request calls with inference.get (#210396)

## Summary

Replaces transport request calls with `inference.get` elasticsearch
client method.

### Checklist

- [x] [Unit or functional
tests](https://www.elastic.co/guide/en/kibana/master/development-tests.html)
were updated or added to match the most common scenarios
This commit is contained in:
Dima Arnautov 2025-02-10 19:31:03 +01:00 committed by GitHub
parent dd2b833acb
commit 6826e7d62f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 11 additions and 23 deletions

View file

@@ -5,7 +5,6 @@
* 2.0.
*/
import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import { addBasePath } from '..';
import { RouteDependencies } from '../../../types';
@@ -25,13 +24,9 @@ export function registerGetAllRoute({ router, lib: { handleEsError } }: RouteDep
async (context, request, response) => {
const { client } = (await context.core).elasticsearch;
// TODO: Use the client's built-in function rather than the transport when it's available
try {
const { endpoints } = await client.asCurrentUser.transport.request<{
endpoints: InferenceAPIConfigResponse[];
}>({
method: 'GET',
path: `/_inference/_all`,
const { endpoints } = await client.asCurrentUser.inference.get({
inference_id: '_all',
});
return response.ok({

View file

@@ -10,7 +10,6 @@ import type {
InferenceInferenceEndpoint,
InferenceTaskType,
} from '@elastic/elasticsearch/lib/api/types';
import type { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
import type { RouteInitialization } from '../types';
import { createInferenceSchema } from './schemas/inference_schema';
import { modelsProvider } from '../models/model_management';
@@ -85,15 +84,12 @@ export function inferenceModelRoutes(
.addVersion(
{
version: '1',
validate: {},
validate: false,
},
routeGuard.fullLicenseAPIGuard(async ({ client, response }) => {
try {
const body = await client.asCurrentUser.transport.request<{
models: InferenceAPIConfigResponse[];
}>({
method: 'GET',
path: `/_inference/_all`,
const body = await client.asCurrentUser.inference.get({
inference_id: '_all',
});
return response.ok({
body,

View file

@@ -46,14 +46,14 @@ describe('fetch indices', () => {
const mockClient = {
asCurrentUser: {
transport: {
request: jest.fn(),
inference: {
get: jest.fn(),
},
},
};
it('returns all inference endpoints', async () => {
mockClient.asCurrentUser.transport.request.mockImplementationOnce(() => {
mockClient.asCurrentUser.inference.get.mockImplementationOnce(() => {
return Promise.resolve({ endpoints: mockInferenceEndpointsResponse });
});

View file

@@ -13,14 +13,11 @@ export const fetchInferenceEndpoints = async (
): Promise<{
inferenceEndpoints: InferenceAPIConfigResponse[];
}> => {
const { endpoints } = await client.transport.request<{
endpoints: any;
}>({
method: 'GET',
path: `/_inference/_all`,
const { endpoints } = await client.inference.get({
inference_id: '_all',
});
return {
inferenceEndpoints: endpoints,
inferenceEndpoints: endpoints as InferenceAPIConfigResponse[],
};
};