mirror of
https://github.com/elastic/kibana.git
synced 2025-04-23 09:19:04 -04:00
[Search] Change model id to inference id in index error message (#190787)
## Summary: Changes 'model_id' to 'inference_id' in the index error message to align with the new API spec. See also: https://github.com/elastic/kibana/pull/190729
This commit is contained in:
parent
6cc30f99d5
commit
8f39bdfe25
2 changed files with 9 additions and 3 deletions
|
@ -100,14 +100,14 @@ export const IndexError: React.FC<IndexErrorProps> = ({ indexName }) => {
|
|||
const semanticTextFieldsWithErrors = semanticTextFields
|
||||
.map((field) => {
|
||||
const model = endpoints.endpoints.find(
|
||||
(endpoint) => endpoint.model_id === field.source.inference_id
|
||||
(endpoint) => endpoint.inference_id === field.source.inference_id
|
||||
);
|
||||
if (!model) {
|
||||
return {
|
||||
error: i18n.translate(
|
||||
'xpack.enterpriseSearch.indexOverview.indexErrors.missingModelError',
|
||||
{
|
||||
defaultMessage: 'Model not found for inference endpoint {inferenceId}',
|
||||
defaultMessage: 'Inference endpoint {inferenceId} not found',
|
||||
values: {
|
||||
inferenceId: field.source.inference_id as string,
|
||||
},
|
||||
|
|
|
@ -10,6 +10,12 @@ import type { InferenceTaskType } from '@elastic/elasticsearch/lib/api/typesWith
|
|||
import type { ModelConfig } from '@kbn/inference_integration_flyout/types';
|
||||
import type { HttpService } from '../http_service';
|
||||
import { ML_INTERNAL_BASE_PATH } from '../../../../common/constants/app';
|
||||
|
||||
// TODO remove inference_id when esType has been updated to include it
|
||||
export interface GetInferenceEndpointsResponse extends estypes.InferenceModelConfigContainer {
|
||||
inference_id: string;
|
||||
}
|
||||
|
||||
export function inferenceModelsApiProvider(httpService: HttpService) {
|
||||
return {
|
||||
/**
|
||||
|
@ -36,7 +42,7 @@ export function inferenceModelsApiProvider(httpService: HttpService) {
|
|||
*/
|
||||
async getAllInferenceEndpoints() {
|
||||
const result = await httpService.http<{
|
||||
endpoints: estypes.InferenceModelConfigContainer[];
|
||||
endpoints: GetInferenceEndpointsResponse[];
|
||||
}>({
|
||||
path: `${ML_INTERNAL_BASE_PATH}/_inference/all`,
|
||||
method: 'GET',
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue