Update dependency @elastic/elasticsearch to ^8.17.1 (8.x) (#213283)

This PR contains the following updates:

| Package | Type | Update | Change |
|---|---|---|---|
|
[@elastic/elasticsearch](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/current/index.html)
([source](https://redirect.github.com/elastic/elasticsearch-js)) |
dependencies | patch | [`^8.17.0` ->
`^8.17.1`](https://renovatebot.com/diffs/npm/@elastic%2felasticsearch/8.17.0/8.17.1)
|

---

### Release Notes

<details>
<summary>elastic/elasticsearch-js
(@&#8203;elastic/elasticsearch)</summary>

###
[`v8.17.1`](https://redirect.github.com/elastic/elasticsearch-js/releases/tag/v8.17.1)

[Compare
Source](https://redirect.github.com/elastic/elasticsearch-js/compare/v8.17.0...v8.17.1)


[Changelog](https://www.elastic.co/guide/en/elasticsearch/client/javascript-api/8.17/changelog-client.html)

</details>

---

### Configuration

📅 **Schedule**: Branch creation - At any time (no schedule defined),
Automerge - At any time (no schedule defined).

🚦 **Automerge**: Disabled by config. Please merge this manually once you
are satisfied.

♻ **Rebasing**: Whenever the PR becomes conflicted, or you tick the
rebase/retry checkbox.

🔕 **Ignore**: Close this PR and you won't be reminded about this update
again.

---

- [ ] <!-- rebase-check -->If you want to rebase/retry this PR, check
this box

---

This PR has been generated by [Renovate
Bot](https://redirect.github.com/renovatebot/renovate).

<!--renovate-debug:eyJjcmVhdGVkSW5WZXIiOiIzOS4xMDcuMCIsInVwZGF0ZWRJblZlciI6IjM5LjEwNy4wIiwidGFyZ2V0QnJhbmNoIjoiOC54IiwibGFiZWxzIjpbIlRlYW06Q29yZSIsIlRlYW06T3BlcmF0aW9ucyIsImJhY2twb3J0OnNraXAiLCJyZWxlYXNlX25vdGU6c2tpcCJdfQ==-->

---------

Co-authored-by: elastic-renovate-prod[bot] <174716857+elastic-renovate-prod[bot]@users.noreply.github.com>
Co-authored-by: Alejandro Fernández Haro <alejandro.haro@elastic.co>
This commit is contained in:
elastic-renovate-prod[bot] 2025-03-07 05:39:04 +01:00 committed by GitHub
parent 8124d5dd3c
commit e2e52c8baa
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
38 changed files with 137 additions and 131 deletions

View file

@ -119,7 +119,7 @@
"@elastic/datemath": "5.0.3",
"@elastic/ebt": "^1.1.1",
"@elastic/ecs": "^8.11.5",
"@elastic/elasticsearch": "^8.17.0",
"@elastic/elasticsearch": "^8.17.1",
"@elastic/ems-client": "8.6.3",
"@elastic/eui": "99.3.0-classic.0",
"@elastic/filesaver": "1.1.2",

View file

@ -28,7 +28,6 @@ export function getCommonDefaultAsyncSubmitParams(
}
): Pick<
AsyncSearchSubmitRequest,
// @ts-expect-error 'keep_alive' has been removed from the spec due to a misunderstanding, but it still works
'keep_alive' | 'wait_for_completion_timeout' | 'keep_on_completion'
> {
const useSearchSessions =

View file

@ -39,7 +39,6 @@ export async function getDefaultAsyncSubmitParams(
): Promise<
Pick<
AsyncSearchSubmitRequest,
// @ts-expect-error 'keep_alive' has been removed from the spec due to a misunderstanding, but it still works
| 'batched_reduce_size'
| 'ccs_minimize_roundtrips'
| 'keep_alive'

View file

@ -6,13 +6,10 @@
*/
import dateMath from '@kbn/datemath';
import type {
IngestPipeline,
IngestSimulateDocument,
} from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
import type { IngestPipeline, IngestDocument } from '@elastic/elasticsearch/lib/api/types';
import type { IScopedClusterClient } from '@kbn/core/server';
type Doc = IngestSimulateDocument['_source'];
type Doc = IngestDocument;
/**
* Returns the start and end time range in epoch milliseconds for a given set of documents

View file

@ -141,7 +141,6 @@ const indexWithLifecyclePhaseDefinition: Index = {
step_time_millis: 1544187776208,
phase_execution: {
policy: 'testy',
// @ts-expect-error ILM type is incorrect https://github.com/elastic/elasticsearch-specification/issues/2326
phase_definition: { min_age: '0s', actions: { rollover: { max_size: '1gb' } } },
version: 1,
modified_date_in_millis: 1544031699844,

View file

@ -28,6 +28,7 @@ import { euiThemeVars } from '@kbn/ui-theme';
import { ApplicationStart } from '@kbn/core/public';
import { Index, IndexDetailsTab } from '@kbn/index-management-shared-types';
import { IlmExplainLifecycleLifecycleExplainManaged } from '@elastic/elasticsearch/lib/api/types';
import type { EuiDescriptionListProps } from '@elastic/eui/src/components/description_list/description_list_types';
import { Phase } from '../../../common/types';
import { getPolicyEditPath } from '../../application/services/navigation';
@ -69,7 +70,7 @@ export const IndexLifecycleSummary: FunctionComponent<Props> = ({ index, getUrlF
color: 'default',
label: ilm.phase,
};
const lifecycleProperties = [
const lifecycleProperties: EuiDescriptionListProps['listItems'] = [
{
title: i18n.translate(
'xpack.indexLifecycleMgmt.indexLifecycleMgmtSummary.headers.policyNameTitle',
@ -77,7 +78,7 @@ export const IndexLifecycleSummary: FunctionComponent<Props> = ({ index, getUrlF
defaultMessage: 'Policy name',
}
),
description: ilm.policy,
description: ilm.policy!,
},
{
title: i18n.translate(
@ -147,7 +148,7 @@ export const IndexLifecycleSummary: FunctionComponent<Props> = ({ index, getUrlF
<EuiLink
color="primary"
href={getUrlForApp('management', {
path: `data/index_lifecycle_management/${getPolicyEditPath(ilm.policy)}`,
path: `data/index_lifecycle_management/${getPolicyEditPath(ilm.policy!)}`,
})}
target="_blank"
>

View file

@ -129,7 +129,6 @@ describe('EPM index template install', () => {
}
expect(packageTemplate.settings?.index?.mapping).toHaveProperty('source');
// @ts-expect-error esclient mapping out-of-date
expect(packageTemplate.settings?.index?.mapping?.source).toEqual({ mode: 'synthetic' });
});
@ -160,7 +159,6 @@ describe('EPM index template install', () => {
}
expect(packageTemplate.settings?.index?.mapping).toHaveProperty('source');
// @ts-expect-error esclient mapping out-of-date
expect(packageTemplate.settings?.index?.mapping?.source).toEqual({ mode: 'synthetic' });
});

View file

@ -1048,7 +1048,6 @@ const updateExistingDataStream = async ({
const existingDsConfig = Object.values(existingDs);
const currentBackingIndexConfig = existingDsConfig.at(-1);
const currentIndexMode = currentBackingIndexConfig?.settings?.index?.mode;
// @ts-expect-error Property 'source.mode' does not exist on type 'IndicesMappingLimitSettings'
const currentSourceType = currentBackingIndexConfig?.settings?.index?.mapping?.source?.mode;
let settings: IndicesIndexSettings;
@ -1148,7 +1147,6 @@ const updateExistingDataStream = async ({
// Trigger a rollover if the index mode or source type has changed
if (
currentIndexMode !== settings?.index?.mode ||
// @ts-expect-error Property 'source.mode' does not exist on type 'IndicesMappingLimitSettings'
currentSourceType !== settings?.index?.mapping?.source?.mode ||
dynamicDimensionMappingsChanged
) {

View file

@ -62,18 +62,16 @@ describe('updateDeprecatedComponentTemplates', () => {
expect(esClientMock.cluster.putComponentTemplate).toHaveBeenCalledTimes(1);
expect(esClientMock.cluster.putComponentTemplate).toHaveBeenCalledWith({
body: {
template: {
mappings: {
_source: {},
properties: {},
},
settings: {
index: {
mapping: {
source: {
mode: 'synthetic',
},
template: {
mappings: {
_source: {},
properties: {},
},
settings: {
index: {
mapping: {
source: {
mode: 'synthetic',
},
},
},

View file

@ -8,6 +8,8 @@
import pMap from 'p-map';
import type { ElasticsearchClient } from '@kbn/core/server';
import type { IndicesSourceMode } from '@elastic/elasticsearch/lib/api/types';
import { appContextService } from '..';
export async function updateDeprecatedComponentTemplates(esClient: ElasticsearchClient) {
@ -37,27 +39,23 @@ export async function updateDeprecatedComponentTemplates(esClient: Elasticsearch
const settings = componentTemplate.component_template.template.settings;
await esClient.cluster.putComponentTemplate({
name: componentTemplate.name,
body: {
template: {
settings: {
...settings,
index: {
...settings?.index,
mapping: {
...settings?.index?.mapping,
// @ts-expect-error Property 'source' does not exist on type 'IndicesMappingLimitSettings'
source: {
// @ts-expect-error Property 'source.mode' does not exist on type 'IndicesMappingLimitSettings'
...settings?.index?.mapping?.source,
mode,
},
template: {
settings: {
...settings,
index: {
...settings?.index,
mapping: {
...settings?.index?.mapping,
source: {
...settings?.index?.mapping?.source,
mode: mode as IndicesSourceMode,
},
},
},
mappings: {
...componentTemplate.component_template.template.mappings,
_source: restOfSource,
},
},
mappings: {
...componentTemplate.component_template.template.mappings,
_source: restOfSource,
},
},
});

View file

@ -42,10 +42,8 @@ export const registerSimulateRoute = ({
try {
const response = await clusterClient.asCurrentUser.ingest.simulate({
verbose,
body: {
pipeline,
docs: documents as estypes.IngestSimulateDocument[],
},
pipeline,
docs: documents as estypes.IngestDocument[],
});
return res.ok({ body: response });

View file

@ -182,18 +182,18 @@ export interface StartTrainedModelDeploymentResponse {
export interface AllocatedModel {
key: string;
deployment_id: string;
allocation_status: {
allocation_status?: {
target_allocation_count: number;
state: string;
state?: string;
allocation_count: number;
};
number_of_allocations: number;
threads_per_allocation: number;
number_of_allocations?: number;
threads_per_allocation?: number;
/**
* Not required for rendering in the Model stats
*/
model_id?: string;
state: string;
state?: string;
reason?: string;
model_size_bytes: number;
required_native_memory_bytes: number;
@ -205,7 +205,7 @@ export interface AllocatedModel {
average_inference_time_ms: number;
inference_count: number;
routing_state: {
routing_state: string;
routing_state?: string;
reason?: string;
};
last_access?: number;

View file

@ -714,7 +714,7 @@ export function validateModelMemoryLimit(job: Job, limits: MlServerLimits): Vali
) {
if (typeof limits === 'object' && typeof limits.max_model_memory_limit !== 'undefined') {
const max = limits.max_model_memory_limit.toUpperCase();
const mml = job.analysis_limits.model_memory_limit.toUpperCase();
const mml = `${job.analysis_limits.model_memory_limit}`.toUpperCase();
// @ts-ignore
const mmlBytes = numeral(mml).value();
@ -738,7 +738,7 @@ export function validateModelMemoryLimit(job: Job, limits: MlServerLimits): Vali
}
export function validateModelMemoryLimitUnits(
modelMemoryLimit: string | undefined
modelMemoryLimit: number | string | undefined
): ValidationResults {
const messages: ValidationResults['messages'] = [];
let valid = true;

View file

@ -30,7 +30,7 @@ import {
htmlIdGenerator,
} from '@elastic/eui';
import type { IngestSimulateDocument } from '@elastic/elasticsearch/lib/api/types';
import type { IngestDocument } from '@elastic/elasticsearch/lib/api/types';
import { extractErrorProperties } from '@kbn/ml-error-utils';
import { i18n } from '@kbn/i18n';
@ -90,7 +90,7 @@ export const TestPipeline: FC<Props> = memo(({ state, sourceIndex, mode }) => {
try {
const result = await mlApi.trainedModels.trainedModelPipelineSimulate(
pipelineConfig,
JSON.parse(sampleDocsString) as IngestSimulateDocument[]
JSON.parse(sampleDocsString) as IngestDocument[]
);
setSimulatePipelineResult(result);
} catch (error) {
@ -124,7 +124,7 @@ export const TestPipeline: FC<Props> = memo(({ state, sourceIndex, mode }) => {
const getDocs = useCallback(
async (body: any) => {
let records: IngestSimulateDocument[] = [];
let records: IngestDocument[] = [];
let resp;
try {
resp = await mlApi.esSearch(body);

View file

@ -361,7 +361,7 @@ export class JobCreator {
this._job_config.analysis_limits &&
this._job_config.analysis_limits.model_memory_limit !== undefined
) {
return this._job_config.analysis_limits.model_memory_limit;
return `${this._job_config.analysis_limits.model_memory_limit}`;
} else {
return null;
}

View file

@ -228,16 +228,17 @@ export class DeploymentParamsMapper {
input: MlTrainedModelAssignmentTaskParametersAdaptive
): DeploymentParamsUI {
let optimized: DeploymentParamsUI['optimized'] = 'optimizedForIngest';
if (input.threads_per_allocation > 1) {
const threadsPerAllocation = input.threads_per_allocation ?? 0;
if (threadsPerAllocation > 1) {
optimized = 'optimizedForSearch';
}
const adaptiveResources = !!input.adaptive_allocations?.enabled;
const vCPUs =
input.threads_per_allocation *
threadsPerAllocation *
(adaptiveResources
? input.adaptive_allocations!.max_number_of_allocations!
: input.number_of_allocations);
: input.number_of_allocations ?? 0);
// The deployment can be created via API with a number of allocations that do not exactly match our vCPU ranges.
// In this case, we should find the closest vCPU range that does not exceed the max or static value of the range.

View file

@ -215,12 +215,11 @@ export const ExpandedRow: FC<ExpandedRowProps> = ({ item }) => {
}
return perDeploymentStat.nodes.map((n) => {
const nodeName = Object.values(n.node)[0].name;
const nodeName = Object.values(n.node ?? {})?.[0]?.name;
return {
key: `${perDeploymentStat.deployment_id}_${nodeName}`,
...perDeploymentStat,
...modelSizeStats,
// @ts-expect-error `throughput_last_minute` is not declared in ES Types
node: {
...pick(n, [
'average_inference_time_ms',

View file

@ -325,7 +325,7 @@ export abstract class InferenceBase<TInferResponse> {
}
protected async runPipelineSimulate(
processResponse: (d: estypes.IngestSimulateDocumentSimulation) => TInferResponse
processResponse: (d: estypes.IngestDocumentSimulation) => TInferResponse
): Promise<TInferResponse[]> {
try {
this.setRunning();
@ -376,10 +376,10 @@ export abstract class InferenceBase<TInferResponse> {
};
}
protected getDocFromResponse({ doc, error }: estypes.IngestSimulatePipelineSimulation) {
protected getDocFromResponse({ doc, error }: estypes.IngestPipelineSimulation) {
if (doc === undefined) {
if (error) {
// @ts-expect-error Error is now typed in estypes. However, I doubt that it doesn't get the HTTP wrapper expected.
// @ts-expect-error Error is now typed in estypes. However, I doubt that it doesn't get the expected HTTP wrapper.
this.setFinishedWithErrors(error);
throw Error(error.reason);
}

View file

@ -34,7 +34,7 @@ describe('TrainedModelsService', () => {
let mockTelemetryService: jest.Mocked<ITelemetryClient>;
let mockDeploymentParamsMapper: jest.Mocked<DeploymentParamsMapper>;
const startModelAllocationResponseMock = {
const startModelAllocationResponseMock: StartTrainedModelDeploymentResponse = {
assignment: {
task_parameters: {
model_id: 'deploy-model',
@ -47,6 +47,7 @@ describe('TrainedModelsService', () => {
deployment_id: 'my-deployment-id',
cache_size: '1mb',
},
// @ts-expect-error `node_count` not available in the types. Is it removed?
node_count: 1,
routing_table: {
'node-1': {

View file

@ -298,10 +298,7 @@ export function trainedModelsApiProvider(httpService: HttpService) {
});
},
trainedModelPipelineSimulate(
pipeline: estypes.IngestPipeline,
docs: estypes.IngestSimulateDocument[]
) {
trainedModelPipelineSimulate(pipeline: estypes.IngestPipeline, docs: estypes.IngestDocument[]) {
const body = JSON.stringify({
pipeline,
docs,

View file

@ -204,7 +204,7 @@ export function jobsHealthServiceProvider(
// match datafeed stats with the job ids
return (datafeedsStats as DatafeedStats[])
.map((datafeedStats) => {
const jobId = datafeedStats.timing_stats.job_id;
const jobId = datafeedStats.timing_stats?.job_id ?? 'unknown_job_id';
const jobState =
jobsStats.find((jobStats) => jobStats.job_id === jobId)?.state ?? 'failed';
return {

View file

@ -153,9 +153,12 @@ export function calculateModelMemoryLimitProvider(
datafeedConfig?: Datafeed
): Promise<ModelMemoryEstimationResult> {
const info = await mlClient.info();
const maxModelMemoryLimit = info.limits.max_model_memory_limit?.toUpperCase();
const effectiveMaxModelMemoryLimit =
info.limits.effective_max_model_memory_limit?.toUpperCase();
const maxModelMemoryLimit = info.limits.max_model_memory_limit
? `${info.limits.max_model_memory_limit}`.toUpperCase()
: undefined;
const effectiveMaxModelMemoryLimit = info.limits.effective_max_model_memory_limit
? `${info.limits.effective_max_model_memory_limit}`?.toUpperCase()
: undefined;
const { overallCardinality, maxBucketCardinality } = await getCardinalities(
analysisConfig,

View file

@ -28,8 +28,10 @@ export async function validateModelMemoryLimit(
// retrieve the model memory limit specified by the user in the job config.
// note, this will probably be the auto generated value, unless the user has
// over written it.
const mml = job?.analysis_limits?.model_memory_limit?.toUpperCase() ?? null;
// overwritten it.
const mml = job?.analysis_limits?.model_memory_limit
? `${job.analysis_limits.model_memory_limit}`.toUpperCase()
: null;
const messages = [];
@ -56,8 +58,12 @@ export async function validateModelMemoryLimit(
// retrieve the max_model_memory_limit value from the server
// this will be unset unless the user has set this on their cluster
const body = await mlClient.info();
const maxModelMemoryLimit = body.limits.max_model_memory_limit?.toUpperCase();
const effectiveMaxModelMemoryLimit = body.limits.effective_max_model_memory_limit?.toUpperCase();
const maxModelMemoryLimit = body.limits.max_model_memory_limit
? `${body.limits.max_model_memory_limit}`.toUpperCase()
: undefined;
const effectiveMaxModelMemoryLimit = body.limits.effective_max_model_memory_limit
? `${body.limits.effective_max_model_memory_limit}`.toUpperCase()
: undefined;
if (runCalcModelMemoryTest) {
const { modelMemoryLimit } = await calculateModelMemoryLimitProvider(client, mlClient)(

View file

@ -164,7 +164,9 @@ export class MemoryUsageService {
id: trainedModelStats.model_id,
type: 'trained-model',
size,
nodeNames: nodes.map((n) => Object.values(n.node)[0].name),
nodeNames: nodes
.map((n) => Object.values(n.node ?? {})[0]?.name)
.filter((s): s is string => typeof s !== 'undefined'),
};
}
@ -193,7 +195,7 @@ export class MemoryUsageService {
(d) =>
isDefined(d.deployment_stats) &&
isDefined(d.deployment_stats.nodes) &&
d.deployment_stats.nodes.some((n) => Object.keys(n.node)[0] === nodeId)
d.deployment_stats.nodes.some((n) => Object.keys(n.node ?? {})[0] === nodeId)
)
.map((d) => {
const modelSizeState = d.model_size_stats;
@ -206,7 +208,7 @@ export class MemoryUsageService {
const { nodes, ...rest } = deploymentStats;
const { node: tempNode, ...nodeRest } = nodes.find(
(v) => Object.keys(v.node)[0] === nodeId
(v) => Object.keys(v.node ?? {})[0] === nodeId
)!;
return {
model_id: d.model_id,

View file

@ -19,7 +19,7 @@ import type {
import type { IndexName, IndicesIndexState } from '@elastic/elasticsearch/lib/api/types';
import type {
IngestPipeline,
IngestSimulateDocument,
IngestDocument,
IngestSimulateRequest,
NodesInfoResponseBase,
} from '@elastic/elasticsearch/lib/api/types';
@ -432,7 +432,7 @@ export class ModelsProvider {
* Simulates the effect of the pipeline on given document.
*
*/
async simulatePipeline(docs: IngestSimulateDocument[], pipelineConfig: IngestPipeline) {
async simulatePipeline(docs: IngestDocument[], pipelineConfig: IngestPipeline) {
const simulateRequest: IngestSimulateRequest = {
docs,
pipeline: pipelineConfig,

View file

@ -130,9 +130,9 @@ export async function getElserModelStatus({
(stats) => stats.deployment_stats?.deployment_id === AI_ASSISTANT_KB_INFERENCE_ID
);
const deploymentState = elserModelStats?.deployment_stats?.state;
const allocationState = elserModelStats?.deployment_stats?.allocation_status.state;
const allocationState = elserModelStats?.deployment_stats?.allocation_status?.state;
const allocationCount =
elserModelStats?.deployment_stats?.allocation_status.allocation_count ?? 0;
elserModelStats?.deployment_stats?.allocation_status?.allocation_count ?? 0;
const ready =
deploymentState === 'started' && allocationState === 'fully_allocated' && allocationCount > 0;

View file

@ -109,12 +109,12 @@ describe('InferenceConnector', () => {
rerank: [
{
index: 2,
score: 0.011597361,
relevance_score: 0.011597361,
text: 'leia',
},
{
index: 0,
score: 0.006338922,
relevance_score: 0.006338922,
text: 'luke',
},
],
@ -158,7 +158,9 @@ describe('InferenceConnector', () => {
},
{ asStream: false }
);
expect(response).toEqual(mockResponseRerank.rerank);
expect(response).toEqual(
mockResponseRerank.rerank.map(({ relevance_score: score, ...rest }) => ({ score, ...rest }))
);
});
});

View file

@ -260,7 +260,7 @@ export class InferenceConnector extends SubActionConnector<Config, Secrets> {
false,
signal
);
return response.rerank!;
return response.rerank!.map(({ relevance_score: score, ...rest }) => ({ score, ...rest }));
}
/**

View file

@ -9,14 +9,14 @@
import { errors as esErrors } from '@elastic/elasticsearch';
import {
IngestSimulateDocument,
IngestDocument,
IngestProcessorContainer,
IngestSimulateRequest,
IngestPipelineConfig,
ClusterComponentTemplateNode,
ErrorCauseKeys,
IngestSimulatePipelineSimulation,
IngestSimulateSimulateDocumentResult,
IngestPipelineSimulation,
IngestSimulateDocumentResult,
} from '@elastic/elasticsearch/lib/api/types';
import { IScopedClusterClient } from '@kbn/core/server';
import { flattenObjectNestedLast, calculateObjectDiff } from '@kbn/object-utils';
@ -81,7 +81,7 @@ export interface ProcessorMetrics {
// Narrow down the type to only successful processor results
export type SuccessfulIngestSimulateDocumentResult = WithRequired<
IngestSimulateSimulateDocumentResult,
IngestSimulateDocumentResult,
'processor_results'
>;
@ -146,7 +146,7 @@ export const simulateProcessing = async ({
const prepareSimulationDocs = (
documents: FlattenRecord[],
streamName: string
): IngestSimulateDocument[] => {
): IngestDocument[] => {
return documents.map((doc, id) => ({
_index: streamName,
_id: id.toString(),
@ -233,7 +233,7 @@ const prepareIngestSimulationBody = (
// TODO: update type once Kibana updates to elasticsearch-js 8.17
const simulationBody: {
docs: IngestSimulateDocument[];
docs: IngestDocument[];
pipeline_substitutions: Record<string, IngestPipelineConfig>;
component_template_substitutions?: Record<string, ClusterComponentTemplateNode>;
} = {
@ -304,7 +304,7 @@ const executePipelineSimulation = async (
// TODO: update type to built-in once Kibana updates to elasticsearch-js 8.17
interface IngestSimulationResult {
docs: Array<{ doc: IngestSimulateDocument & { error?: ErrorCauseKeys } }>;
docs: Array<{ doc: IngestDocument & { error?: ErrorCauseKeys } }>;
}
const conditionallyExecuteIngestSimulation = async (
@ -650,14 +650,14 @@ const computeMappingProperties = (detectedFields: NamedFieldDefinitionConfig[])
* Guard helpers
*/
const isSuccessfulProcessor = (
processor: IngestSimulatePipelineSimulation
): processor is WithRequired<IngestSimulatePipelineSimulation, 'doc' | 'tag'> =>
processor: IngestPipelineSimulation
): processor is WithRequired<IngestPipelineSimulation, 'doc' | 'tag'> =>
processor.status === 'success' && !!processor.tag;
const isSkippedProcessor = (
processor: IngestSimulatePipelineSimulation
): processor is WithRequired<IngestSimulatePipelineSimulation, 'tag'> =>
// @ts-expect-error Looks like the IngestSimulatePipelineSimulation.status is not typed correctly and misses the 'skipped' status
processor: IngestPipelineSimulation
): processor is WithRequired<IngestPipelineSimulation, 'tag'> =>
// @ts-expect-error Looks like the IngestPipelineSimulation.status is not typed correctly and misses the 'skipped' status
processor.status === 'skipped';
// TODO: update type once Kibana updates to elasticsearch-js 8.17

View file

@ -52,7 +52,6 @@ export const textClassificationModel: TrainedModelConfigResponse = {
tokenization: {
roberta: {
add_prefix_space: false,
// @ts-expect-error upgrade typescript v5.1.6
do_lower_case: false,
max_sequence_length: 512,
span: -1,
@ -73,7 +72,11 @@ export const textClassificationModel: TrainedModelConfigResponse = {
export const textExpansionModel: TrainedModelConfigResponse = {
inference_config: {
text_expansion: {},
text_expansion: {
vocabulary: {
index: 'an-index',
},
},
},
input: {
field_names: ['text_field'],

View file

@ -24,7 +24,11 @@ describe('updateMlInferenceMappings', () => {
trained_model_configs: [
{
inference_config: {
text_expansion: {},
text_expansion: {
vocabulary: {
index: indexName,
},
},
},
input: {
field_names: [],

View file

@ -137,11 +137,14 @@ const getModel = (modelConfig: MlTrainedModelConfig, modelStats?: MlTrainedModel
// Enrich deployment stats
if (modelStats && modelStats.deployment_stats) {
model.hasStats = true;
model.deploymentState = getDeploymentState(modelStats.deployment_stats.allocation_status.state);
model.nodeAllocationCount = modelStats.deployment_stats.allocation_status.allocation_count;
model.deploymentState = getDeploymentState(
modelStats.deployment_stats.allocation_status?.state
);
model.nodeAllocationCount =
modelStats.deployment_stats.allocation_status?.allocation_count ?? 0;
model.targetAllocationCount =
modelStats.deployment_stats.allocation_status.target_allocation_count;
model.threadsPerAllocation = modelStats.deployment_stats.threads_per_allocation;
modelStats.deployment_stats.allocation_status?.target_allocation_count ?? 0;
model.threadsPerAllocation = modelStats.deployment_stats.threads_per_allocation ?? 0;
model.startTime = modelStats.deployment_stats.start_time;
} else if (model.modelId === LANG_IDENT_MODEL_ID) {
model.deploymentState = MlModelDeploymentState.FullyAllocated;
@ -246,7 +249,7 @@ const getUserFriendlyTitle = (modelId: string, modelType: string) => {
: modelId;
};
const getDeploymentState = (state: string): MlModelDeploymentState => {
const getDeploymentState = (state: string | undefined): MlModelDeploymentState => {
switch (state) {
case 'starting':
return MlModelDeploymentState.Starting;

View file

@ -70,11 +70,11 @@ export const getMlModelDeploymentStatus = async (
const modelDeployment = modelStatsResponse.trained_model_stats[0].deployment_stats;
return {
deploymentState: getMlModelDeploymentStateForStatus(modelDeployment?.allocation_status.state),
deploymentState: getMlModelDeploymentStateForStatus(modelDeployment?.allocation_status?.state),
modelId: modelName,
nodeAllocationCount: modelDeployment?.allocation_status.allocation_count || 0,
nodeAllocationCount: modelDeployment?.allocation_status?.allocation_count || 0,
startTime: modelDeployment?.start_time || 0,
targetAllocationCount: modelDeployment?.allocation_status.target_allocation_count || 0,
targetAllocationCount: modelDeployment?.allocation_status?.target_allocation_count || 0,
threadsPerAllocation: modelDeployment?.threads_per_allocation || 0,
};
};

View file

@ -221,7 +221,7 @@ export class AIAssistantKnowledgeBaseDataClient extends AIAssistantDataClient {
// For standardized way of checking deployment status see: https://github.com/elastic/elasticsearch/issues/106986
const isReadyESS = (stats: MlTrainedModelStats) =>
stats.deployment_stats?.state === 'started' &&
stats.deployment_stats?.allocation_status.state === 'fully_allocated';
stats.deployment_stats?.allocation_status?.state === 'fully_allocated';
const isReadyServerless = (stats: MlTrainedModelStats) =>
(stats.deployment_stats?.nodes as unknown as MlTrainedModelDeploymentNodesStats[])?.some(

View file

@ -56,11 +56,11 @@ export const getJobCorrelations = ({
datafeed_id: datafeed?.datafeed_id,
state: datafeed?.state,
timing_stats: {
bucket_count: datafeed?.timing_stats.bucket_count,
bucket_count: datafeed?.timing_stats?.bucket_count,
exponential_average_search_time_per_hour_ms:
datafeed?.timing_stats.exponential_average_search_time_per_hour_ms,
search_count: datafeed?.timing_stats.search_count,
total_search_time_ms: datafeed?.timing_stats.total_search_time_ms,
datafeed?.timing_stats?.exponential_average_search_time_per_hour_ms,
search_count: datafeed?.timing_stats?.search_count,
total_search_time_ms: datafeed?.timing_stats?.total_search_time_ms,
},
},
};

View file

@ -159,7 +159,7 @@ export default ({ getService }: FtrProviderContext) => {
expectedJobProperties[i].modelBytes,
`model_bytes should be equal to ${JSON.stringify(expectedJobProperties[i].modelBytes)})`
);
expect(job.datafeed_config.timing_stats.total_search_time_ms).to.eql(
expect(job.datafeed_config.timing_stats?.total_search_time_ms).to.eql(
expectedJobProperties[i].datafeedTotalSearchTimeMs,
`datafeed total_search_time_ms should be equal to ${JSON.stringify(
expectedJobProperties[i].datafeedTotalSearchTimeMs
@ -203,7 +203,7 @@ export default ({ getService }: FtrProviderContext) => {
expectedJobPropertiesWithSpace[i].modelBytes
)})`
);
expect(job.datafeed_config.timing_stats.total_search_time_ms).to.eql(
expect(job.datafeed_config.timing_stats?.total_search_time_ms).to.eql(
expectedJobPropertiesWithSpace[i].datafeedTotalSearchTimeMs,
`datafeed total_search_time_ms should be equal to ${JSON.stringify(
expectedJobPropertiesWithSpace[i].datafeedTotalSearchTimeMs

View file

@ -83,7 +83,7 @@ export default ({ getService }: FtrProviderContext) => {
statsResponse as MlGetTrainedModelsStatsResponse
).trained_model_stats.find((v) => v.deployment_stats?.deployment_id === testModel.id);
expect(modelStats!.deployment_stats!.allocation_status.state).to.match(
expect(modelStats!.deployment_stats!.allocation_status?.state).to.match(
/\bstarted\b|\bfully_allocated\b/
);
});
@ -110,7 +110,7 @@ export default ({ getService }: FtrProviderContext) => {
statsResponse as MlGetTrainedModelsStatsResponse
).trained_model_stats.find((v) => v.deployment_stats?.deployment_id === customDeploymentId);
expect(modelStats!.deployment_stats!.allocation_status.state).to.match(
expect(modelStats!.deployment_stats!.allocation_status?.state).to.match(
/\bstarted\b|\bfully_allocated\b/
);
});
@ -141,7 +141,7 @@ export default ({ getService }: FtrProviderContext) => {
statsResponse as MlGetTrainedModelsStatsResponse
).trained_model_stats.find((v) => v.deployment_stats?.deployment_id === testModel.id);
expect(modelStats!.deployment_stats!.allocation_status.state).to.match(
expect(modelStats!.deployment_stats!.allocation_status?.state).to.match(
/\bstarted\b|\bfully_allocated\b/
);
});

View file

@ -2250,10 +2250,10 @@
"@elastic/transport" "^8.3.1"
tslib "^2.4.0"
"@elastic/elasticsearch@^8.17.0":
version "8.17.0"
resolved "https://registry.yarnpkg.com/@elastic/elasticsearch/-/elasticsearch-8.17.0.tgz#0214265bc04a3fe0d23a410b8d08b28217aac540"
integrity sha512-FZ+gQUrPsMpQ2RRIXwTmCoUeFCEausMhp4eQOyxT9j1cwGXHJrhelR6jffM1SC95kQUkB7+TcTq7oQ+bG2BQ9g==
"@elastic/elasticsearch@^8.17.1":
version "8.17.1"
resolved "https://registry.yarnpkg.com/@elastic/elasticsearch/-/elasticsearch-8.17.1.tgz#97baa241f1f7bdd9b9b5c7479c64996e4c54adea"
integrity sha512-EaDP4/jfNu0nhnHZjxk9bL9ofKWKX9QUdEJ8QsGa+/KMPBEwD+HMyYXH4FSRlg7YONI0UbdO/mMZobvcEnMFBA==
dependencies:
"@elastic/transport" "^8.9.1"
apache-arrow "^18.0.0"