[8.8] [ResponseOps][Task Manager] stop spamming the logs on status changes (#157762) (#157800)

# Backport

This will backport the following commits from `main` to `8.8`:
- [[ResponseOps][Task Manager] stop spamming the logs on status changes
(#157762)](https://github.com/elastic/kibana/pull/157762)

<!--- Backport version: 8.9.7 -->

### Questions?
Please refer to the [Backport tool documentation](https://github.com/sqren/backport).

<!--BACKPORT [{"author":{"name":"Patrick
Mueller","email":"patrick.mueller@elastic.co"},"sourceCommit":{"committedDate":"2023-05-15T20:21:16Z","message":"[ResponseOps][Task
Manager] stop spamming the logs on status changes (#157762)\n\nresolves
https://github.com/elastic/kibana/issues/156112\r\n\r\nChange task
manager logging on status errors from `warn` to `debug.\r\nMaking this
change as we recently changed from `debug` to `warn`
in\r\nhttps://github.com/elastic/kibana/pull/154045 . But this ended up
too\r\nnoisy, especially at Kibana
startup.","sha":"b542862904073982a00ecd7418cc77dbe567b2d0","branchLabelMapping":{"^v8.9.0$":"main","^v(\\d+).(\\d+).\\d+$":"$1.$2"}},"sourcePullRequest":{"labels":["release_note:skip","Feature:Task
Manager","Team:ResponseOps","v8.8.0","v8.9.0"],"number":157762,"url":"https://github.com/elastic/kibana/pull/157762","mergeCommit":{"message":"[ResponseOps][Task
Manager] stop spamming the logs on status changes (#157762)\n\nresolves
https://github.com/elastic/kibana/issues/156112\r\n\r\nChange task
manager logging on status errors from `warn` to `debug.\r\nMaking this
change as we recently changed from `debug` to `warn`
in\r\nhttps://github.com/elastic/kibana/pull/154045 . But this ended up
too\r\nnoisy, especially at Kibana
startup.","sha":"b542862904073982a00ecd7418cc77dbe567b2d0"}},"sourceBranch":"main","suggestedTargetBranches":["8.8"],"targetPullRequestStates":[{"branch":"8.8","label":"v8.8.0","labelRegex":"^v(\\d+).(\\d+).\\d+$","isSourceBranch":false,"state":"NOT_CREATED"},{"branch":"main","label":"v8.9.0","labelRegex":"^v8.9.0$","isSourceBranch":true,"state":"MERGED","url":"https://github.com/elastic/kibana/pull/157762","number":157762,"mergeCommit":{"message":"[ResponseOps][Task
Manager] stop spamming the logs on status changes (#157762)\n\nresolves
https://github.com/elastic/kibana/issues/156112\r\n\r\nChange task
manager logging on status errors from `warn` to `debug.\r\nMaking this
change as we recently changed from `debug` to `warn`
in\r\nhttps://github.com/elastic/kibana/pull/154045 . But this ended up
too\r\nnoisy, especially at Kibana
startup.","sha":"b542862904073982a00ecd7418cc77dbe567b2d0"}}]}]
BACKPORT-->

Co-authored-by: Patrick Mueller <patrick.mueller@elastic.co>
Committed by Kibana Machine (via GitHub) on 2023-05-15 18:33:23 -04:00
commit 314cc18584 (parent 263945cdc0)
5 changed files with 13 additions and 13 deletions
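
The change is mechanical: every status-change message keeps its text and its place in the control flow, and the `reason` is still returned to callers (so it remains visible through the Task Manager health API); only the log level drops from `warn` to `debug`. A minimal sketch of the pattern, with simplified types (this is not the actual Kibana source):

```ts
// Sketch of the pattern this PR applies (simplified types; not the actual
// Kibana source). The reason is computed once, logged at debug, and returned
// so callers such as the health route can still surface it.
enum HealthStatus {
  OK = 'OK',
  Warning = 'warn',
  Error = 'error',
}

interface Logger {
  debug(message: string): void;
  warn(message: string): void;
}

function checkFreshness(isStale: boolean, logger: Logger): { status: HealthStatus; reason?: string } {
  if (isStale) {
    const reason = 'setting HealthStatus.Error because of expired hot timestamps';
    logger.debug(reason); // was logger.warn(reason) before this PR
    return { status: HealthStatus.Error, reason };
  }
  return { status: HealthStatus.OK };
}
```

Operators who still want these lines in the logs can presumably raise the relevant logger to `debug` in Kibana's logging configuration (e.g. a `logging.loggers` entry for the task manager plugin); the exact logger name is not shown in this PR.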


@@ -38,14 +38,14 @@ export function calculateHealthStatus(
   if (shouldRunTasks) {
     if (hasExpiredHotTimestamps(summarizedStats, now, requiredHotStatsFreshness)) {
       const reason = 'setting HealthStatus.Error because of expired hot timestamps';
-      logger.warn(reason);
+      logger.debug(reason);
       return { status: HealthStatus.Error, reason };
     }
   }
   if (hasExpiredColdTimestamps(summarizedStats, now, requiredColdStatsFreshness)) {
     const reason = 'setting HealthStatus.Error because of expired cold timestamps';
-    logger.warn(reason);
+    logger.debug(reason);
     return { status: HealthStatus.Error, reason };
   }
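
`hasExpiredHotTimestamps` and `hasExpiredColdTimestamps` are not shown in this diff; a hypothetical illustration of that kind of freshness check (names and shape assumed, not Kibana's implementation):

```ts
// Hypothetical freshness check, for illustration only: a monitored stat is
// considered expired when its last-updated timestamp is older than the
// required freshness window (in milliseconds).
function hasExpiredTimestamp(lastUpdatedIso: string, now: Date, requiredFreshnessMs: number): boolean {
  return now.getTime() - new Date(lastUpdatedIso).getTime() > requiredFreshnessMs;
}
```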


@@ -247,12 +247,12 @@ function getHealthStatus(
   if (assumedAverageRecurringRequiredThroughputPerMinutePerKibana < capacityPerMinutePerKibana) {
     const reason = `setting HealthStatus.Warning because assumedAverageRecurringRequiredThroughputPerMinutePerKibana (${assumedAverageRecurringRequiredThroughputPerMinutePerKibana}) < capacityPerMinutePerKibana (${capacityPerMinutePerKibana})`;
-    logger.warn(reason);
+    logger.debug(reason);
     return { status: HealthStatus.Warning, reason };
   }
   const reason = `setting HealthStatus.Error because assumedRequiredThroughputPerMinutePerKibana (${assumedRequiredThroughputPerMinutePerKibana}) >= capacityPerMinutePerKibana (${capacityPerMinutePerKibana}) AND assumedAverageRecurringRequiredThroughputPerMinutePerKibana (${assumedAverageRecurringRequiredThroughputPerMinutePerKibana}) >= capacityPerMinutePerKibana (${capacityPerMinutePerKibana})`;
-  logger.warn(reason);
+  logger.debug(reason);
   return { status: HealthStatus.Error, reason };
 }
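
To make the branch above concrete: this code path is reached when total assumed required throughput already meets or exceeds capacity, and the question is whether recurring demand alone still fits. A restatement with illustrative numbers (not values from the PR):

```ts
// Illustrative restatement of the branch above (not the actual source): given
// that total required throughput already exceeds capacity, the status is
// Warning while recurring demand alone still fits, Error otherwise.
function capacityStatus(recurringPerMinute: number, capacityPerMinute: number): 'Warning' | 'Error' {
  return recurringPerMinute < capacityPerMinute ? 'Warning' : 'Error';
}

capacityStatus(180, 200); // 'Warning' – recurring load still below capacity
capacityStatus(220, 200); // 'Error'   – even recurring load exceeds capacity
```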


@@ -375,24 +375,24 @@ describe('Task Run Statistics', () => {
       { Success: 40, RetryScheduled: 40, Failed: 20, status: 'OK' },
     ]);
-    expect(logger.warn).toHaveBeenCalledTimes(5);
-    expect(logger.warn).toHaveBeenNthCalledWith(
+    expect(logger.debug).toHaveBeenCalledTimes(5);
+    expect(logger.debug).toHaveBeenNthCalledWith(
       1,
       'Health Status warn threshold has been exceeded, resultFrequencySummary.Failed (40) is greater than warn_threshold (39)'
     );
-    expect(logger.warn).toHaveBeenNthCalledWith(
+    expect(logger.debug).toHaveBeenNthCalledWith(
       2,
       'Health Status error threshold has been exceeded, resultFrequencySummary.Failed (60) is greater than error_threshold (59)'
     );
-    expect(logger.warn).toHaveBeenNthCalledWith(
+    expect(logger.debug).toHaveBeenNthCalledWith(
       3,
       'Health Status error threshold has been exceeded, resultFrequencySummary.Failed (60) is greater than error_threshold (59)'
     );
-    expect(logger.warn).toHaveBeenNthCalledWith(
+    expect(logger.debug).toHaveBeenNthCalledWith(
       4,
       'Health Status error threshold has been exceeded, resultFrequencySummary.Failed (60) is greater than error_threshold (59)'
     );
-    expect(logger.warn).toHaveBeenNthCalledWith(
+    expect(logger.debug).toHaveBeenNthCalledWith(
       5,
       'Health Status warn threshold has been exceeded, resultFrequencySummary.Failed (40) is greater than warn_threshold (39)'
     );
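
These assertions assume a jest-mocked logger. A sketch of the kind of setup that makes them work (an assumption for illustration; Kibana's own tests use its logging mock helpers, which differ in detail):

```ts
// Sketch of the mocked-logger setup these assertions rely on. Each logger
// method is a jest.fn(), so call counts and per-call arguments can be
// asserted with toHaveBeenCalledTimes / toHaveBeenNthCalledWith.
const logger = {
  debug: jest.fn(),
  info: jest.fn(),
  warn: jest.fn(),
  error: jest.fn(),
};

// ...run the code under test, then:
expect(logger.debug).toHaveBeenCalledTimes(5);
expect(logger.debug).toHaveBeenNthCalledWith(
  1,
  'Health Status warn threshold has been exceeded, resultFrequencySummary.Failed (40) is greater than warn_threshold (39)'
);
```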


@@ -433,11 +433,11 @@ function getHealthStatus(
 ): HealthStatus {
   if (resultFrequencySummary.Failed > executionErrorThreshold.warn_threshold) {
     if (resultFrequencySummary.Failed > executionErrorThreshold.error_threshold) {
-      logger.warn(
+      logger.debug(
         `Health Status error threshold has been exceeded, resultFrequencySummary.Failed (${resultFrequencySummary.Failed}) is greater than error_threshold (${executionErrorThreshold.error_threshold})`
       );
     } else {
-      logger.warn(
+      logger.debug(
         `Health Status warn threshold has been exceeded, resultFrequencySummary.Failed (${resultFrequencySummary.Failed}) is greater than warn_threshold (${executionErrorThreshold.warn_threshold})`
       );
     }
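
Condensed, the decision this hunk touches looks like the following (simplified restatement, not the actual Kibana source); the thresholds match the values asserted in the test above, warn at 39 and error at 59:

```ts
// Self-contained restatement of the nested threshold check. Failure counts
// above warn_threshold get a debug log with the "warn threshold" message;
// above error_threshold the "error threshold" message is used instead.
interface ExecutionErrorThreshold {
  warn_threshold: number;
  error_threshold: number;
}

function classifyFailed(failed: number, t: ExecutionErrorThreshold): 'OK' | 'warn' | 'error' {
  if (failed <= t.warn_threshold) return 'OK';
  return failed > t.error_threshold ? 'error' : 'warn';
}

classifyFailed(40, { warn_threshold: 39, error_threshold: 59 }); // 'warn'
classifyFailed(60, { warn_threshold: 39, error_threshold: 59 }); // 'error'
```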


@@ -488,7 +488,7 @@ describe('healthRoute', () => {
       summary:
         'Task Manager is unhealthy - Reason: setting HealthStatus.Error because of expired hot timestamps',
     });
-    const warnCalls = (logger as jest.Mocked<Logger>).warn.mock.calls as string[][];
+    const warnCalls = (logger as jest.Mocked<Logger>).debug.mock.calls as string[][];
     const warnMessage =
       /^setting HealthStatus.Warning because assumedAverageRecurringRequiredThroughputPerMinutePerKibana/;
     const found = warnCalls
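
The hunk is truncated here in this view; note that the variable keeps its old `warnCalls` name even though it now reads from `debug.mock.calls`. For reference, jest's `mock.calls` is an array of argument arrays, so a scan like this one typically looks as follows (an illustrative sketch, not the actual continuation of the diff):

```ts
// Illustrative sketch only (the diff above is truncated; this is not the
// actual continuation). The first argument of each debug() call is the
// logged message, so the test can search for one matching the regex.
const found = warnCalls
  .map(([message]) => message)
  .find((message) => warnMessage.test(message));

expect(found).toBeDefined();
```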