Bring over merge metrics from stateless (#128617)

Part of an effort to combine the merge schedulers from stateless and stateful. The stateless merge scheduler has MergeMetrics that we want in both stateless and stateful, so this PR copies the merge metrics from the stateless merge scheduler into the combined merge scheduler.

Relates ES-9687
Brian Rothermich 2025-06-23 19:42:01 -04:00 committed by GitHub
parent a671505c8a
commit 0f39ff586c
24 changed files with 354 additions and 72 deletions
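
For orientation before the per-file changes below: the new MergeMetrics object is constructed in NodeConstruction and threaded through IndicesService, IndexModule, IndexService, IndexShard, and EngineConfig down to the ThreadPoolMergeScheduler, which drives it through a queued -> running -> completed lifecycle. A rough sketch of that call sequence (illustrative only; `task`, `newSegmentSize`, and `tookMillis` are assumed names, not code from this change):

    // on submit (ThreadPoolMergeScheduler#submitNewMergeTask)
    mergeMetrics.incrementQueuedMergeBytes(task.getOnGoingMerge(), task.getMergeMemoryEstimateBytes());
    // when the merge task starts running (or starts aborting)
    mergeMetrics.moveQueuedMergeBytesToRunning(onGoingMerge, mergeMemoryEstimateBytes);
    // for successful (non-aborted) merges only
    mergeMetrics.markMergeMetrics(onGoingMerge.getMerge(), newSegmentSize, tookMillis);
    // when the merge task is done (ThreadPoolMergeScheduler#mergeTaskDone)
    mergeMetrics.decrementRunningMergeBytes(onGoingMerge);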


@@ -88,12 +88,14 @@ public class ThreadPoolMergeSchedulerStressTestIT extends ESSingleNodeTestCase {
     protected ElasticsearchMergeScheduler createMergeScheduler(
         ShardId shardId,
         IndexSettings indexSettings,
-        @Nullable ThreadPoolMergeExecutorService threadPoolMergeExecutorService
+        @Nullable ThreadPoolMergeExecutorService threadPoolMergeExecutorService,
+        MergeMetrics mergeMetrics
     ) {
         ElasticsearchMergeScheduler mergeScheduler = super.createMergeScheduler(
             shardId,
             indexSettings,
-            threadPoolMergeExecutorService
+            threadPoolMergeExecutorService,
+            mergeMetrics
         );
         assertThat(mergeScheduler, instanceOf(ThreadPoolMergeScheduler.class));
         // assert there is a single merge executor service for all shards


@@ -52,6 +52,7 @@ import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.CommitStats;
 import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.MergeMetrics;
 import org.elasticsearch.index.engine.NoOpEngine;
 import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.mapper.MapperMetrics;

@@ -680,7 +681,8 @@ public class IndexShardIT extends ESSingleNodeTestCase {
             null,
             MapperMetrics.NOOP,
             new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
-            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings())
+            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
+            MergeMetrics.NOOP
         );
     }


@@ -88,7 +88,8 @@ public class IndexingMemoryControllerIT extends ESSingleNodeTestCase {
             config.getIndexCommitListener(),
             config.isPromotableToPrimary(),
             config.getMapperService(),
-            config.getEngineResetLock()
+            config.getEngineResetLock(),
+            config.getMergeMetrics()
         );
     }
} }


@@ -43,6 +43,7 @@ import org.elasticsearch.index.cache.query.IndexQueryCache;
 import org.elasticsearch.index.cache.query.QueryCache;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.engine.MergeMetrics;
 import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.MapperMetrics;

@@ -181,6 +182,7 @@ public final class IndexModule {
     private final MapperMetrics mapperMetrics;
     private final IndexingStatsSettings indexingStatsSettings;
     private final SearchStatsSettings searchStatsSettings;
+    private final MergeMetrics mergeMetrics;

     /**
      * Construct the index module for the index with the specified index settings. The index module contains extension points for plugins

@@ -190,6 +192,7 @@ public final class IndexModule {
      * @param analysisRegistry the analysis registry
      * @param engineFactory the engine factory
      * @param directoryFactories the available store types
+     * @param mergeMetrics
      */
     public IndexModule(
         final IndexSettings indexSettings,

@@ -203,7 +206,8 @@ public final class IndexModule {
         final MapperMetrics mapperMetrics,
         final List<SearchOperationListener> searchOperationListeners,
         final IndexingStatsSettings indexingStatsSettings,
-        final SearchStatsSettings searchStatsSettings
+        final SearchStatsSettings searchStatsSettings,
+        final MergeMetrics mergeMetrics
     ) {
         this.indexSettings = indexSettings;
         this.analysisRegistry = analysisRegistry;

@@ -220,6 +224,7 @@ public final class IndexModule {
         this.mapperMetrics = mapperMetrics;
         this.indexingStatsSettings = indexingStatsSettings;
         this.searchStatsSettings = searchStatsSettings;
+        this.mergeMetrics = mergeMetrics;
     }

     /**

@@ -557,7 +562,8 @@ public final class IndexModule {
             mapperMetrics,
             queryRewriteInterceptor,
             indexingStatsSettings,
-            searchStatsSettings
+            searchStatsSettings,
+            mergeMetrics
         );
         success = true;
         return indexService;


@@ -49,6 +49,7 @@ import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.cache.query.QueryCache;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.engine.MergeMetrics;
 import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService;
 import org.elasticsearch.index.fielddata.FieldDataContext;
 import org.elasticsearch.index.fielddata.IndexFieldData;

@@ -172,6 +173,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
     private final QueryRewriteInterceptor queryRewriteInterceptor;
     private final IndexingStatsSettings indexingStatsSettings;
     private final SearchStatsSettings searchStatsSettings;
+    private final MergeMetrics mergeMetrics;

     @SuppressWarnings("this-escape")
     public IndexService(

@@ -210,7 +212,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
         MapperMetrics mapperMetrics,
         QueryRewriteInterceptor queryRewriteInterceptor,
         IndexingStatsSettings indexingStatsSettings,
-        SearchStatsSettings searchStatsSettings
+        SearchStatsSettings searchStatsSettings,
+        MergeMetrics mergeMetrics
     ) {
         super(indexSettings);
         assert indexCreationContext != IndexCreationContext.RELOAD_ANALYZERS

@@ -297,6 +300,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
         }
         this.indexingStatsSettings = indexingStatsSettings;
         this.searchStatsSettings = searchStatsSettings;
+        this.mergeMetrics = mergeMetrics;
         updateFsyncTaskIfNecessary();
     }

@@ -588,7 +592,8 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
             indexCommitListener,
             mapperMetrics,
             indexingStatsSettings,
-            searchStatsSettings
+            searchStatsSettings,
+            mergeMetrics
         );
         eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
         eventListener.afterIndexShardCreated(indexShard);


@@ -149,6 +149,8 @@ public final class EngineConfig {
     private final EngineResetLock engineResetLock;

+    private final MergeMetrics mergeMetrics;
+
     /**
      * Creates a new {@link org.elasticsearch.index.engine.EngineConfig}
      */

@@ -181,7 +183,8 @@ public final class EngineConfig {
         Engine.IndexCommitListener indexCommitListener,
         boolean promotableToPrimary,
         MapperService mapperService,
-        EngineResetLock engineResetLock
+        EngineResetLock engineResetLock,
+        MergeMetrics mergeMetrics
     ) {
         this.shardId = shardId;
         this.indexSettings = indexSettings;

@@ -229,6 +232,7 @@ public final class EngineConfig {
         // always use compound on flush - reduces # of file-handles on refresh
         this.useCompoundFile = indexSettings.getSettings().getAsBoolean(USE_COMPOUND_FILE, true);
         this.engineResetLock = engineResetLock;
+        this.mergeMetrics = mergeMetrics;
     }

     /**

@@ -477,4 +481,8 @@ public final class EngineConfig {
     public EngineResetLock getEngineResetLock() {
         return engineResetLock;
     }
+
+    public MergeMetrics getMergeMetrics() {
+        return mergeMetrics;
+    }
 }


@@ -257,7 +257,8 @@ public class InternalEngine extends Engine {
         mergeScheduler = createMergeScheduler(
             engineConfig.getShardId(),
             engineConfig.getIndexSettings(),
-            engineConfig.getThreadPoolMergeExecutorService()
+            engineConfig.getThreadPoolMergeExecutorService(),
+            engineConfig.getMergeMetrics()
         );
         scheduler = mergeScheduler.getMergeScheduler();
         throttle = new IndexThrottle(pauseIndexingOnThrottle);

@@ -2908,10 +2909,11 @@ public class InternalEngine extends Engine {
     protected ElasticsearchMergeScheduler createMergeScheduler(
         ShardId shardId,
         IndexSettings indexSettings,
-        @Nullable ThreadPoolMergeExecutorService threadPoolMergeExecutorService
+        @Nullable ThreadPoolMergeExecutorService threadPoolMergeExecutorService,
+        MergeMetrics mergeMetrics
     ) {
         if (threadPoolMergeExecutorService != null) {
-            return new EngineThreadPoolMergeScheduler(shardId, indexSettings, threadPoolMergeExecutorService);
+            return new EngineThreadPoolMergeScheduler(shardId, indexSettings, threadPoolMergeExecutorService, mergeMetrics);
         } else {
             return new EngineConcurrentMergeScheduler(shardId, indexSettings);
         }

@@ -2921,9 +2923,10 @@ public class InternalEngine extends Engine {
         EngineThreadPoolMergeScheduler(
             ShardId shardId,
             IndexSettings indexSettings,
-            ThreadPoolMergeExecutorService threadPoolMergeExecutorService
+            ThreadPoolMergeExecutorService threadPoolMergeExecutorService,
+            MergeMetrics mergeMetrics
         ) {
-            super(shardId, indexSettings, threadPoolMergeExecutorService, InternalEngine.this::estimateMergeBytes);
+            super(shardId, indexSettings, threadPoolMergeExecutorService, InternalEngine.this::estimateMergeBytes, mergeMetrics);
         }

         @Override


@@ -0,0 +1,101 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the "Elastic License
+ * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
+ * Public License v 1"; you may not use this file except in compliance with, at
+ * your election, the "Elastic License 2.0", the "GNU Affero General Public
+ * License v3.0 only", or the "Server Side Public License, v 1".
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.index.MergePolicy;
+import org.elasticsearch.index.merge.OnGoingMerge;
+import org.elasticsearch.telemetry.TelemetryProvider;
+import org.elasticsearch.telemetry.metric.LongCounter;
+import org.elasticsearch.telemetry.metric.LongHistogram;
+import org.elasticsearch.telemetry.metric.LongWithAttributes;
+import org.elasticsearch.telemetry.metric.MeterRegistry;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+public class MergeMetrics {
+
+    public static final String MERGE_SEGMENTS_SIZE = "es.merge.segments.size";
+    public static final String MERGE_DOCS_TOTAL = "es.merge.docs.total";
+    public static final String MERGE_SEGMENTS_QUEUED_USAGE = "es.merge.segments.queued.usage";
+    public static final String MERGE_SEGMENTS_RUNNING_USAGE = "es.merge.segments.running.usage";
+    public static final String MERGE_SEGMENTS_MERGED_SIZE = "es.merge.segments.merged.size";
+    public static final String MERGE_QUEUED_ESTIMATED_MEMORY_SIZE = "es.merge.segments.memory.size";
+    public static final String MERGE_TIME_IN_SECONDS = "es.merge.time";
+
+    public static MergeMetrics NOOP = new MergeMetrics(TelemetryProvider.NOOP.getMeterRegistry());
+
+    private final LongCounter mergeSizeInBytes;
+    private final LongCounter mergeMergedSegmentSizeInBytes;
+    private final LongCounter mergeNumDocs;
+    private final LongHistogram mergeTimeInSeconds;
+
+    private final AtomicLong runningMergeSizeInBytes = new AtomicLong();
+    private final AtomicLong queuedMergeSizeInBytes = new AtomicLong();
+    private final AtomicLong queuedEstimatedMergeMemoryInBytes = new AtomicLong();
+
+    public MergeMetrics(MeterRegistry meterRegistry) {
+        mergeSizeInBytes = meterRegistry.registerLongCounter(MERGE_SEGMENTS_SIZE, "Total size of segments merged", "bytes");
+        meterRegistry.registerLongGauge(
+            MERGE_SEGMENTS_QUEUED_USAGE,
+            "Total usage of segments queued to be merged",
+            "bytes",
+            () -> new LongWithAttributes(queuedMergeSizeInBytes.get())
+        );
+        meterRegistry.registerLongGauge(
+            MERGE_SEGMENTS_RUNNING_USAGE,
+            "Total usage of segments currently being merged",
+            "bytes",
+            () -> new LongWithAttributes(runningMergeSizeInBytes.get())
+        );
+        mergeMergedSegmentSizeInBytes = meterRegistry.registerLongCounter(
+            MERGE_SEGMENTS_MERGED_SIZE,
+            "Total size of the new merged segments",
+            "bytes"
+        );
+        mergeNumDocs = meterRegistry.registerLongCounter(MERGE_DOCS_TOTAL, "Total number of documents merged", "documents");
+        mergeTimeInSeconds = meterRegistry.registerLongHistogram(MERGE_TIME_IN_SECONDS, "Merge time in seconds", "seconds");
+        meterRegistry.registerLongGauge(
+            MERGE_QUEUED_ESTIMATED_MEMORY_SIZE,
+            "Estimated memory usage for queued merges",
+            "bytes",
+            () -> new LongWithAttributes(queuedEstimatedMergeMemoryInBytes.get())
+        );
+    }
+
+    public void incrementQueuedMergeBytes(OnGoingMerge currentMerge, long estimatedMemorySize) {
+        queuedMergeSizeInBytes.getAndAdd(currentMerge.getTotalBytesSize());
+        queuedEstimatedMergeMemoryInBytes.getAndAdd(estimatedMemorySize);
+    }
+
+    public void moveQueuedMergeBytesToRunning(OnGoingMerge currentMerge, long estimatedMemorySize) {
+        long totalSize = currentMerge.getTotalBytesSize();
+        queuedMergeSizeInBytes.getAndAdd(-totalSize);
+        runningMergeSizeInBytes.getAndAdd(totalSize);
+        queuedEstimatedMergeMemoryInBytes.getAndAdd(-estimatedMemorySize);
+    }
+
+    public void decrementRunningMergeBytes(OnGoingMerge currentMerge) {
+        runningMergeSizeInBytes.getAndAdd(-currentMerge.getTotalBytesSize());
+    }
+
+    public void markMergeMetrics(MergePolicy.OneMerge currentMerge, long mergedSegmentSize, long tookMillis) {
+        mergeSizeInBytes.incrementBy(currentMerge.totalBytesSize());
+        mergeMergedSegmentSizeInBytes.incrementBy(mergedSegmentSize);
+        mergeNumDocs.incrementBy(currentMerge.totalNumDocs());
+        mergeTimeInSeconds.record(tookMillis / 1000);
+    }
+
+    public long getQueuedMergeSizeInBytes() {
+        return queuedMergeSizeInBytes.get();
+    }
+
+    public long getRunningMergeSizeInBytes() {
+        return runningMergeSizeInBytes.get();
+    }
+}
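
For reference, a minimal sketch of how the accounting methods above affect the registered gauges and counters (hypothetical usage, not part of this change; `onGoingMerge`, `oneMerge`, `newSegmentSize`, and `tookMillis` are illustrative):

    // a NOOP-backed instance, the same construction used for MergeMetrics.NOOP above
    MergeMetrics metrics = new MergeMetrics(TelemetryProvider.NOOP.getMeterRegistry());
    metrics.incrementQueuedMergeBytes(onGoingMerge, 1024);          // queued-size gauge += getTotalBytesSize(), queued-memory gauge += 1024
    metrics.moveQueuedMergeBytesToRunning(onGoingMerge, 1024);      // queued gauges decrease, running gauge increases
    metrics.decrementRunningMergeBytes(onGoingMerge);               // running gauge decreases once the task is done
    metrics.markMergeMetrics(oneMerge, newSegmentSize, tookMillis); // counters and the time histogram, successful merges only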


@@ -64,6 +64,7 @@ public class ThreadPoolMergeScheduler extends MergeScheduler implements Elastics
     private final MergeSchedulerConfig config;
     protected final Logger logger;
     private final MergeTracking mergeTracking;
+    private final MergeMetrics mergeMetrics;
     private final ThreadPoolMergeExecutorService threadPoolMergeExecutorService;
     private final PriorityQueue<MergeTask> backloggedMergeTasks = new PriorityQueue<>(
         16,

@@ -86,16 +87,19 @@ public class ThreadPoolMergeScheduler extends MergeScheduler implements Elastics
      * @param indexSettings used to obtain the {@link MergeSchedulerConfig}
      * @param threadPoolMergeExecutorService the executor service used to execute merge tasks from this scheduler
      * @param mergeMemoryEstimateProvider provides an estimate for how much memory a merge will take
+     * @param mergeMetrics metrics related to merges
      */
     public ThreadPoolMergeScheduler(
         ShardId shardId,
         IndexSettings indexSettings,
         ThreadPoolMergeExecutorService threadPoolMergeExecutorService,
-        MergeMemoryEstimateProvider mergeMemoryEstimateProvider
+        MergeMemoryEstimateProvider mergeMemoryEstimateProvider,
+        MergeMetrics mergeMetrics
     ) {
         this.shardId = shardId;
         this.config = indexSettings.getMergeSchedulerConfig();
         this.logger = Loggers.getLogger(getClass(), shardId);
+        this.mergeMetrics = mergeMetrics;
         this.mergeTracking = new MergeTracking(
             logger,
             () -> this.config.isAutoThrottle()

@@ -226,6 +230,7 @@ public class ThreadPoolMergeScheduler extends MergeScheduler implements Elastics
     boolean submitNewMergeTask(MergeSource mergeSource, MergePolicy.OneMerge merge, MergeTrigger mergeTrigger) {
         try {
             MergeTask mergeTask = newMergeTask(mergeSource, merge, mergeTrigger);
+            mergeMetrics.incrementQueuedMergeBytes(mergeTask.getOnGoingMerge(), mergeTask.getMergeMemoryEstimateBytes());
             mergeQueued(mergeTask.onGoingMerge);
             return threadPoolMergeExecutorService.submitMergeTask(mergeTask);
         } finally {

@@ -310,6 +315,7 @@ public class ThreadPoolMergeScheduler extends MergeScheduler implements Elastics
     private void mergeTaskDone(OnGoingMerge merge) {
         doneMergeTaskCount.incrementAndGet();
+        mergeMetrics.decrementRunningMergeBytes(merge);
         mergeExecutedOrAborted(merge);
         checkMergeTaskThrottling();
     }

@@ -437,6 +443,7 @@ public class ThreadPoolMergeScheduler extends MergeScheduler implements Elastics
             assert hasStartedRunning() == false;
             assert ThreadPoolMergeScheduler.this.runningMergeTasks.containsKey(onGoingMerge.getMerge())
                 : "runNowOrBacklog must be invoked before actually running the merge task";
+            boolean success = false;
             try {
                 beforeMerge(onGoingMerge);
                 try {

@@ -444,11 +451,13 @@ public class ThreadPoolMergeScheduler extends MergeScheduler implements Elastics
                         throw new IllegalStateException("The merge task is already started or aborted");
                     }
                     mergeTracking.mergeStarted(onGoingMerge);
+                    mergeMetrics.moveQueuedMergeBytesToRunning(onGoingMerge, mergeMemoryEstimateBytes);
                     if (verbose()) {
                         message(String.format(Locale.ROOT, "merge task %s start", this));
                     }
                     try {
                         doMerge(mergeSource, onGoingMerge.getMerge());
+                        success = onGoingMerge.getMerge().isAborted() == false;
                         if (verbose()) {
                             message(
                                 String.format(

@@ -468,6 +477,10 @@ public class ThreadPoolMergeScheduler extends MergeScheduler implements Elastics
                     }
                 } finally {
                     long tookMS = TimeValue.nsecToMSec(System.nanoTime() - mergeStartTimeNS.get());
+                    if (success) {
+                        long newSegmentSize = getNewSegmentSize(onGoingMerge.getMerge());
+                        mergeMetrics.markMergeMetrics(onGoingMerge.getMerge(), newSegmentSize, tookMS);
+                    }
                     mergeTracking.mergeFinished(onGoingMerge.getMerge(), onGoingMerge, tookMS);
                 }
             } finally {

@@ -508,6 +521,8 @@ public class ThreadPoolMergeScheduler extends MergeScheduler implements Elastics
             // {@code IndexWriter} checks the abort flag internally, while running the merge.
             // The segments of an aborted merge become available to subsequent merges.
             onGoingMerge.getMerge().setAborted();
+            mergeMetrics.moveQueuedMergeBytesToRunning(onGoingMerge, mergeMemoryEstimateBytes);
+
             try {
                 if (verbose()) {
                     message(String.format(Locale.ROOT, "merge task %s start abort", this));

@@ -554,6 +569,21 @@ public class ThreadPoolMergeScheduler extends MergeScheduler implements Elastics
             return onGoingMerge;
         }

+        private static long getNewSegmentSize(MergePolicy.OneMerge currentMerge) {
+            try {
+                return currentMerge.getMergeInfo() != null ? currentMerge.getMergeInfo().sizeInBytes() : currentMerge.estimatedMergeBytes;
+            } catch (IOException e) {
+                // For stateless only: It is (rarely) possible that the merged segment could be merged away by the IndexWriter prior to
+                // reaching this point. Once the IW creates the new segment, it could be exposed to be included in a new merge. That
+                // merge can be executed concurrently if more than 1 merge threads are configured. That new merge allows this IW to
+                // delete segment created by this merge. Although the files may still be available in the object store for executing
+                // searches, the IndexDirectory will no longer have references to the underlying segment files and will throw file not
+                // found if we try to read them. In this case, we will ignore that exception (which would otherwise fail the shard) and
+                // use the originally estimated merge size for metrics.
+                return currentMerge.estimatedMergeBytes;
+            }
+        }
+
         @Override
         public String toString() {
             return name + (onGoingMerge.getMerge().isAborted() ? " (aborted)" : "");


@@ -91,6 +91,7 @@ import org.elasticsearch.index.engine.Engine.GetResult;
 import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.engine.EngineException;
 import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.engine.MergeMetrics;
 import org.elasticsearch.index.engine.ReadOnlyEngine;
 import org.elasticsearch.index.engine.RefreshFailedEngineException;
 import org.elasticsearch.index.engine.SafeCommitInfo;

@@ -269,6 +270,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     private final MeanMetric externalRefreshMetric = new MeanMetric();
     private final MeanMetric flushMetric = new MeanMetric();
     private final CounterMetric periodicFlushMetric = new CounterMetric();
+    private final MergeMetrics mergeMetrics;

     private final ShardEventListener shardEventListener = new ShardEventListener();

@@ -343,7 +345,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         final Engine.IndexCommitListener indexCommitListener,
         final MapperMetrics mapperMetrics,
         final IndexingStatsSettings indexingStatsSettings,
-        final SearchStatsSettings searchStatsSettings
+        final SearchStatsSettings searchStatsSettings,
+        final MergeMetrics mergeMetrics
     ) throws IOException {
         super(shardRouting.shardId(), indexSettings);
         assert shardRouting.initializing();

@@ -432,6 +435,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         this.refreshFieldHasValueListener = new RefreshFieldHasValueListener();
         this.relativeTimeInNanosSupplier = relativeTimeInNanosSupplier;
         this.indexCommitListener = indexCommitListener;
+        this.mergeMetrics = mergeMetrics;
     }

     public ThreadPool getThreadPool() {

@@ -3755,7 +3759,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             indexCommitListener,
             routingEntry().isPromotableToPrimary(),
             mapperService(),
-            engineResetLock
+            engineResetLock,
+            mergeMetrics
         );
     }


@@ -98,6 +98,7 @@ import org.elasticsearch.index.cache.request.ShardRequestCache;
 import org.elasticsearch.index.engine.CommitStats;
 import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.engine.InternalEngineFactory;
+import org.elasticsearch.index.engine.MergeMetrics;
 import org.elasticsearch.index.engine.NoOpEngine;
 import org.elasticsearch.index.engine.ReadOnlyEngine;
 import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService;

@@ -283,6 +284,7 @@ public class IndicesService extends AbstractLifecycleComponent
     final SlowLogFieldProvider slowLogFieldProvider; // pkg-private for testing
     private final IndexingStatsSettings indexStatsSettings;
     private final SearchStatsSettings searchStatsSettings;
+    private final MergeMetrics mergeMetrics;

     @Override
     protected void doStart() {

@@ -358,6 +360,7 @@ public class IndicesService extends AbstractLifecycleComponent
         this.requestCacheKeyDifferentiator = builder.requestCacheKeyDifferentiator;
         this.queryRewriteInterceptor = builder.queryRewriteInterceptor;
         this.mapperMetrics = builder.mapperMetrics;
+        this.mergeMetrics = builder.mergeMetrics;
         // doClose() is called when shutting down a node, yet there might still be ongoing requests
         // that we need to wait for before closing some resources such as the caches. In order to
         // avoid closing these resources while ongoing requests are still being processed, we use a

@@ -801,7 +804,8 @@ public class IndicesService extends AbstractLifecycleComponent
             mapperMetrics,
             searchOperationListeners,
             indexStatsSettings,
-            searchStatsSettings
+            searchStatsSettings,
+            mergeMetrics
         );
         for (IndexingOperationListener operationListener : indexingOperationListeners) {
             indexModule.addIndexOperationListener(operationListener);

@@ -900,7 +904,8 @@ public class IndicesService extends AbstractLifecycleComponent
             mapperMetrics,
             searchOperationListeners,
             indexStatsSettings,
-            searchStatsSettings
+            searchStatsSettings,
+            mergeMetrics
         );
         pluginsService.forEach(p -> p.onIndexModule(indexModule));
         return indexModule.newIndexMapperService(clusterService, parserConfig, mapperRegistry, scriptService);


@@ -27,6 +27,7 @@ import org.elasticsearch.index.SlowLogFieldProvider;
 import org.elasticsearch.index.SlowLogFields;
 import org.elasticsearch.index.analysis.AnalysisRegistry;
 import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.engine.MergeMetrics;
 import org.elasticsearch.index.mapper.MapperMetrics;
 import org.elasticsearch.index.mapper.MapperRegistry;
 import org.elasticsearch.index.shard.SearchOperationListener;

@@ -79,6 +80,7 @@ public class IndicesServiceBuilder {
     @Nullable
     CheckedBiConsumer<ShardSearchRequest, StreamOutput, IOException> requestCacheKeyDifferentiator;
     MapperMetrics mapperMetrics;
+    MergeMetrics mergeMetrics;
     List<SearchOperationListener> searchOperationListener = List.of();
     QueryRewriteInterceptor queryRewriteInterceptor = null;
     SlowLogFieldProvider slowLogFieldProvider = new SlowLogFieldProvider() {

@@ -206,6 +208,11 @@ public class IndicesServiceBuilder {
         return this;
     }

+    public IndicesServiceBuilder mergeMetrics(MergeMetrics mergeMetrics) {
+        this.mergeMetrics = mergeMetrics;
+        return this;
+    }
+
     public List<SearchOperationListener> searchOperationListeners() {
         return searchOperationListener;
     }

@@ -244,6 +251,7 @@ public class IndicesServiceBuilder {
         Objects.requireNonNull(indexFoldersDeletionListeners);
         Objects.requireNonNull(snapshotCommitSuppliers);
         Objects.requireNonNull(mapperMetrics);
+        Objects.requireNonNull(mergeMetrics);
         Objects.requireNonNull(searchOperationListener);
         Objects.requireNonNull(slowLogFieldProvider);


@@ -116,6 +116,7 @@ import org.elasticsearch.index.IndexingPressure;
 import org.elasticsearch.index.SlowLogFieldProvider;
 import org.elasticsearch.index.SlowLogFields;
 import org.elasticsearch.index.analysis.AnalysisRegistry;
+import org.elasticsearch.index.engine.MergeMetrics;
 import org.elasticsearch.index.mapper.MapperMetrics;
 import org.elasticsearch.index.mapper.SourceFieldMetrics;
 import org.elasticsearch.index.search.stats.ShardSearchPhaseAPMMetrics;

@@ -806,6 +807,9 @@ class NodeConstruction {
             threadPool::relativeTimeInMillis
         );
         MapperMetrics mapperMetrics = new MapperMetrics(sourceFieldMetrics);
+
+        MergeMetrics mergeMetrics = new MergeMetrics(telemetryProvider.getMeterRegistry());
+
         final List<SearchOperationListener> searchOperationListeners = List.of(
             new ShardSearchPhaseAPMMetrics(telemetryProvider.getMeterRegistry())
         );

@@ -894,6 +898,7 @@ class NodeConstruction {
             .valuesSourceRegistry(searchModule.getValuesSourceRegistry())
             .requestCacheKeyDifferentiator(searchModule.getRequestCacheKeyDifferentiator())
             .mapperMetrics(mapperMetrics)
+            .mergeMetrics(mergeMetrics)
             .searchOperationListeners(searchOperationListeners)
             .slowLogFieldProvider(slowLogFieldProvider)
             .build();

@@ -1290,6 +1295,7 @@ class NodeConstruction {
             b.bind(FailureStoreMetrics.class).toInstance(failureStoreMetrics);
             b.bind(ShutdownPrepareService.class).toInstance(shutdownPrepareService);
             b.bind(OnlinePrewarmingService.class).toInstance(onlinePrewarmingService);
+            b.bind(MergeMetrics.class).toInstance(mergeMetrics);
         });

         if (ReadinessService.enabled(environment)) {


@@ -60,6 +60,7 @@ import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineTestCase;
 import org.elasticsearch.index.engine.InternalEngine;
 import org.elasticsearch.index.engine.InternalEngineFactory;
+import org.elasticsearch.index.engine.MergeMetrics;
 import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService;
 import org.elasticsearch.index.engine.ThreadPoolMergeScheduler;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;

@@ -256,7 +257,8 @@ public class IndexModuleTests extends ESTestCase {
             MapperMetrics.NOOP,
             emptyList(),
             new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
-            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings())
+            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
+            MergeMetrics.NOOP
         );

         module.setReaderWrapper(s -> new Wrapper());

@@ -286,7 +288,8 @@ public class IndexModuleTests extends ESTestCase {
             MapperMetrics.NOOP,
             emptyList(),
             new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
-            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings())
+            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
+            MergeMetrics.NOOP
         );

         final IndexService indexService = newIndexService(module);

@@ -314,7 +317,8 @@ public class IndexModuleTests extends ESTestCase {
             MapperMetrics.NOOP,
             emptyList(),
             new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
-            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings())
+            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
+            MergeMetrics.NOOP
         );

         module.setDirectoryWrapper(new TestDirectoryWrapper());

@@ -670,7 +674,8 @@ public class IndexModuleTests extends ESTestCase {
             MapperMetrics.NOOP,
             emptyList(),
             new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
-            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings())
+            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
+            MergeMetrics.NOOP
        );

         final IndexService indexService = newIndexService(module);

@@ -695,7 +700,8 @@ public class IndexModuleTests extends ESTestCase {
             MapperMetrics.NOOP,
             emptyList(),
             new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
-            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings())
+            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
+            MergeMetrics.NOOP
         );

         final AtomicLong lastAcquiredPrimaryTerm = new AtomicLong();

@@ -800,7 +806,8 @@ public class IndexModuleTests extends ESTestCase {
             MapperMetrics.NOOP,
             emptyList(),
             new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
-            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings())
+            new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
+            MergeMetrics.NOOP
         );
     }


@@ -3635,7 +3635,8 @@ public class InternalEngineTests extends EngineTestCase {
             null,
             true,
             config.getMapperService(),
-            config.getEngineResetLock()
+            config.getEngineResetLock(),
+            config.getMergeMetrics()
         );

         expectThrows(EngineCreationFailureException.class, () -> new InternalEngine(brokenConfig));

@@ -7243,7 +7244,8 @@ public class InternalEngineTests extends EngineTestCase {
             config.getIndexCommitListener(),
             config.isPromotableToPrimary(),
             config.getMapperService(),
-            config.getEngineResetLock()
+            config.getEngineResetLock(),
+            config.getMergeMetrics()
         );

         try (InternalEngine engine = createEngine(configWithWarmer)) {
             assertThat(warmedUpReaders, empty());


@@ -46,6 +46,7 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
@@ -74,12 +75,14 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
         nodeEnvironment = newNodeEnvironment(settings);
         ThreadPoolMergeExecutorService threadPoolMergeExecutorService = ThreadPoolMergeExecutorServiceTests
             .getThreadPoolMergeExecutorService(threadPoolTaskQueue.getThreadPool(), settings, nodeEnvironment);
+        var mergeMetrics = mock(MergeMetrics.class);
         try (
             ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
                 new ShardId("index", "_na_", 1),
                 IndexSettingsModule.newIndexSettings("index", Settings.EMPTY),
                 threadPoolMergeExecutorService,
-                merge -> 0
+                merge -> 0,
+                mergeMetrics
             )
         ) {
             List<OneMerge> executedMergesList = new ArrayList<>();

@@ -97,9 +100,19 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                     return null;
                 }).when(mergeSource).merge(any(OneMerge.class));
                 threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
+
+                // verify queued byte metric is recorded for each merge
+                verify(mergeMetrics, times(i + 1)).incrementQueuedMergeBytes(any(), anyLong());
             }
             threadPoolTaskQueue.runAllTasks();
             assertThat(executedMergesList.size(), is(mergeCount));
+
+            // verify metrics are reported for each merge
+            verify(mergeMetrics, times(mergeCount)).moveQueuedMergeBytesToRunning(any(), anyLong());
+            verify(mergeMetrics, times(mergeCount)).decrementRunningMergeBytes(any());
+            verify(mergeMetrics, times(mergeCount)).markMergeMetrics(any(), anyLong(), anyLong());
+
             // assert merges are executed in ascending size order
             for (int i = 1; i < mergeCount; i++) {
                 assertThat(
@@ -113,6 +126,7 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
     public void testSimpleMergeTaskBacklogging() {
         int mergeExecutorThreadCount = randomIntBetween(1, 5);
+        var mergeMetrics = mock(MergeMetrics.class);
         Settings mergeSchedulerSettings = Settings.builder()
             .put(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING.getKey(), mergeExecutorThreadCount)
             .build();

@@ -122,7 +136,8 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
             new ShardId("index", "_na_", 1),
             IndexSettingsModule.newIndexSettings("index", mergeSchedulerSettings),
             threadPoolMergeExecutorService,
-            merge -> 0
+            merge -> 0,
+            mergeMetrics
         );
         // more merge tasks than merge threads
         int mergeCount = mergeExecutorThreadCount + randomIntBetween(1, 5);

@@ -143,6 +158,9 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
         }
         assertThat(threadPoolMergeScheduler.getRunningMergeTasks().size(), is(mergeExecutorThreadCount));
         assertThat(threadPoolMergeScheduler.getBackloggedMergeTasks().size(), is(mergeCount - mergeExecutorThreadCount));
+
+        // verify no metrics are recorded as no merges have been queued or executed through the merge scheduler
+        verifyNoInteractions(mergeMetrics);
     }

     public void testSimpleMergeTaskReEnqueueingBySize() {

@@ -156,7 +174,8 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
             new ShardId("index", "_na_", 1),
             IndexSettingsModule.newIndexSettings("index", mergeSchedulerSettings),
             threadPoolMergeExecutorService,
-            merge -> 0
+            merge -> 0,
+            MergeMetrics.NOOP
         );
         // sort backlogged merges by size
         PriorityQueue<MergeTask> backloggedMergeTasks = new PriorityQueue<>(
@@ -388,7 +407,8 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                 new ShardId("index", "_na_", 1),
                 IndexSettingsModule.newIndexSettings("index", settings),
                 threadPoolMergeExecutorService,
-                merge -> 0
+                merge -> 0,
+                MergeMetrics.NOOP
             )
         ) {
             MergeSource mergeSource = mock(MergeSource.class);

@@ -454,6 +474,7 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
             // disable fs available disk space feature for this test
            .put(ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_CHECK_INTERVAL_SETTING.getKey(), "0s")
            .build();
+        var mergeMetrics = mock(MergeMetrics.class);
         nodeEnvironment = newNodeEnvironment(settings);
         try (TestThreadPool testThreadPool = new TestThreadPool("test", settings)) {
             ThreadPoolMergeExecutorService threadPoolMergeExecutorService = ThreadPoolMergeExecutorServiceTests

@@ -465,7 +486,8 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                 new ShardId("index", "_na_", 1),
                 IndexSettingsModule.newIndexSettings("index", settings),
                 threadPoolMergeExecutorService,
-                merge -> 0
+                merge -> 0,
+                mergeMetrics
             )
         ) {
             // at least 1 extra merge than there are concurrently allowed

@@ -485,7 +507,11 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                     return null;
                 }).when(mergeSource).merge(any(OneMerge.class));
                 threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
+
+                // verify queued byte metric is recorded for each merge
+                verify(mergeMetrics, times(i + 1)).incrementQueuedMergeBytes(any(), anyLong());
             }
+
             for (int completedMergesCount = 0; completedMergesCount < mergeCount
                 - mergeSchedulerMaxThreadCount; completedMergesCount++) {
                 int finalCompletedMergesCount = completedMergesCount;

@@ -530,6 +556,11 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                 runMergeSemaphore.release();
             }
             assertBusy(() -> assertTrue(threadPoolMergeExecutorService.allDone()));
+
+            // verify metrics are recorded for each merge
+            verify(mergeMetrics, times(mergeCount)).moveQueuedMergeBytesToRunning(any(), anyLong());
+            verify(mergeMetrics, times(mergeCount)).decrementRunningMergeBytes(any());
+            verify(mergeMetrics, times(mergeCount)).markMergeMetrics(any(), anyLong(), anyLong());
         }
     }
 }
@@ -553,7 +584,8 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                 new ShardId("index", "_na_", 1),
                 IndexSettingsModule.newIndexSettings("index", settings),
                 threadPoolMergeExecutorService,
-                merge -> 0
+                merge -> 0,
+                MergeMetrics.NOOP
             )
         ) {
             CountDownLatch mergeDoneLatch = new CountDownLatch(1);

@@ -626,7 +658,8 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                 new ShardId("index", "_na_", 1),
                 indexSettings,
                 threadPoolMergeExecutorService,
-                merge -> 0
+                merge -> 0,
+                MergeMetrics.NOOP
             )
         ) {
             threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));

@@ -656,7 +689,8 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                 new ShardId("index", "_na_", 1),
                 indexSettings,
                 threadPoolMergeExecutorService,
-                merge -> 0
+                merge -> 0,
+                MergeMetrics.NOOP
             )
         ) {
             threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));

@@ -673,7 +707,8 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                 new ShardId("index", "_na_", 1),
                 indexSettings,
                 threadPoolMergeExecutorService,
-                merge -> 0
+                merge -> 0,
+                MergeMetrics.NOOP
             )
         ) {
             // merge submitted upon closing

@@ -690,7 +725,8 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
                 new ShardId("index", "_na_", 1),
                 indexSettings,
                 threadPoolMergeExecutorService,
-                merge -> 0
+                merge -> 0,
+                MergeMetrics.NOOP
             )
         ) {
             // merge submitted upon closing
@ -705,29 +741,63 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
        }
    }
-    public void testMergeSchedulerAbortsMergeWhenShouldSkipMergeIsTrue() {
-        ThreadPoolMergeExecutorService threadPoolMergeExecutorService = mock(ThreadPoolMergeExecutorService.class);
-        // build a scheduler that always returns true for shouldSkipMerge
-        ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
-            new ShardId("index", "_na_", 1),
-            IndexSettingsModule.newIndexSettings("index", Settings.builder().build()),
-            threadPoolMergeExecutorService,
-            merge -> 0
-        ) {
-            @Override
-            protected boolean shouldSkipMerge() {
-                return true;
-            }
-        };
-        MergeSource mergeSource = mock(MergeSource.class);
-        OneMerge oneMerge = mock(OneMerge.class);
-        when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
-        when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
-        when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
-        MergeTask mergeTask = threadPoolMergeScheduler.newMergeTask(mergeSource, oneMerge, randomFrom(MergeTrigger.values()));
-        // verify that calling schedule on the merge task indicates the merge should be aborted
-        Schedule schedule = threadPoolMergeScheduler.schedule(mergeTask);
-        assertThat(schedule, is(Schedule.ABORT));
+    public void testMergeSchedulerAbortsMergeWhenShouldSkipMergeIsTrue() throws IOException {
+        DeterministicTaskQueue threadPoolTaskQueue = new DeterministicTaskQueue();
+        Settings settings = Settings.builder()
+            // disable fs available disk space feature for this test
+            .put(ThreadPoolMergeExecutorService.INDICES_MERGE_DISK_CHECK_INTERVAL_SETTING.getKey(), "0s")
+            .build();
+        nodeEnvironment = newNodeEnvironment(settings);
+        ThreadPoolMergeExecutorService threadPoolMergeExecutorService = ThreadPoolMergeExecutorServiceTests
+            .getThreadPoolMergeExecutorService(threadPoolTaskQueue.getThreadPool(), settings, nodeEnvironment);
+        var mergeMetrics = mock(MergeMetrics.class);
+        try (
+            // build a scheduler that always returns true for shouldSkipMerge
+            ThreadPoolMergeScheduler threadPoolMergeScheduler = new ThreadPoolMergeScheduler(
+                new ShardId("index", "_na_", 1),
+                IndexSettingsModule.newIndexSettings("index", Settings.EMPTY),
+                threadPoolMergeExecutorService,
+                merge -> 0,
+                mergeMetrics
+            ) {
+                @Override
+                protected boolean shouldSkipMerge() {
+                    return true;
+                }
+            }
+        ) {
+            int mergeCount = randomIntBetween(2, 10);
+            for (int i = 0; i < mergeCount; i++) {
+                MergeSource mergeSource = mock(MergeSource.class);
+                OneMerge oneMerge = mock(OneMerge.class);
+                when(oneMerge.getStoreMergeInfo()).thenReturn(getNewMergeInfo(randomLongBetween(1L, 10L)));
+                when(oneMerge.getMergeProgress()).thenReturn(new MergePolicy.OneMergeProgress());
+                when(mergeSource.getNextMerge()).thenReturn(oneMerge, (OneMerge) null);
+                // create the merge task
+                MergeTask mergeTask = threadPoolMergeScheduler.newMergeTask(mergeSource, oneMerge, randomFrom(MergeTrigger.values()));
+                // verify that calling schedule on the merge task indicates the merge should be aborted
+                Schedule schedule = threadPoolMergeScheduler.schedule(mergeTask);
+                assertThat(schedule, is(Schedule.ABORT));
+                // run the merge through the scheduler
+                threadPoolMergeScheduler.merge(mergeSource, randomFrom(MergeTrigger.values()));
+                // verify queued merge byte metrics are still recorded for each merge
+                verify(mergeMetrics, times(i + 1)).incrementQueuedMergeBytes(any(), anyLong());
+            }
+            // run all merges; they should all be aborted
+            threadPoolTaskQueue.runAllTasks();
+            // verify queued bytes metrics are moved to running and decremented
+            verify(mergeMetrics, times(mergeCount)).moveQueuedMergeBytesToRunning(any(), anyLong());
+            verify(mergeMetrics, times(mergeCount)).decrementRunningMergeBytes(any());
+            // verify we did not mark the merges as merged
+            verify(mergeMetrics, times(0)).markMergeMetrics(any(), anyLong(), anyLong());
+        }
     }
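The comments in the rewritten test above spell out the metrics lifecycle this change introduces: estimated merge bytes are recorded as queued when a merge task is created (even for merges that will later be aborted), moved from queued to running when the executor picks the task up, decremented when the task finishes, and only marked as merged for merges that actually ran. The sketch below illustrates that accounting with plain counters; the class and its signatures are hypothetical, written only to mirror the method names verified above, and are not the actual MergeMetrics implementation.

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical counter-based illustration of the queued -> running -> completed
// accounting that the test verifies; not the real MergeMetrics class.
final class IllustrativeMergeByteAccounting {
    private final AtomicLong queuedMergeBytes = new AtomicLong();
    private final AtomicLong runningMergeBytes = new AtomicLong();
    private final AtomicLong totalMergedBytes = new AtomicLong();

    // recorded when a merge task is created, even if it is later aborted
    void incrementQueuedMergeBytes(long estimatedBytes) {
        queuedMergeBytes.addAndGet(estimatedBytes);
    }

    // recorded when the executor starts the task: the estimate moves from queued to running
    void moveQueuedMergeBytesToRunning(long estimatedBytes) {
        queuedMergeBytes.addAndGet(-estimatedBytes);
        runningMergeBytes.addAndGet(estimatedBytes);
    }

    // recorded when the task ends, whether the merge completed or was aborted
    void decrementRunningMergeBytes(long estimatedBytes) {
        runningMergeBytes.addAndGet(-estimatedBytes);
    }

    // recorded only for merges that actually completed; aborted merges never reach this,
    // which is why the test expects markMergeMetrics(...) zero times
    void markMergeMetrics(long mergedBytes) {
        totalMergedBytes.addAndGet(mergedBytes);
    }
}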
private static MergeInfo getNewMergeInfo(long estimatedMergeBytes) { private static MergeInfo getNewMergeInfo(long estimatedMergeBytes) {
@ -746,7 +816,7 @@ public class ThreadPoolMergeSchedulerTests extends ESTestCase {
IndexSettings indexSettings, IndexSettings indexSettings,
ThreadPoolMergeExecutorService threadPoolMergeExecutorService ThreadPoolMergeExecutorService threadPoolMergeExecutorService
) { ) {
super(shardId, indexSettings, threadPoolMergeExecutorService, merge -> 0); super(shardId, indexSettings, threadPoolMergeExecutorService, merge -> 0, MergeMetrics.NOOP);
} }
@Override @Override

View file

@ -5064,7 +5064,8 @@ public class IndexShardTests extends IndexShardTestCase {
config.getIndexCommitListener(), config.getIndexCommitListener(),
config.isPromotableToPrimary(), config.isPromotableToPrimary(),
config.getMapperService(), config.getMapperService(),
config.getEngineResetLock() config.getEngineResetLock(),
config.getMergeMetrics()
); );
return new InternalEngine(configWithWarmer); return new InternalEngine(configWithWarmer);
}); });
@ -5346,7 +5347,8 @@ public class IndexShardTests extends IndexShardTestCase {
config.getIndexCommitListener(), config.getIndexCommitListener(),
config.isPromotableToPrimary(), config.isPromotableToPrimary(),
config.getMapperService(), config.getMapperService(),
config.getEngineResetLock() config.getEngineResetLock(),
config.getMergeMetrics()
); );
lazyEngineConfig.set(engineConfigWithBlockingRefreshListener); lazyEngineConfig.set(engineConfigWithBlockingRefreshListener);
return new InternalEngine(engineConfigWithBlockingRefreshListener) { return new InternalEngine(engineConfigWithBlockingRefreshListener) {

View file

@ -43,6 +43,7 @@ import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.EngineTestCase;
import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngine;
import org.elasticsearch.index.engine.MergeMetrics;
import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService; import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService;
import org.elasticsearch.index.engine.ThreadPoolMergeScheduler; import org.elasticsearch.index.engine.ThreadPoolMergeScheduler;
import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.IdFieldMapper;
@ -175,7 +176,8 @@ public class RefreshListenersTests extends ESTestCase {
null, null,
true, true,
EngineTestCase.createMapperService(), EngineTestCase.createMapperService(),
new EngineResetLock() new EngineResetLock(),
MergeMetrics.NOOP
); );
engine = new InternalEngine(config); engine = new InternalEngine(config);
EngineTestCase.recoverFromTranslog(engine, (e, s) -> 0, Long.MAX_VALUE); EngineTestCase.recoverFromTranslog(engine, (e, s) -> 0, Long.MAX_VALUE);

View file

@ -145,6 +145,7 @@ import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexSettingProviders; import org.elasticsearch.index.IndexSettingProviders;
import org.elasticsearch.index.IndexingPressure; import org.elasticsearch.index.IndexingPressure;
import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.engine.MergeMetrics;
import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.mapper.MapperRegistry; import org.elasticsearch.index.mapper.MapperRegistry;
import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction; import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
@ -2483,6 +2484,7 @@ public class SnapshotResiliencyTests extends ESTestCase {
.client(client) .client(client)
.metaStateService(new MetaStateService(nodeEnv, namedXContentRegistry)) .metaStateService(new MetaStateService(nodeEnv, namedXContentRegistry))
.mapperMetrics(MapperMetrics.NOOP) .mapperMetrics(MapperMetrics.NOOP)
.mergeMetrics(MergeMetrics.NOOP)
.build(); .build();
final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings);
snapshotShardsService = new SnapshotShardsService( snapshotShardsService = new SnapshotShardsService(

View file

@ -169,6 +169,7 @@ public abstract class EngineTestCase extends ESTestCase {
protected InternalEngine engine; protected InternalEngine engine;
protected InternalEngine replicaEngine; protected InternalEngine replicaEngine;
protected MergeMetrics mergeMetrics;
protected IndexSettings defaultSettings; protected IndexSettings defaultSettings;
protected String codecName; protected String codecName;
@ -263,6 +264,7 @@ public abstract class EngineTestCase extends ESTestCase {
primaryTranslogDir = createTempDir("translog-primary"); primaryTranslogDir = createTempDir("translog-primary");
mapperService = createMapperService(defaultSettings.getSettings(), defaultMapping(), extraMappers()); mapperService = createMapperService(defaultSettings.getSettings(), defaultMapping(), extraMappers());
translogHandler = createTranslogHandler(mapperService); translogHandler = createTranslogHandler(mapperService);
mergeMetrics = MergeMetrics.NOOP;
engine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy()); engine = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy());
LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig();
@ -313,7 +315,8 @@ public abstract class EngineTestCase extends ESTestCase {
config.getIndexCommitListener(), config.getIndexCommitListener(),
config.isPromotableToPrimary(), config.isPromotableToPrimary(),
config.getMapperService(), config.getMapperService(),
config.getEngineResetLock() config.getEngineResetLock(),
config.getMergeMetrics()
); );
} }
@ -347,7 +350,8 @@ public abstract class EngineTestCase extends ESTestCase {
config.getIndexCommitListener(), config.getIndexCommitListener(),
config.isPromotableToPrimary(), config.isPromotableToPrimary(),
config.getMapperService(), config.getMapperService(),
config.getEngineResetLock() config.getEngineResetLock(),
config.getMergeMetrics()
); );
} }
@ -381,7 +385,8 @@ public abstract class EngineTestCase extends ESTestCase {
config.getIndexCommitListener(), config.getIndexCommitListener(),
config.isPromotableToPrimary(), config.isPromotableToPrimary(),
config.getMapperService(), config.getMapperService(),
config.getEngineResetLock() config.getEngineResetLock(),
config.getMergeMetrics()
); );
} }
@ -887,7 +892,8 @@ public abstract class EngineTestCase extends ESTestCase {
indexCommitListener, indexCommitListener,
true, true,
mapperService, mapperService,
new EngineResetLock() new EngineResetLock(),
mergeMetrics
); );
} }
@ -929,7 +935,8 @@ public abstract class EngineTestCase extends ESTestCase {
config.getIndexCommitListener(), config.getIndexCommitListener(),
config.isPromotableToPrimary(), config.isPromotableToPrimary(),
config.getMapperService(), config.getMapperService(),
config.getEngineResetLock() config.getEngineResetLock(),
config.getMergeMetrics()
); );
} }
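The test fixtures above, and most of the remaining hunks, simply pass MergeMetrics.NOOP, so the engine and shard constructors can require a non-null metrics object without each test wiring up real telemetry. A minimal null-object sketch of that idea, using hypothetical names rather than the real MergeMetrics type:

// Hypothetical null-object sketch; the actual MergeMetrics interface has more methods
// and its NOOP constant may be implemented differently.
interface MergeBytesSink {
    void onMergeQueued(long estimatedBytes);

    // a shared do-nothing instance lets production code demand a non-null sink
    // while tests opt out of telemetry entirely
    MergeBytesSink NOOP = estimatedBytes -> {};
}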

View file

@ -54,6 +54,7 @@ import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineFactory; import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.EngineTestCase;
import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.engine.MergeMetrics;
import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService; import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService;
import org.elasticsearch.index.engine.ThreadPoolMergeScheduler; import org.elasticsearch.index.engine.ThreadPoolMergeScheduler;
import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperMetrics;
@ -562,7 +563,8 @@ public abstract class IndexShardTestCase extends ESTestCase {
null, null,
MapperMetrics.NOOP, MapperMetrics.NOOP,
new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()), new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()) new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
MergeMetrics.NOOP
); );
indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER);
success = true; success = true;

View file

@ -37,6 +37,7 @@ import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.EngineTestCase;
import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngine;
import org.elasticsearch.index.engine.MergeMetrics;
import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService; import org.elasticsearch.index.engine.ThreadPoolMergeExecutorService;
import org.elasticsearch.index.engine.ThreadPoolMergeScheduler; import org.elasticsearch.index.engine.ThreadPoolMergeScheduler;
import org.elasticsearch.index.engine.TranslogHandler; import org.elasticsearch.index.engine.TranslogHandler;
@ -284,7 +285,8 @@ public class FollowingEngineTests extends ESTestCase {
null, null,
true, true,
mapperService, mapperService,
new EngineResetLock() new EngineResetLock(),
MergeMetrics.NOOP
); );
} }

View file

@ -44,6 +44,7 @@ import org.elasticsearch.index.IndexVersions;
import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.SlowLogFieldProvider;
import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.engine.MergeMetrics;
import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.search.stats.SearchStatsSettings; import org.elasticsearch.index.search.stats.SearchStatsSettings;
import org.elasticsearch.index.shard.IndexingStatsSettings; import org.elasticsearch.index.shard.IndexingStatsSettings;
@ -464,7 +465,8 @@ public class SecurityTests extends ESTestCase {
MapperMetrics.NOOP, MapperMetrics.NOOP,
List.of(), List.of(),
new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()), new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()) new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
MergeMetrics.NOOP
); );
security.onIndexModule(indexModule); security.onIndexModule(indexModule);
// indexReaderWrapper is a SetOnce so if Security#onIndexModule had already set an ReaderWrapper we would get an exception here // indexReaderWrapper is a SetOnce so if Security#onIndexModule had already set an ReaderWrapper we would get an exception here

View file

@ -14,6 +14,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.SlowLogFieldProvider; import org.elasticsearch.index.SlowLogFieldProvider;
import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.engine.MergeMetrics;
import org.elasticsearch.index.mapper.MapperMetrics; import org.elasticsearch.index.mapper.MapperMetrics;
import org.elasticsearch.index.search.stats.SearchStatsSettings; import org.elasticsearch.index.search.stats.SearchStatsSettings;
import org.elasticsearch.index.shard.IndexingStatsSettings; import org.elasticsearch.index.shard.IndexingStatsSettings;
@ -76,7 +77,8 @@ public class WatcherPluginTests extends ESTestCase {
MapperMetrics.NOOP, MapperMetrics.NOOP,
List.of(), List.of(),
new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()), new IndexingStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()) new SearchStatsSettings(ClusterSettings.createBuiltInClusterSettings()),
MergeMetrics.NOOP
); );
// this will trip an assertion if the watcher indexing operation listener is null (which it is) but we try to add it // this will trip an assertion if the watcher indexing operation listener is null (which it is) but we try to add it
watcher.onIndexModule(indexModule); watcher.onIndexModule(indexModule);