Add client-benchmark-noop-api-plugin to stress clients even more in benchmarks (#20103)

Daniel Mitterdorfer 2016-08-26 09:05:47 +02:00 committed by GitHub
parent bc136a90d5
commit 7b81c4ca59
25 changed files with 1390 additions and 115 deletions


@@ -1,34 +1,53 @@
-Steps to execute the benchmark:
+### Steps to execute the benchmark

-1. Start Elasticsearch on the target host (ideally *not* on the same machine)
-2. Create an empty index with the mapping you want to benchmark
-3. Build an uberjar with `gradle :client:benchmark:shadowJar` and execute it.
-4. Delete the index
-5. Repeat steps 2. - 4. for multiple iterations. The first iterations are intended as warmup for Elasticsearch itself. Always start the same benchmark in step 3!
+1. Build `client-benchmark-noop-api-plugin` with `gradle :client:client-benchmark-noop-api-plugin:assemble`
+2. Install it on the target host with `bin/elasticsearch-plugin install file:///full/path/to/client-benchmark-noop-api-plugin.zip`
+3. Start Elasticsearch on the target host (ideally *not* on the same machine)
+4. Build an uberjar with `gradle :client:benchmark:shadowJar` and execute it.
+5. After the benchmark: shut down Elasticsearch and delete the data directory

Repeat all steps above for the other benchmark candidate.

-Example benchmark:
-
-* Download benchmark data from http://benchmarks.elastic.co/corpora/geonames/documents.json.bz2 and decompress
-* Use the mapping file https://github.com/elastic/rally-tracks/blob/master/geonames/mappings.json to create the index
-
-Example command line parameter list:
+### Example benchmark
+
+In general, you should define a few GC-related settings `-Xms8192M -Xmx8192M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails` and keep an eye on GC activity. You can also define `-XX:+PrintCompilation` to see JIT activity.
+
+#### Bulk indexing
+
+Download benchmark data from http://benchmarks.elastic.co/corpora/geonames/documents.json.bz2 and decompress it.
+
+Example command line parameters:

```
-rest 192.168.2.2 /home/your_user_name/.rally/benchmarks/data/geonames/documents.json geonames type 8647880 5000 "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }\""
+rest bulk 192.168.2.2 ./documents.json geonames type 8647880 5000
```

The parameters are in order:

* Client type: Use either "rest" or "transport"
+* Benchmark type: Use either "bulk" or "search"
* Benchmark target host IP (the host where Elasticsearch is running)
* full path to the file that should be bulk indexed
* name of the index
* name of the (sole) type in the index
* number of documents in the file
* bulk size
-* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally which automatically adds a root key `query`, so it must not be present in the command line parameter.
-
-You should also define a few GC-related settings `-Xms4096M -Xmx4096M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails` and keep an eye on GC activity. You can also define `-XX:+PrintCompilation` to see JIT activity.
+
+#### Search
+
+Example command line parameters:
+
+```
+rest search 192.168.2.2 geonames "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }" 500,1000,1100,1200
+```
+
+The parameters are in order:
+
+* Client type: Use either "rest" or "transport"
+* Benchmark type: Use either "bulk" or "search"
+* Benchmark target host IP (the host where Elasticsearch is running)
+* name of the index
+* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally which automatically adds a root key `query`, so it must not be present in the command line parameter.
+* A comma-separated list of target throughput rates
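
Putting the steps above together, an end-to-end run might look like the following sketch. The uberjar path and name are illustrative (use whatever `shadowJar` actually produces); host, index, and file locations are the example values from above.

```
# build the no-op plugin and the benchmark uberjar (from the Elasticsearch source root)
gradle :client:client-benchmark-noop-api-plugin:assemble
gradle :client:benchmark:shadowJar

# on the target host: install the plugin, then start Elasticsearch
bin/elasticsearch-plugin install file:///full/path/to/client-benchmark-noop-api-plugin.zip

# run a bulk indexing trial and then a search trial (jar name is illustrative)
java -jar client-benchmark-all.jar rest bulk 192.168.2.2 ./documents.json geonames type 8647880 5000
java -jar client-benchmark-all.jar rest search 192.168.2.2 geonames \
  "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }" 500,1000,1100,1200
```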


@@ -50,6 +50,8 @@ dependencies {
    compile 'org.apache.commons:commons-math3:3.2'
    compile("org.elasticsearch.client:rest:${version}")
+   // bottleneck should be the client, not Elasticsearch
+   compile project(path: ':client:client-benchmark-noop-api-plugin')
    // for transport client
    compile("org.elasticsearch:elasticsearch:${version}")
    compile("org.elasticsearch.client:transport:${version}")


@@ -27,7 +27,11 @@ import org.elasticsearch.common.SuppressForbidden;
 import java.io.Closeable;
 import java.lang.management.GarbageCollectorMXBean;
 import java.lang.management.ManagementFactory;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.stream.Collectors;

 public abstract class AbstractBenchmark<T extends Closeable> {
     private static final int SEARCH_BENCHMARK_ITERATIONS = 10_000;

@@ -40,52 +44,111 @@ public abstract class AbstractBenchmark<T extends Closeable> {
     @SuppressForbidden(reason = "system out is ok for a command line tool")
     public final void run(String[] args) throws Exception {
-        if (args.length < 6) {
-            System.err.println(
-                "usage: benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize [search request body]");
+        if (args.length < 1) {
+            System.err.println("usage: [search|bulk]");
             System.exit(1);
         }
-        String benchmarkTargetHost = args[0];
-        String indexFilePath = args[1];
-        String indexName = args[2];
-        String typeName = args[3];
-        int totalDocs = Integer.valueOf(args[4]);
-        int bulkSize = Integer.valueOf(args[5]);
+        switch (args[0]) {
+            case "search":
+                runSearchBenchmark(args);
+                break;
+            case "bulk":
+                runBulkIndexBenchmark(args);
+                break;
+            default:
+                System.err.println("Unknown benchmark type [" + args[0] + "]");
+                System.exit(1);
+        }
+    }
+
+    @SuppressForbidden(reason = "system out is ok for a command line tool")
+    private void runBulkIndexBenchmark(String[] args) throws Exception {
+        if (args.length != 7) {
+            System.err.println(
+                "usage: 'bulk' benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize");
+            System.exit(1);
+        }
+        String benchmarkTargetHost = args[1];
+        String indexFilePath = args[2];
+        String indexName = args[3];
+        String typeName = args[4];
+        int totalDocs = Integer.valueOf(args[5]);
+        int bulkSize = Integer.valueOf(args[6]);

         int totalIterationCount = (int) Math.floor(totalDocs / bulkSize);
         // consider 40% of all iterations as warmup iterations
         int warmupIterations = (int) (0.4d * totalIterationCount);
         int iterations = totalIterationCount - warmupIterations;
-        String searchBody = (args.length == 7) ? args[6] : null;

         T client = client(benchmarkTargetHost);

         BenchmarkRunner benchmark = new BenchmarkRunner(warmupIterations, iterations,
             new BulkBenchmarkTask(
-                bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations + iterations, bulkSize));
+                bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations, iterations, bulkSize));

         try {
-            benchmark.run();
-            if (searchBody != null) {
-                for (int run = 1; run <= 5; run++) {
-                    System.out.println("=============");
-                    System.out.println(" Trial run " + run);
-                    System.out.println("=============");
-
-                    for (int throughput = 100; throughput <= 100_000; throughput *= 10) {
-                        //GC between trials to reduce the likelihood of a GC occurring in the middle of a trial.
-                        runGc();
-                        BenchmarkRunner searchBenchmark = new BenchmarkRunner(SEARCH_BENCHMARK_ITERATIONS, SEARCH_BENCHMARK_ITERATIONS,
-                            new SearchBenchmarkTask(
-                                searchRequestExecutor(client, indexName), searchBody, 2 * SEARCH_BENCHMARK_ITERATIONS, throughput));
-                        System.out.printf("Target throughput = %d ops / s%n", throughput);
-                        searchBenchmark.run();
-                    }
-                }
-            }
+            runTrials(() -> {
+                runGc();
+                benchmark.run();
+            });
         } finally {
             client.close();
         }
+    }
+
+    @SuppressForbidden(reason = "system out is ok for a command line tool")
+    private void runSearchBenchmark(String[] args) throws Exception {
+        if (args.length != 5) {
+            System.err.println(
+                "usage: 'search' benchmarkTargetHostIp indexName searchRequestBody throughputRates");
+            System.exit(1);
+        }
+        String benchmarkTargetHost = args[1];
+        String indexName = args[2];
+        String searchBody = args[3];
+        List<Integer> throughputRates = Arrays.asList(args[4].split(",")).stream().map(Integer::valueOf).collect(Collectors.toList());
+
+        T client = client(benchmarkTargetHost);
+
+        try {
+            runTrials(() -> {
+                for (int throughput : throughputRates) {
+                    //GC between trials to reduce the likelihood of a GC occurring in the middle of a trial.
+                    runGc();
+                    BenchmarkRunner benchmark = new BenchmarkRunner(SEARCH_BENCHMARK_ITERATIONS, SEARCH_BENCHMARK_ITERATIONS,
+                        new SearchBenchmarkTask(
+                            searchRequestExecutor(client, indexName), searchBody, SEARCH_BENCHMARK_ITERATIONS,
+                            SEARCH_BENCHMARK_ITERATIONS, throughput));
+                    System.out.printf("Target throughput = %d ops / s%n", throughput);
+                    benchmark.run();
+                }
+            });
+        } finally {
+            client.close();
+        }
+    }
+
+    @SuppressForbidden(reason = "system out is ok for a command line tool")
+    private void runTrials(Runnable runner) {
+        int totalWarmupTrialRuns = 1;
+        for (int run = 1; run <= totalWarmupTrialRuns; run++) {
+            System.out.println("======================");
+            System.out.println(" Warmup trial run " + run + "/" + totalWarmupTrialRuns);
+            System.out.println("======================");
+            runner.run();
+        }
+
+        int totalTrialRuns = 5;
+        for (int run = 1; run <= totalTrialRuns; run++) {
+            System.out.println("================");
+            System.out.println(" Trial run " + run + "/" + totalTrialRuns);
+            System.out.println("================");
+            runner.run();
+        }
     }

     /**


@@ -37,7 +37,7 @@ public class BenchmarkMain {
                 benchmark = new RestClientBenchmark();
                 break;
             default:
-                System.err.println("Unknown benchmark type [" + type + "]");
+                System.err.println("Unknown client type [" + type + "]");
                 System.exit(1);
         }
         benchmark.run(Arrays.copyOfRange(args, 1, args.length));


@@ -40,8 +40,8 @@ public final class BenchmarkRunner {
     }

     @SuppressForbidden(reason = "system out is ok for a command line tool")
-    public void run() throws Exception {
-        SampleRecorder recorder = new SampleRecorder(warmupIterations, iterations);
+    public void run() {
+        SampleRecorder recorder = new SampleRecorder(iterations);

         System.out.printf("Running %s with %d warmup iterations and %d iterations.%n",
             task.getClass().getSimpleName(), warmupIterations, iterations);

@@ -52,6 +52,8 @@ public final class BenchmarkRunner {
         } catch (InterruptedException ex) {
             Thread.currentThread().interrupt();
             return;
+        } catch (Exception ex) {
+            throw new RuntimeException(ex);
         }

         List<Sample> samples = recorder.getSamples();

@@ -62,17 +64,24 @@ public final class BenchmarkRunner {
         }

         for (Metrics metrics : summaryMetrics) {
-            System.out.printf(Locale.ROOT, "Operation: %s%n", metrics.operation);
-            String stats = String.format(Locale.ROOT,
-                "Throughput = %f ops/s, p90 = %f ms, p95 = %f ms, p99 = %f ms, p99.9 = %f ms, p99.99 = %f ms",
-                metrics.throughput,
-                metrics.serviceTimeP90, metrics.serviceTimeP95,
-                metrics.serviceTimeP99, metrics.serviceTimeP999,
-                metrics.serviceTimeP9999);
-            System.out.println(repeat(stats.length(), '-'));
-            System.out.println(stats);
+            String throughput = String.format(Locale.ROOT, "Throughput [ops/s]: %f", metrics.throughput);
+            String serviceTimes = String.format(Locale.ROOT,
+                "Service time [ms]: p50 = %f, p90 = %f, p95 = %f, p99 = %f, p99.9 = %f, p99.99 = %f",
+                metrics.serviceTimeP50, metrics.serviceTimeP90, metrics.serviceTimeP95,
+                metrics.serviceTimeP99, metrics.serviceTimeP999, metrics.serviceTimeP9999);
+            String latencies = String.format(Locale.ROOT,
+                "Latency [ms]: p50 = %f, p90 = %f, p95 = %f, p99 = %f, p99.9 = %f, p99.99 = %f",
+                metrics.latencyP50, metrics.latencyP90, metrics.latencyP95,
+                metrics.latencyP99, metrics.latencyP999, metrics.latencyP9999);
+
+            int lineLength = Math.max(serviceTimes.length(), latencies.length());
+
+            System.out.println(repeat(lineLength, '-'));
+            System.out.println(throughput);
+            System.out.println(serviceTimes);
+            System.out.println(latencies);
             System.out.printf("success count = %d, error count = %d%n", metrics.successCount, metrics.errorCount);
-            System.out.println(repeat(stats.length(), '-'));
+            System.out.println(repeat(lineLength, '-'));
         }
     }


@@ -23,23 +23,38 @@ public final class Metrics {
     public final long successCount;
     public final long errorCount;
     public final double throughput;
+    public final double serviceTimeP50;
     public final double serviceTimeP90;
     public final double serviceTimeP95;
     public final double serviceTimeP99;
     public final double serviceTimeP999;
     public final double serviceTimeP9999;
+    public final double latencyP50;
+    public final double latencyP90;
+    public final double latencyP95;
+    public final double latencyP99;
+    public final double latencyP999;
+    public final double latencyP9999;

     public Metrics(String operation, long successCount, long errorCount, double throughput,
-                   double serviceTimeP90, double serviceTimeP95, double serviceTimeP99,
-                   double serviceTimeP999, double serviceTimeP9999) {
+                   double serviceTimeP50, double serviceTimeP90, double serviceTimeP95, double serviceTimeP99,
+                   double serviceTimeP999, double serviceTimeP9999, double latencyP50, double latencyP90,
+                   double latencyP95, double latencyP99, double latencyP999, double latencyP9999) {
         this.operation = operation;
         this.successCount = successCount;
         this.errorCount = errorCount;
         this.throughput = throughput;
+        this.serviceTimeP50 = serviceTimeP50;
         this.serviceTimeP90 = serviceTimeP90;
         this.serviceTimeP95 = serviceTimeP95;
         this.serviceTimeP99 = serviceTimeP99;
         this.serviceTimeP999 = serviceTimeP999;
         this.serviceTimeP9999 = serviceTimeP9999;
+        this.latencyP50 = latencyP50;
+        this.latencyP90 = latencyP90;
+        this.latencyP95 = latencyP95;
+        this.latencyP99 = latencyP99;
+        this.latencyP999 = latencyP999;
+        this.latencyP9999 = latencyP9999;
     }
 }


@@ -50,13 +50,16 @@ public final class MetricsCalculator {
         for (Map.Entry<String, List<Sample>> operationAndMetrics : samplesPerOperation.entrySet()) {
             List<Sample> samples = operationAndMetrics.getValue();
             double[] serviceTimes = new double[samples.size()];
+            double[] latencies = new double[samples.size()];
             int it = 0;
             long firstStart = Long.MAX_VALUE;
             long latestEnd = Long.MIN_VALUE;
             for (Sample sample : samples) {
                 firstStart = Math.min(sample.getStartTimestamp(), firstStart);
                 latestEnd = Math.max(sample.getStopTimestamp(), latestEnd);
-                serviceTimes[it++] = sample.getServiceTime();
+                serviceTimes[it] = sample.getServiceTime();
+                latencies[it] = sample.getLatency();
+                it++;
             }

             metrics.add(new Metrics(operationAndMetrics.getKey(),

@@ -65,11 +68,18 @@ public final class MetricsCalculator {
                 // throughput calculation is based on the total (Wall clock) time it took to generate all samples
                 calculateThroughput(samples.size(), latestEnd - firstStart),
                 // convert ns -> ms without losing precision
+                StatUtils.percentile(serviceTimes, 50.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                 StatUtils.percentile(serviceTimes, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                 StatUtils.percentile(serviceTimes, 95.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                 StatUtils.percentile(serviceTimes, 99.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
                 StatUtils.percentile(serviceTimes, 99.9d) / TimeUnit.MILLISECONDS.toNanos(1L),
-                StatUtils.percentile(serviceTimes, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L)));
+                StatUtils.percentile(serviceTimes, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 50.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 95.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 99.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 99.9d) / TimeUnit.MILLISECONDS.toNanos(1L),
+                StatUtils.percentile(latencies, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L)));
         }
         return metrics;
     }


@@ -20,12 +20,14 @@ package org.elasticsearch.client.benchmark.metrics;

 public final class Sample {
     private final String operation;
+    private final long expectedStartTimestamp;
     private final long startTimestamp;
     private final long stopTimestamp;
     private final boolean success;

-    public Sample(String operation, long startTimestamp, long stopTimestamp, boolean success) {
+    public Sample(String operation, long expectedStartTimestamp, long startTimestamp, long stopTimestamp, boolean success) {
         this.operation = operation;
+        this.expectedStartTimestamp = expectedStartTimestamp;
         this.startTimestamp = startTimestamp;
         this.stopTimestamp = stopTimestamp;
         this.success = success;

@@ -48,7 +50,10 @@ public final class Sample {
     }

     public long getServiceTime() {
-        // this is *not* latency, we're not including wait time in the queue (on purpose)
         return stopTimestamp - startTimestamp;
     }
+
+    public long getLatency() {
+        return stopTimestamp - expectedStartTimestamp;
+    }
 }


@@ -28,21 +28,14 @@ import java.util.List;
  * This class is NOT threadsafe.
  */
 public final class SampleRecorder {
-    private final int warmupIterations;
     private final List<Sample> samples;
-    private int currentIteration;

-    public SampleRecorder(int warmupIterations, int iterations) {
-        this.warmupIterations = warmupIterations;
+    public SampleRecorder(int iterations) {
         this.samples = new ArrayList<>(iterations);
     }

     public void addSample(Sample sample) {
-        currentIteration++;
-        // only add samples after warmup
-        if (currentIteration > warmupIterations) {
-            samples.add(sample);
-        }
+        samples.add(sample);
     }

     public List<Sample> getSamples() {


@@ -43,15 +43,18 @@ import java.util.concurrent.TimeUnit;

 public class BulkBenchmarkTask implements BenchmarkTask {
     private final BulkRequestExecutor requestExecutor;
     private final String indexFilePath;
-    private final int totalIterations;
+    private final int warmupIterations;
+    private final int measurementIterations;
     private final int bulkSize;

     private LoadGenerator generator;
     private ExecutorService executorService;

-    public BulkBenchmarkTask(BulkRequestExecutor requestExecutor, String indexFilePath, int totalIterations, int bulkSize) {
+    public BulkBenchmarkTask(BulkRequestExecutor requestExecutor, String indexFilePath, int warmupIterations, int measurementIterations,
+                             int bulkSize) {
         this.requestExecutor = requestExecutor;
         this.indexFilePath = indexFilePath;
-        this.totalIterations = totalIterations;
+        this.warmupIterations = warmupIterations;
+        this.measurementIterations = measurementIterations;
         this.bulkSize = bulkSize;
     }

@@ -60,7 +63,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
     public void setUp(SampleRecorder sampleRecorder) {
         BlockingQueue<List<String>> bulkQueue = new ArrayBlockingQueue<>(256);

-        BulkIndexer runner = new BulkIndexer(bulkQueue, totalIterations, sampleRecorder, requestExecutor);
+        BulkIndexer runner = new BulkIndexer(bulkQueue, warmupIterations, measurementIterations, sampleRecorder, requestExecutor);

         executorService = Executors.newSingleThreadExecutor((r) -> new Thread(r, "bulk-index-runner"));
         executorService.submit(runner);

@@ -135,21 +138,23 @@ public class BulkBenchmarkTask implements BenchmarkTask {
         private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());

         private final BlockingQueue<List<String>> bulkData;
-        private final int totalIterations;
+        private final int warmupIterations;
+        private final int measurementIterations;
         private final BulkRequestExecutor bulkRequestExecutor;
         private final SampleRecorder sampleRecorder;

-        public BulkIndexer(BlockingQueue<List<String>> bulkData, int totalIterations, SampleRecorder sampleRecorder,
-                           BulkRequestExecutor bulkRequestExecutor) {
+        public BulkIndexer(BlockingQueue<List<String>> bulkData, int warmupIterations, int measurementIterations,
+                           SampleRecorder sampleRecorder, BulkRequestExecutor bulkRequestExecutor) {
             this.bulkData = bulkData;
-            this.totalIterations = totalIterations;
+            this.warmupIterations = warmupIterations;
+            this.measurementIterations = measurementIterations;
             this.bulkRequestExecutor = bulkRequestExecutor;
             this.sampleRecorder = sampleRecorder;
         }

         @Override
         public void run() {
-            for (int iteration = 0; iteration < totalIterations; iteration++) {
+            for (int iteration = 0; iteration < warmupIterations + measurementIterations; iteration++) {
                 boolean success = false;
                 List<String> currentBulk;
                 try {

@@ -158,8 +163,7 @@ public class BulkBenchmarkTask implements BenchmarkTask {
                     Thread.currentThread().interrupt();
                     return;
                 }
-                // Yes, this approach is prone to coordinated omission *but* we have to consider that we want to benchmark a closed system
-                // with backpressure here instead of an open system. So this is actually correct in this case.
+                //measure only service time, latency is not that interesting for a throughput benchmark
                 long start = System.nanoTime();
                 try {
                     success = bulkRequestExecutor.bulkIndex(currentBulk);

@@ -167,7 +171,9 @@ public class BulkBenchmarkTask implements BenchmarkTask {
                     logger.warn("Error while executing bulk request", ex);
                 }
                 long stop = System.nanoTime();
-                sampleRecorder.addSample(new Sample("bulk", start, stop, success));
+                if (iteration >= warmupIterations) {
+                    sampleRecorder.addSample(new Sample("bulk", start, start, stop, success));
+                }
             }
         }
     }


@@ -25,20 +25,20 @@ import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
 import java.util.concurrent.TimeUnit;

 public class SearchBenchmarkTask implements BenchmarkTask {
-    private static final long MICROS_PER_SEC = TimeUnit.SECONDS.toMicros(1L);
-    private static final long NANOS_PER_MICRO = TimeUnit.MICROSECONDS.toNanos(1L);
-
     private final SearchRequestExecutor searchRequestExecutor;
     private final String searchRequestBody;

-    private final int iterations;
+    private final int warmupIterations;
+    private final int measurementIterations;
     private final int targetThroughput;

     private SampleRecorder sampleRecorder;

-    public SearchBenchmarkTask(SearchRequestExecutor searchRequestExecutor, String body, int iterations, int targetThroughput) {
+    public SearchBenchmarkTask(SearchRequestExecutor searchRequestExecutor, String body, int warmupIterations,
+                               int measurementIterations, int targetThroughput) {
         this.searchRequestExecutor = searchRequestExecutor;
         this.searchRequestBody = body;
-        this.iterations = iterations;
+        this.warmupIterations = warmupIterations;
+        this.measurementIterations = measurementIterations;
         this.targetThroughput = targetThroughput;
     }

@@ -49,28 +49,25 @@ public class SearchBenchmarkTask implements BenchmarkTask {
     @Override
     public void run() throws Exception {
-        for (int iteration = 0; iteration < this.iterations; iteration++) {
-            final long start = System.nanoTime();
-            boolean success = searchRequestExecutor.search(searchRequestBody);
-            final long stop = System.nanoTime();
-            sampleRecorder.addSample(new Sample("search", start, stop, success));
-
-            int waitTime = (int) Math.floor(MICROS_PER_SEC / targetThroughput - (stop - start) / NANOS_PER_MICRO);
-            if (waitTime > 0) {
-                waitMicros(waitTime);
-            }
-        }
+        runIterations(warmupIterations, false);
+        runIterations(measurementIterations, true);
     }

-    private void waitMicros(int waitTime) throws InterruptedException {
-        // Thread.sleep() time is not very accurate (it's most of the time around 1 - 2 ms off)
-        // we busy spin all the time to avoid introducing additional measurement artifacts (noticed 100% skew on 99.9th percentile)
-        // this approach is not suitable for low throughput rates (in the second range) though
-        if (waitTime > 0) {
-            long end = System.nanoTime() + 1000L * waitTime;
-            while (end > System.nanoTime()) {
+    private void runIterations(int iterations, boolean addSample) {
+        long interval = TimeUnit.SECONDS.toNanos(1L) / targetThroughput;
+
+        long totalStart = System.nanoTime();
+        for (int iteration = 0; iteration < iterations; iteration++) {
+            long expectedStart = totalStart + iteration * interval;
+            while (System.nanoTime() < expectedStart) {
                 // busy spin
             }
+            long start = System.nanoTime();
+            boolean success = searchRequestExecutor.search(searchRequestBody);
+            long stop = System.nanoTime();
+            if (addSample) {
+                sampleRecorder.addSample(new Sample("search", expectedStart, start, stop, success));
+            }
         }
     }


@@ -19,14 +19,20 @@
 package org.elasticsearch.client.benchmark.rest;

 import org.apache.http.HttpEntity;
+import org.apache.http.HttpHeaders;
 import org.apache.http.HttpHost;
 import org.apache.http.HttpStatus;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.conn.ConnectionKeepAliveStrategy;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
+import org.apache.http.message.BasicHeader;
 import org.apache.http.nio.entity.NStringEntity;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientBuilder;
 import org.elasticsearch.client.benchmark.AbstractBenchmark;
 import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor;
 import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor;

@@ -45,7 +51,12 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
     @Override
     protected RestClient client(String benchmarkTargetHost) {
-        return RestClient.builder(new HttpHost(benchmarkTargetHost, 9200)).build();
+        return RestClient
+            .builder(new HttpHost(benchmarkTargetHost, 9200))
+            .setHttpClientConfigCallback(b -> b.setDefaultHeaders(
+                Collections.singleton(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "gzip"))))
+            .setRequestConfigCallback(b -> b.setContentCompressionEnabled(true))
+            .build();
     }

     @Override

@@ -77,7 +88,7 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
             }
             HttpEntity entity = new NStringEntity(bulkRequestBody.toString(), ContentType.APPLICATION_JSON);
             try {
-                Response response = client.performRequest("POST", "/geonames/type/_bulk", Collections.emptyMap(), entity);
+                Response response = client.performRequest("POST", "/geonames/type/_noop_bulk", Collections.emptyMap(), entity);
                 return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
             } catch (Exception e) {
                 throw new ElasticsearchException(e);

@@ -91,7 +102,7 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
         private RestSearchRequestExecutor(RestClient client, String indexName) {
             this.client = client;
-            this.endpoint = "/" + indexName + "/_search";
+            this.endpoint = "/" + indexName + "/_noop_search";
         }

         @Override


@@ -19,7 +19,6 @@
 package org.elasticsearch.client.benchmark.transport;

 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.search.SearchResponse;

@@ -30,6 +29,11 @@ import org.elasticsearch.client.transport.TransportClient;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.InetSocketTransportAddress;
 import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.plugin.noop.NoopPlugin;
+import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
+import org.elasticsearch.plugin.noop.action.bulk.NoopBulkRequestBuilder;
+import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
+import org.elasticsearch.plugin.noop.action.search.NoopSearchRequestBuilder;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.transport.client.PreBuiltTransportClient;

@@ -46,7 +50,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportClient> {
     @Override
     protected TransportClient client(String benchmarkTargetHost) throws Exception {
-        TransportClient client = new PreBuiltTransportClient(Settings.EMPTY);
+        TransportClient client = new PreBuiltTransportClient(Settings.EMPTY, NoopPlugin.class);
         client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(benchmarkTargetHost), 9300));
         return client;
     }

@@ -74,7 +78,7 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportClient> {
         @Override
         public boolean bulkIndex(List<String> bulkData) {
-            BulkRequestBuilder builder = client.prepareBulk();
+            NoopBulkRequestBuilder builder = NoopBulkAction.INSTANCE.newRequestBuilder(client);
             for (String bulkItem : bulkData) {
                 builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8)));
             }

@@ -103,8 +107,11 @@ public final class TransportClientBenchmark extends AbstractBenchmark<TransportClient> {
         @Override
         public boolean search(String source) {
             final SearchResponse response;
+            NoopSearchRequestBuilder builder = NoopSearchAction.INSTANCE.newRequestBuilder(client);
             try {
-                response = client.prepareSearch(indexName).setQuery(QueryBuilders.wrapperQuery(source)).execute().get();
+                builder.setIndices(indexName);
+                builder.setQuery(QueryBuilders.wrapperQuery(source));
+                response = client.execute(NoopSearchAction.INSTANCE, builder.request()).get();
                 return response.status() == RestStatus.OK;
             } catch (InterruptedException e) {
                 Thread.currentThread().interrupt();


@@ -0,0 +1,23 @@
### Purpose
This plugin provides empty REST and transport endpoints for bulk indexing and search. It is used to avoid accidental server-side bottlenecks in client-side benchmarking.
### Build Instructions
Build the plugin with `gradle :client:client-benchmark-noop-api-plugin:assemble` from the Elasticsearch root project directory.
### Installation Instructions
After the binary has been built, install it with `bin/elasticsearch-plugin install file:///full/path/to/noop-plugin.zip`.
### Usage
The plugin provides two REST endpoints:
* `/_noop_bulk` and all variations that the bulk endpoint provides (except that all no-op endpoints are called `_noop_bulk` instead of `_bulk`)
* `/_noop_search` and all variations that the search endpoint provides (except that all no-op endpoints are called `_noop_search` instead of `_search`)
The corresponding transport actions are:
* `org.elasticsearch.plugin.noop.action.bulk.TransportNoopBulkAction`
* `org.elasticsearch.plugin.noop.action.search.TransportNoopSearchAction`
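
For a quick smoke test once the plugin is installed, the no-op endpoints can be exercised directly. The sketch below assumes Elasticsearch is listening on localhost:9200 and uses an illustrative index and type name; the request body formats are the same as for the regular `_bulk` and `_search` endpoints.

```
# bulk: same request body format as /_bulk, but nothing is indexed
curl -XPOST 'http://localhost:9200/geonames/type/_noop_bulk' -d '
{ "index" : {} }
{ "name" : "Sankt Georgen" }
'

# search: same request body format as /_search, but no query is executed
curl -XPOST 'http://localhost:9200/geonames/_noop_search' -d '{ "query" : { "match_all" : {} } }'
```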


@@ -0,0 +1,36 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
group = 'org.elasticsearch.plugin'
apply plugin: 'elasticsearch.esplugin'
apply plugin: 'com.bmuschko.nexus'
esplugin {
name 'client-benchmark-noop-api'
description 'Stubbed out Elasticsearch actions that can be used for client-side benchmarking'
classname 'org.elasticsearch.plugin.noop.NoopPlugin'
}
compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
// no unit tests
test.enabled = false
integTest.enabled = false


@@ -0,0 +1,49 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop;
import org.elasticsearch.plugin.noop.action.bulk.NoopBulkAction;
import org.elasticsearch.plugin.noop.action.bulk.RestNoopBulkAction;
import org.elasticsearch.plugin.noop.action.bulk.TransportNoopBulkAction;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
import org.elasticsearch.plugin.noop.action.search.RestNoopSearchAction;
import org.elasticsearch.plugin.noop.action.search.TransportNoopSearchAction;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestHandler;
import java.util.Arrays;
import java.util.List;
public class NoopPlugin extends Plugin implements ActionPlugin {
@Override
public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
return Arrays.asList(
new ActionHandler<>(NoopBulkAction.INSTANCE, TransportNoopBulkAction.class),
new ActionHandler<>(NoopSearchAction.INSTANCE, TransportNoopSearchAction.class)
);
}
@Override
public List<Class<? extends RestHandler>> getRestHandlers() {
return Arrays.asList(RestNoopBulkAction.class, RestNoopSearchAction.class);
}
}


@@ -0,0 +1,44 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.bulk;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.ElasticsearchClient;
public class NoopBulkAction extends Action<BulkRequest, BulkResponse, NoopBulkRequestBuilder> {
public static final String NAME = "mock:data/write/bulk";
public static final NoopBulkAction INSTANCE = new NoopBulkAction();
private NoopBulkAction() {
super(NAME);
}
@Override
public NoopBulkRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new NoopBulkRequestBuilder(client, this);
}
@Override
public BulkResponse newResponse() {
return new BulkResponse(null, 0);
}
}


@@ -0,0 +1,153 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.bulk;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteRequestBuilder;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequestBuilder;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
public class NoopBulkRequestBuilder extends ActionRequestBuilder<BulkRequest, BulkResponse, NoopBulkRequestBuilder>
implements WriteRequestBuilder<NoopBulkRequestBuilder> {
public NoopBulkRequestBuilder(ElasticsearchClient client, NoopBulkAction action) {
super(client, action, new BulkRequest());
}
/**
* Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest}
* (for example, if no id is provided, one will be generated, or usage of the create flag).
*/
public NoopBulkRequestBuilder add(IndexRequest request) {
super.request.add(request);
return this;
}
/**
* Adds an {@link IndexRequest} to the list of actions to execute. Follows the same behavior of {@link IndexRequest}
* (for example, if no id is provided, one will be generated, or usage of the create flag).
*/
public NoopBulkRequestBuilder add(IndexRequestBuilder request) {
super.request.add(request.request());
return this;
}
/**
* Adds an {@link DeleteRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(DeleteRequest request) {
super.request.add(request);
return this;
}
/**
* Adds an {@link DeleteRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(DeleteRequestBuilder request) {
super.request.add(request.request());
return this;
}
/**
* Adds an {@link UpdateRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(UpdateRequest request) {
super.request.add(request);
return this;
}
/**
* Adds an {@link UpdateRequest} to the list of actions to execute.
*/
public NoopBulkRequestBuilder add(UpdateRequestBuilder request) {
super.request.add(request.request());
return this;
}
/**
* Adds framed data in binary format
*/
public NoopBulkRequestBuilder add(byte[] data, int from, int length) throws Exception {
request.add(data, from, length, null, null);
return this;
}
/**
* Adds framed data in binary format
*/
public NoopBulkRequestBuilder add(byte[] data, int from, int length, @Nullable String defaultIndex, @Nullable String defaultType)
throws Exception {
request.add(data, from, length, defaultIndex, defaultType);
return this;
}
/**
* Sets the number of shard copies that must be active before proceeding with the write.
* See {@link ReplicationRequest#waitForActiveShards(ActiveShardCount)} for details.
*/
public NoopBulkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
request.waitForActiveShards(waitForActiveShards);
return this;
}
/**
* A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
* shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
* to get the ActiveShardCount.
*/
public NoopBulkRequestBuilder setWaitForActiveShards(final int waitForActiveShards) {
return setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
public final NoopBulkRequestBuilder setTimeout(TimeValue timeout) {
request.timeout(timeout);
return this;
}
/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/
public final NoopBulkRequestBuilder setTimeout(String timeout) {
request.timeout(timeout);
return this;
}
/**
* The number of actions currently in the bulk.
*/
public int numberOfActions() {
return request.numberOfActions();
}
}


@@ -0,0 +1,117 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.bulk;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestBuilderListener;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
import static org.elasticsearch.rest.RestStatus.OK;
public class RestNoopBulkAction extends BaseRestHandler {
@Inject
public RestNoopBulkAction(Settings settings, RestController controller) {
super(settings);
controller.registerHandler(POST, "/_noop_bulk", this);
controller.registerHandler(PUT, "/_noop_bulk", this);
controller.registerHandler(POST, "/{index}/_noop_bulk", this);
controller.registerHandler(PUT, "/{index}/_noop_bulk", this);
controller.registerHandler(POST, "/{index}/{type}/_noop_bulk", this);
controller.registerHandler(PUT, "/{index}/{type}/_noop_bulk", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws Exception {
BulkRequest bulkRequest = Requests.bulkRequest();
String defaultIndex = request.param("index");
String defaultType = request.param("type");
String defaultRouting = request.param("routing");
String fieldsParam = request.param("fields");
String defaultPipeline = request.param("pipeline");
String[] defaultFields = fieldsParam != null ? Strings.commaDelimitedListToStringArray(fieldsParam) : null;
String waitForActiveShards = request.param("wait_for_active_shards");
if (waitForActiveShards != null) {
bulkRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
}
bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
bulkRequest.setRefreshPolicy(request.param("refresh"));
bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline, null, true);
// short circuit the call to the transport layer
BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request);
listener.onResponse(bulkRequest);
}
private static class BulkRestBuilderListener extends RestBuilderListener<BulkRequest> {
private final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update",
new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));
private final RestRequest request;
public BulkRestBuilderListener(RestChannel channel, RestRequest request) {
super(channel);
this.request = request;
}
@Override
public RestResponse buildResponse(BulkRequest bulkRequest, XContentBuilder builder) throws Exception {
builder.startObject();
builder.field(Fields.TOOK, 0);
builder.field(Fields.ERRORS, false);
builder.startArray(Fields.ITEMS);
for (int idx = 0; idx < bulkRequest.numberOfActions(); idx++) {
builder.startObject();
ITEM_RESPONSE.toXContent(builder, request);
builder.endObject();
}
builder.endArray();
builder.endObject();
return new BytesRestResponse(OK, builder);
}
}
static final class Fields {
static final String ITEMS = "items";
static final String ERRORS = "errors";
static final String TOOK = "took";
}
}


@@ -0,0 +1,56 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.bulk;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
public class TransportNoopBulkAction extends HandledTransportAction<BulkRequest, BulkResponse> {
private static final BulkItemResponse ITEM_RESPONSE = new BulkItemResponse(1, "update",
new UpdateResponse(new ShardId("mock", "", 1), "mock_type", "1", 1L, DocWriteResponse.Result.CREATED));
@Inject
public TransportNoopBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NoopBulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
}
@Override
protected void doExecute(BulkRequest request, ActionListener<BulkResponse> listener) {
final int itemCount = request.subRequests().size();
// simulate at least a realistic amount of data that gets serialized
BulkItemResponse[] bulkItemResponses = new BulkItemResponse[itemCount];
for (int idx = 0; idx < itemCount; idx++) {
bulkItemResponses[idx] = ITEM_RESPONSE;
}
listener.onResponse(new BulkResponse(bulkItemResponses, 0));
}
}


@@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.search;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.ElasticsearchClient;
public class NoopSearchAction extends Action<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {
public static final NoopSearchAction INSTANCE = new NoopSearchAction();
public static final String NAME = "mock:data/read/search";
public NoopSearchAction() {
super(NAME);
}
@Override
public NoopSearchRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new NoopSearchRequestBuilder(client, this);
}
@Override
public SearchResponse newResponse() {
return new SearchResponse();
}
}


@@ -0,0 +1,504 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.search;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.PipelineAggregationBuilder;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.slice.SliceBuilder;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import java.util.Arrays;
import java.util.List;
public class NoopSearchRequestBuilder extends ActionRequestBuilder<SearchRequest, SearchResponse, NoopSearchRequestBuilder> {
public NoopSearchRequestBuilder(ElasticsearchClient client, NoopSearchAction action) {
super(client, action, new SearchRequest());
}
/**
* Sets the indices the search will be executed on.
*/
public NoopSearchRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
/**
* The document types to execute the search against. Defaults to be executed against
* all types.
*/
public NoopSearchRequestBuilder setTypes(String... types) {
request.types(types);
return this;
}
/**
* The search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}.
*/
public NoopSearchRequestBuilder setSearchType(SearchType searchType) {
request.searchType(searchType);
return this;
}
/**
* A string representation of the search type to execute, defaults to {@link org.elasticsearch.action.search.SearchType#DEFAULT}. Can be
* one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch",
* "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch".
*/
public NoopSearchRequestBuilder setSearchType(String searchType) {
request.searchType(searchType);
return this;
}
/**
* If set, will enable scrolling of the search request.
*/
public NoopSearchRequestBuilder setScroll(Scroll scroll) {
request.scroll(scroll);
return this;
}
/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public NoopSearchRequestBuilder setScroll(TimeValue keepAlive) {
request.scroll(keepAlive);
return this;
}
/**
* If set, will enable scrolling of the search request for the specified timeout.
*/
public NoopSearchRequestBuilder setScroll(String keepAlive) {
request.scroll(keepAlive);
return this;
}
/**
* An optional timeout to control how long search is allowed to take.
*/
public NoopSearchRequestBuilder setTimeout(TimeValue timeout) {
sourceBuilder().timeout(timeout);
return this;
}
/**
* An optional document count; once that many documents have been collected,
* the search query terminates early.
*/
public NoopSearchRequestBuilder setTerminateAfter(int terminateAfter) {
sourceBuilder().terminateAfter(terminateAfter);
return this;
}
/**
* A comma separated list of routing values to control the shards the search will be executed on.
*/
public NoopSearchRequestBuilder setRouting(String routing) {
request.routing(routing);
return this;
}
/**
* The routing values to control the shards that the search will be executed on.
*/
public NoopSearchRequestBuilder setRouting(String... routing) {
request.routing(routing);
return this;
}
/**
* Sets the preference to execute the search. Defaults to randomize across shards. Can be set to
* <tt>_local</tt> to prefer local shards, <tt>_primary</tt> to execute only on primary shards, or
* a custom value, which guarantees that the same order will be used across different requests.
*/
public NoopSearchRequestBuilder setPreference(String preference) {
request.preference(preference);
return this;
}
/**
* Specifies what type of requested indices to ignore and how to deal with wildcard indices expressions.
* <p>
* For example, indices that don't exist.
*/
public NoopSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request().indicesOptions(indicesOptions);
return this;
}
/**
* Constructs a new search source builder with a search query.
*
* @see org.elasticsearch.index.query.QueryBuilders
*/
public NoopSearchRequestBuilder setQuery(QueryBuilder queryBuilder) {
sourceBuilder().query(queryBuilder);
return this;
}
/**
* Sets a filter that will be executed after the query has been executed and only has effect on the search hits
* (not aggregations). This filter is always executed as the last filtering mechanism.
*/
public NoopSearchRequestBuilder setPostFilter(QueryBuilder postFilter) {
sourceBuilder().postFilter(postFilter);
return this;
}
/**
* Sets the minimum score below which docs will be filtered out.
*/
public NoopSearchRequestBuilder setMinScore(float minScore) {
sourceBuilder().minScore(minScore);
return this;
}
/**
* From index to start the search from. Defaults to <tt>0</tt>.
*/
public NoopSearchRequestBuilder setFrom(int from) {
sourceBuilder().from(from);
return this;
}
/**
* The number of search hits to return. Defaults to <tt>10</tt>.
*/
public NoopSearchRequestBuilder setSize(int size) {
sourceBuilder().size(size);
return this;
}
/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with an
* explanation of the hit (ranking).
*/
public NoopSearchRequestBuilder setExplain(boolean explain) {
sourceBuilder().explain(explain);
return this;
}
/**
* Should each {@link org.elasticsearch.search.SearchHit} be returned with its
* version.
*/
public NoopSearchRequestBuilder setVersion(boolean version) {
sourceBuilder().version(version);
return this;
}
/**
* Sets the boost a specific index will receive when the query is executed against it.
*
* @param index The index to apply the boost against
* @param indexBoost The boost to apply to the index
*/
public NoopSearchRequestBuilder addIndexBoost(String index, float indexBoost) {
sourceBuilder().indexBoost(index, indexBoost);
return this;
}
/**
* The stats groups this request will be aggregated under.
*/
public NoopSearchRequestBuilder setStats(String... statsGroups) {
sourceBuilder().stats(Arrays.asList(statsGroups));
return this;
}
/**
* The stats groups this request will be aggregated under.
*/
public NoopSearchRequestBuilder setStats(List<String> statsGroups) {
sourceBuilder().stats(statsGroups);
return this;
}
/**
* Sets no stored fields to be loaded, resulting in only id and type to be returned per hit.
*/
public NoopSearchRequestBuilder setNoStoredFields() {
sourceBuilder().noStoredFields();
return this;
}
/**
* Indicates whether the response should contain the stored _source for every hit
*/
public NoopSearchRequestBuilder setFetchSource(boolean fetch) {
sourceBuilder().fetchSource(fetch);
return this;
}
/**
* Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param include An optional include (optionally wildcarded) pattern to filter the returned _source
* @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source
*/
public NoopSearchRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
sourceBuilder().fetchSource(include, exclude);
return this;
}
/**
* Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard
* elements.
*
* @param includes An optional list of include (optionally wildcarded) patterns to filter the returned _source
* @param excludes An optional list of exclude (optionally wildcarded) patterns to filter the returned _source
*/
public NoopSearchRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
sourceBuilder().fetchSource(includes, excludes);
return this;
}
/**
* Adds a docvalue-based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The field to get from the docvalue
*/
public NoopSearchRequestBuilder addDocValueField(String name) {
sourceBuilder().docValueField(name);
return this;
}
/**
* Adds a stored field to load and return (note, it must be stored) as part of the search request.
* If none are specified, the source of the document will be returned.
*/
public NoopSearchRequestBuilder addStoredField(String field) {
sourceBuilder().storedField(field);
return this;
}
/**
* Adds a script-based field to load and return. The field does not have to be stored,
* but it's recommended to use non-analyzed or numeric fields.
*
* @param name The name that will represent this value in the return hit
* @param script The script to use
*/
public NoopSearchRequestBuilder addScriptField(String name, Script script) {
sourceBuilder().scriptField(name, script);
return this;
}
/**
* Adds a sort against the given field name and the sort ordering.
*
* @param field The name of the field
* @param order The sort ordering
*/
public NoopSearchRequestBuilder addSort(String field, SortOrder order) {
sourceBuilder().sort(field, order);
return this;
}
/**
* Adds a generic sort builder.
*
* @see org.elasticsearch.search.sort.SortBuilders
*/
public NoopSearchRequestBuilder addSort(SortBuilder sort) {
sourceBuilder().sort(sort);
return this;
}
/**
* Sets the sort values that indicate which docs this request should "search after".
*/
public NoopSearchRequestBuilder searchAfter(Object[] values) {
sourceBuilder().searchAfter(values);
return this;
}
public NoopSearchRequestBuilder slice(SliceBuilder builder) {
sourceBuilder().slice(builder);
return this;
}
/**
* Applies when sorting, and controls if scores will be tracked as well. Defaults to
* <tt>false</tt>.
*/
public NoopSearchRequestBuilder setTrackScores(boolean trackScores) {
sourceBuilder().trackScores(trackScores);
return this;
}
/**
* Sets the fields to load and return as part of the search request. If none
* are specified, the source of the document will be returned.
*/
public NoopSearchRequestBuilder storedFields(String... fields) {
sourceBuilder().storedFields(Arrays.asList(fields));
return this;
}
/**
* Adds an aggregation to the search operation.
*/
public NoopSearchRequestBuilder addAggregation(AggregationBuilder aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}
/**
* Adds an aggregation to the search operation.
*/
public NoopSearchRequestBuilder addAggregation(PipelineAggregationBuilder aggregation) {
sourceBuilder().aggregation(aggregation);
return this;
}
public NoopSearchRequestBuilder highlighter(HighlightBuilder highlightBuilder) {
sourceBuilder().highlighter(highlightBuilder);
return this;
}
/**
* Delegates to {@link org.elasticsearch.search.builder.SearchSourceBuilder#suggest(SuggestBuilder)}
*/
public NoopSearchRequestBuilder suggest(SuggestBuilder suggestBuilder) {
sourceBuilder().suggest(suggestBuilder);
return this;
}
/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
*
* @param rescorer rescorer configuration
* @return this for chaining
*/
public NoopSearchRequestBuilder setRescorer(RescoreBuilder<?> rescorer) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer);
}
/**
* Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
* {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
*
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public NoopSearchRequestBuilder setRescorer(RescoreBuilder rescorer, int window) {
sourceBuilder().clearRescorers();
return addRescorer(rescorer.windowSize(window));
}
/**
* Adds a new rescorer.
*
* @param rescorer rescorer configuration
* @return this for chaining
*/
public NoopSearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer) {
sourceBuilder().addRescorer(rescorer);
return this;
}
/**
* Adds a new rescorer.
*
* @param rescorer rescorer configuration
* @param window rescore window
* @return this for chaining
*/
public NoopSearchRequestBuilder addRescorer(RescoreBuilder<?> rescorer, int window) {
sourceBuilder().addRescorer(rescorer.windowSize(window));
return this;
}
/**
* Clears all rescorers from the builder.
*
* @return this for chaining
*/
public NoopSearchRequestBuilder clearRescorers() {
sourceBuilder().clearRescorers();
return this;
}
/**
* Sets the source of the request as a SearchSourceBuilder.
*/
public NoopSearchRequestBuilder setSource(SearchSourceBuilder source) {
request.source(source);
return this;
}
/**
* Sets if this request should use the request cache or not, assuming that it can (for
* example, if "now" is used, it will never be cached). If not set (or set to null), it
* defaults to the index-level setting for whether the request cache is enabled.
*/
public NoopSearchRequestBuilder setRequestCache(Boolean requestCache) {
request.requestCache(requestCache);
return this;
}
/**
* Should the query be profiled. Defaults to <code>false</code>
*/
public NoopSearchRequestBuilder setProfile(boolean profile) {
sourceBuilder().profile(profile);
return this;
}
@Override
public String toString() {
if (request.source() != null) {
return request.source().toString();
}
return new SearchSourceBuilder().toString();
}
private SearchSourceBuilder sourceBuilder() {
if (request.source() == null) {
request.source(new SearchSourceBuilder());
}
return request.source();
}
}
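
A sketch of the fluent style this builder enables, assuming an already-connected `Client`; the index name and query are placeholders. Because `NoopSearchRequestBuilder` extends `ActionRequestBuilder`, the usual `get()` call applies.

```
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugin.noop.action.search.NoopSearchAction;
import org.elasticsearch.plugin.noop.action.search.NoopSearchRequestBuilder;

public final class NoopSearchBuilderSketch {
    public static SearchResponse searchNoop(Client client) {
        // Same fluent API as the regular SearchRequestBuilder; only the target action differs.
        return new NoopSearchRequestBuilder(client, NoopSearchAction.INSTANCE)
            .setIndices("geonames")
            .setQuery(QueryBuilders.matchPhraseQuery("name", "Sankt Georgen"))
            .setSize(10)
            .get();
    }
}
```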

@ -0,0 +1,54 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.search;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.RestStatusToXContentListener;
import java.io.IOException;
import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.POST;
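/**
 * REST handler that exposes the noop search API under the {@code _noop_search} endpoints and
 * forwards every request to {@link NoopSearchAction} without parsing the request body.
 */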
public class RestNoopSearchAction extends BaseRestHandler {
@Inject
public RestNoopSearchAction(Settings settings, RestController controller) {
super(settings);
controller.registerHandler(GET, "/_noop_search", this);
controller.registerHandler(POST, "/_noop_search", this);
controller.registerHandler(GET, "/{index}/_noop_search", this);
controller.registerHandler(POST, "/{index}/_noop_search", this);
controller.registerHandler(GET, "/{index}/{type}/_noop_search", this);
controller.registerHandler(POST, "/{index}/{type}/_noop_search", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel, final NodeClient client) throws IOException {
SearchRequest searchRequest = new SearchRequest();
client.execute(NoopSearchAction.INSTANCE, searchRequest, new RestStatusToXContentListener<>(channel));
}
}
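
A rough sketch of exercising these endpoints with the low-level REST client; host, port, index, and request body are placeholders, and the Apache HTTP classes are pulled in by the REST client's dependencies. Since the handler builds an empty `SearchRequest` regardless of input, the body only generates client-side serialization and HTTP work.

```
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

import java.util.Collections;

public final class NoopSearchRestSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder host and port; point this at the benchmark target node.
        try (RestClient restClient = RestClient.builder(new HttpHost("192.168.2.2", 9200, "http")).build()) {
            NStringEntity body = new NStringEntity(
                "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }",
                ContentType.APPLICATION_JSON);
            // The handler ignores the body and always executes a noop search.
            Response response = restClient.performRequest(
                "POST", "/geonames/_noop_search", Collections.<String, String>emptyMap(), body);
            System.out.println(response.getStatusLine());
        }
    }
}
```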

@ -0,0 +1,58 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugin.noop.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.profile.SearchProfileShardResults;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.Collections;
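/**
 * Transport action that answers every search request with an empty search response without
 * touching any shard.
 */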
public class TransportNoopSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {
@Inject
public TransportNoopSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters
actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NoopSearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
SearchRequest::new);
}
@Override
protected void doExecute(SearchRequest request, ActionListener<SearchResponse> listener) {
listener.onResponse(new SearchResponse(new InternalSearchResponse(
new InternalSearchHits(
new InternalSearchHit[0], 0L, 0.0f),
new InternalAggregations(Collections.emptyList()),
new Suggest(Collections.emptyList()),
new SearchProfileShardResults(Collections.emptyMap()), false, false), "", 1, 1, 0, new ShardSearchFailure[0]));
}
}

@ -9,6 +9,7 @@ List projects = [
'client:sniffer',
'client:transport',
'client:test',
'client:client-benchmark-noop-api-plugin',
'client:benchmark',
'benchmarks',
'distribution:integ-test-zip',