LLClient: Support host selection (#30523)

Allows users of the Low Level REST client to specify which hosts a
request should be run on. They implement the `NodeSelector` interface
or reuse a built-in selector like `NOT_MASTER_ONLY` to choose which
nodes are valid. Using it looks like:
```
Request request = new Request("POST", "/foo/_search");
RequestOptions options = request.getOptions().toBuilder();
options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
request.setOptions(options);
...
```
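
A custom selector just filters the `Iterable` of nodes it is handed. As a
hedged sketch (the class name is illustrative, not part of this change), a
selector that only keeps data nodes could look like:

```
import java.util.Iterator;

// Lives alongside Node and NodeSelector in org.elasticsearch.client.
public class DataNodeSelector implements NodeSelector {
    @Override
    public void select(Iterable<Node> nodes) {
        // Drop nodes whose roles are unknown or that don't hold data.
        for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
            Node node = itr.next();
            if (node.getRoles() == null || false == node.getRoles().isData()) {
                itr.remove();
            }
        }
    }
}
```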

This introduces a new `Node` object which contains an `HttpHost` and
metadata about the host. At this point that metadata is just `version`
and `roles`, but I plan to add node attributes in a followup. The
canonical way to **get** this metadata is to use the `Sniffer` to pull
the information from the Elasticsearch cluster.
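
If you already know the metadata you can also supply it when building the
client instead of sniffing it. A minimal sketch in the style of the snippet
above, assuming a made-up host name and version:

```
HttpHost host = new HttpHost("es1.example.com", 9200);
// Roles(masterEligible, data, ingest) per the new Node.Roles class
Node node = new Node(host, Collections.singleton(host), "node-1", "6.3.0",
        new Roles(true, true, true));
RestClient client = RestClient.builder(node).build();
```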

I've marked this as "breaking-java" because it breaks custom
implementations of `HostsSniffer` by renaming the interface to
`NodesSniffer` and by changing it from returning a `List<HttpHost>` to a
`List<Node>`. It *shouldn't* break anyone else though.

Because we expect to find it useful, this also adds `host_selector`
support to `do` statements in the yaml tests. Using it looks a little
like:

```
---
"example test":
  - skip:
      features: host_selector
  - do:
      host_selector:
        version: " - 7.0.0" # same syntax as skip
      apiname:
        something: true
```

The `do` section parses the `version` string into a host selector that
uses the same version comparison logic as the `skip` section. When the
`do` section is executed it passes the selector off to the `RestClient`,
using the `ElasticsearchHostsSniffer` to sniff the required metadata.

The idea is to use this in mixed version tests to target a specific
version of Elasticsearch so we can be sure about the deprecation
logging, though we don't currently have any examples that need it. We
do, however, have at least one open pull request that requires
something like this to test it properly.

Closes #21888
Nik Everett committed 2018-06-11 17:07:27 -04:00
commit 0d9b78834f (parent 563141c6c9)
57 changed files with 2434 additions and 467 deletions

DeadHostState.java

```
@@ -29,7 +29,7 @@ import java.util.concurrent.TimeUnit;
 final class DeadHostState implements Comparable<DeadHostState> {

     private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1);
-    private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);
+    static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);

     private final int failedAttempts;
     private final long deadUntilNanos;
@@ -55,12 +55,12 @@ final class DeadHostState implements Comparable<DeadHostState> {
      *
      * @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt
      */
-    DeadHostState(DeadHostState previousDeadHostState, TimeSupplier timeSupplier) {
+    DeadHostState(DeadHostState previousDeadHostState) {
         long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1),
                 MAX_CONNECTION_TIMEOUT_NANOS);
-        this.deadUntilNanos = timeSupplier.nanoTime() + timeoutNanos;
+        this.deadUntilNanos = previousDeadHostState.timeSupplier.nanoTime() + timeoutNanos;
         this.failedAttempts = previousDeadHostState.failedAttempts + 1;
-        this.timeSupplier = timeSupplier;
+        this.timeSupplier = previousDeadHostState.timeSupplier;
     }

     /**
@@ -86,6 +86,10 @@ final class DeadHostState implements Comparable<DeadHostState> {
     @Override
     public int compareTo(DeadHostState other) {
+        if (timeSupplier != other.timeSupplier) {
+            throw new IllegalArgumentException("can't compare DeadHostStates with different clocks ["
+                    + timeSupplier + " != " + other.timeSupplier + "]");
+        }
         return Long.compare(deadUntilNanos, other.deadUntilNanos);
     }
@@ -94,6 +98,7 @@ final class DeadHostState implements Comparable<DeadHostState> {
         return "DeadHostState{" +
                 "failedAttempts=" + failedAttempts +
                 ", deadUntilNanos=" + deadUntilNanos +
+                ", timeSupplier=" + timeSupplier +
                 '}';
     }
@@ -101,12 +106,16 @@ final class DeadHostState implements Comparable<DeadHostState> {
      * Time supplier that makes timing aspects pluggable to ease testing
      */
     interface TimeSupplier {

         TimeSupplier DEFAULT = new TimeSupplier() {
             @Override
             public long nanoTime() {
                 return System.nanoTime();
             }
+
+            @Override
+            public String toString() {
+                return "nanoTime";
+            }
         };

         long nanoTime();
```

Node.java (new file)

```
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import java.util.Objects;
import java.util.Set;

import org.apache.http.HttpHost;

/**
 * Metadata about an {@link HttpHost} running Elasticsearch.
 */
public class Node {
    /**
     * Address that this host claims is its primary contact point.
     */
    private final HttpHost host;
    /**
     * Addresses on which the host is listening. These are useful to have
     * around because they allow you to find a host based on any address it
     * is listening on.
     */
    private final Set<HttpHost> boundHosts;
    /**
     * Name of the node as configured by the {@code node.name} attribute.
     */
    private final String name;
    /**
     * Version of Elasticsearch that the node is running or {@code null}
     * if we don't know the version.
     */
    private final String version;
    /**
     * Roles that the Elasticsearch process on the host has or {@code null}
     * if we don't know what roles the node has.
     */
    private final Roles roles;

    /**
     * Create a {@linkplain Node} with metadata. All parameters except
     * {@code host} are nullable and implementations of {@link NodeSelector}
     * need to decide what to do in their absence.
     */
    public Node(HttpHost host, Set<HttpHost> boundHosts, String name, String version, Roles roles) {
        if (host == null) {
            throw new IllegalArgumentException("host cannot be null");
        }
        this.host = host;
        this.boundHosts = boundHosts;
        this.name = name;
        this.version = version;
        this.roles = roles;
    }

    /**
     * Create a {@linkplain Node} without any metadata.
     */
    public Node(HttpHost host) {
        this(host, null, null, null, null);
    }

    /**
     * Contact information for the host.
     */
    public HttpHost getHost() {
        return host;
    }

    /**
     * Addresses on which the host is listening. These are useful to have
     * around because they allow you to find a host based on any address it
     * is listening on.
     */
    public Set<HttpHost> getBoundHosts() {
        return boundHosts;
    }

    /**
     * The {@code node.name} of the node.
     */
    public String getName() {
        return name;
    }

    /**
     * Version of Elasticsearch that the node is running or {@code null}
     * if we don't know the version.
     */
    public String getVersion() {
        return version;
    }

    /**
     * Roles that the Elasticsearch process on the host has or {@code null}
     * if we don't know what roles the node has.
     */
    public Roles getRoles() {
        return roles;
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append("[host=").append(host);
        if (boundHosts != null) {
            b.append(", bound=").append(boundHosts);
        }
        if (name != null) {
            b.append(", name=").append(name);
        }
        if (version != null) {
            b.append(", version=").append(version);
        }
        if (roles != null) {
            b.append(", roles=").append(roles);
        }
        return b.append(']').toString();
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null || obj.getClass() != getClass()) {
            return false;
        }
        Node other = (Node) obj;
        return host.equals(other.host)
            && Objects.equals(boundHosts, other.boundHosts)
            && Objects.equals(name, other.name)
            && Objects.equals(version, other.version)
            && Objects.equals(roles, other.roles);
    }

    @Override
    public int hashCode() {
        return Objects.hash(host, boundHosts, name, version, roles);
    }

    /**
     * Role information about an Elasticsearch process.
     */
    public static final class Roles {
        private final boolean masterEligible;
        private final boolean data;
        private final boolean ingest;

        public Roles(boolean masterEligible, boolean data, boolean ingest) {
            this.masterEligible = masterEligible;
            this.data = data;
            this.ingest = ingest;
        }

        /**
         * Returns whether or not the node <strong>could</strong> be elected master.
         */
        public boolean isMasterEligible() {
            return masterEligible;
        }
        /**
         * Returns whether or not the node stores data.
         */
        public boolean isData() {
            return data;
        }
        /**
         * Returns whether or not the node runs ingest pipelines.
         */
        public boolean isIngest() {
            return ingest;
        }

        @Override
        public String toString() {
            StringBuilder result = new StringBuilder(3);
            if (masterEligible) result.append('m');
            if (data) result.append('d');
            if (ingest) result.append('i');
            return result.toString();
        }

        @Override
        public boolean equals(Object obj) {
            if (obj == null || obj.getClass() != getClass()) {
                return false;
            }
            Roles other = (Roles) obj;
            return masterEligible == other.masterEligible
                && data == other.data
                && ingest == other.ingest;
        }

        @Override
        public int hashCode() {
            return Objects.hash(masterEligible, data, ingest);
        }
    }
}
```

NodeSelector.java (new file)

```
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import java.util.Iterator;

/**
 * Selects nodes that can receive requests. Used to keep requests away
 * from master nodes or to send them to nodes with a particular attribute.
 * Use with {@link RequestOptions.Builder#setNodeSelector(NodeSelector)}.
 */
public interface NodeSelector {
    /**
     * Select the {@link Node}s to which to send requests. This is called with
     * a mutable {@link Iterable} of {@linkplain Node}s in the order that the
     * rest client would prefer to use them and implementers should remove
     * nodes from it that should not receive the request. Implementers may
     * iterate the nodes as many times as they need.
     * <p>
     * This may be called twice per request: first for "living" nodes that
     * have not been blacklisted by previous errors. If the selector removes
     * all nodes from the list or if there aren't any living nodes then the
     * {@link RestClient} will call this method with a list of "dead" nodes.
     * <p>
     * Implementers should not rely on the ordering of the nodes.
     */
    void select(Iterable<Node> nodes);
    /*
     * We were fairly careful with our choice of Iterable here. The caller has
     * a List but reordering the list is likely to break round robin. Luckily
     * Iterable doesn't allow any reordering.
     */

    /**
     * Selector that matches any node.
     */
    NodeSelector ANY = new NodeSelector() {
        @Override
        public void select(Iterable<Node> nodes) {
            // Intentionally does nothing
        }

        @Override
        public String toString() {
            return "ANY";
        }
    };

    /**
     * Selector that matches any node that has metadata and doesn't
     * have the {@code master} role OR it has the {@code data} role.
     */
    NodeSelector NOT_MASTER_ONLY = new NodeSelector() {
        @Override
        public void select(Iterable<Node> nodes) {
            for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
                Node node = itr.next();
                if (node.getRoles() == null) continue;
                if (node.getRoles().isMasterEligible()
                        && false == node.getRoles().isData()
                        && false == node.getRoles().isIngest()) {
                    itr.remove();
                }
            }
        }

        @Override
        public String toString() {
            return "NOT_MASTER_ONLY";
        }
    };
}
```

RequestLogger.java

```
@@ -87,14 +87,14 @@ final class RequestLogger {
     /**
      * Logs a request that failed
      */
-    static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, Exception e) {
+    static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) {
         if (logger.isDebugEnabled()) {
-            logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
+            logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e);
         }
         if (tracer.isTraceEnabled()) {
             String traceRequest;
             try {
-                traceRequest = buildTraceRequest(request, host);
+                traceRequest = buildTraceRequest(request, node.getHost());
             } catch (IOException e1) {
                 tracer.trace("error while reading request for trace purposes", e);
                 traceRequest = "";
```

RequestOptions.java

```
@@ -37,18 +37,21 @@ import java.util.ArrayList;
  */
 public final class RequestOptions {
     public static final RequestOptions DEFAULT = new Builder(
-            Collections.<Header>emptyList(), HeapBufferedResponseConsumerFactory.DEFAULT).build();
+            Collections.<Header>emptyList(), NodeSelector.ANY,
+            HeapBufferedResponseConsumerFactory.DEFAULT).build();

     private final List<Header> headers;
+    private final NodeSelector nodeSelector;
     private final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory;

     private RequestOptions(Builder builder) {
         this.headers = Collections.unmodifiableList(new ArrayList<>(builder.headers));
+        this.nodeSelector = builder.nodeSelector;
         this.httpAsyncResponseConsumerFactory = builder.httpAsyncResponseConsumerFactory;
     }

     public Builder toBuilder() {
-        Builder builder = new Builder(headers, httpAsyncResponseConsumerFactory);
+        Builder builder = new Builder(headers, nodeSelector, httpAsyncResponseConsumerFactory);
         return builder;
     }
@@ -59,6 +62,14 @@ public final class RequestOptions {
         return headers;
     }

+    /**
+     * The selector that chooses which nodes are valid destinations for
+     * {@link Request}s with these options.
+     */
+    public NodeSelector getNodeSelector() {
+        return nodeSelector;
+    }
+
     /**
      * The {@link HttpAsyncResponseConsumerFactory} used to create one
      * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
@@ -82,6 +93,9 @@ public final class RequestOptions {
                 b.append(headers.get(h).toString());
             }
         }
+        if (nodeSelector != NodeSelector.ANY) {
+            b.append(", nodeSelector=").append(nodeSelector);
+        }
         if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) {
             b.append(", consumerFactory=").append(httpAsyncResponseConsumerFactory);
         }
@@ -99,20 +113,24 @@ public final class RequestOptions {
         RequestOptions other = (RequestOptions) obj;
         return headers.equals(other.headers)
+                && nodeSelector.equals(other.nodeSelector)
                 && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory);
     }

     @Override
     public int hashCode() {
-        return Objects.hash(headers, httpAsyncResponseConsumerFactory);
+        return Objects.hash(headers, nodeSelector, httpAsyncResponseConsumerFactory);
     }

     public static class Builder {
         private final List<Header> headers;
+        private NodeSelector nodeSelector;
         private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory;

-        private Builder(List<Header> headers, HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
+        private Builder(List<Header> headers, NodeSelector nodeSelector,
+                HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) {
             this.headers = new ArrayList<>(headers);
+            this.nodeSelector = nodeSelector;
             this.httpAsyncResponseConsumerFactory = httpAsyncResponseConsumerFactory;
         }
@@ -133,7 +151,15 @@ public final class RequestOptions {
         }

         /**
-         * set the {@link HttpAsyncResponseConsumerFactory} used to create one
+         * Configure the selector that chooses which nodes are valid
+         * destinations for {@link Request}s with these options
+         */
+        public void setNodeSelector(NodeSelector nodeSelector) {
+            this.nodeSelector = Objects.requireNonNull(nodeSelector, "nodeSelector cannot be null");
+        }
+
+        /**
+         * Set the {@link HttpAsyncResponseConsumerFactory} used to create one
          * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the
          * response body gets streamed from a non-blocking HTTP connection on the
          * client side.
```

Response.java

```
@@ -40,7 +40,7 @@ public class Response {
     Response(RequestLine requestLine, HttpHost host, HttpResponse response) {
         Objects.requireNonNull(requestLine, "requestLine cannot be null");
-        Objects.requireNonNull(host, "node cannot be null");
+        Objects.requireNonNull(host, "host cannot be null");
         Objects.requireNonNull(response, "response cannot be null");
         this.requestLine = requestLine;
         this.host = host;
```

RestClient.java

```
@@ -46,10 +46,11 @@ import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
 import org.apache.http.nio.client.methods.HttpAsyncMethods;
 import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
 import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
+import org.elasticsearch.client.DeadHostState.TimeSupplier;

-import javax.net.ssl.SSLHandshakeException;
 import java.io.Closeable;
 import java.io.IOException;
+import java.net.ConnectException;
 import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -57,11 +58,10 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedHashSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
@@ -74,13 +74,16 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
+import javax.net.ssl.SSLHandshakeException;
+
+import static java.util.Collections.singletonList;

 /**
  * Client that connects to an Elasticsearch cluster through HTTP.
  * <p>
  * Must be created using {@link RestClientBuilder}, which allows to set all the different options or just rely on defaults.
  * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
- * by calling {@link #setHosts(HttpHost...)}.
+ * by calling {@link #setNodes(Collection)}.
  * <p>
  * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When
  * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
@@ -102,53 +105,93 @@ public class RestClient implements Closeable {
     final List<Header> defaultHeaders;
     private final long maxRetryTimeoutMillis;
     private final String pathPrefix;
-    private final AtomicInteger lastHostIndex = new AtomicInteger(0);
-    private volatile HostTuple<Set<HttpHost>> hostTuple;
+    private final AtomicInteger lastNodeIndex = new AtomicInteger(0);
     private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
     private final FailureListener failureListener;
+    private volatile NodeTuple<List<Node>> nodeTuple;

     RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
-            HttpHost[] hosts, String pathPrefix, FailureListener failureListener) {
+            List<Node> nodes, String pathPrefix, FailureListener failureListener) {
         this.client = client;
         this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
         this.defaultHeaders = Collections.unmodifiableList(Arrays.asList(defaultHeaders));
         this.failureListener = failureListener;
         this.pathPrefix = pathPrefix;
-        setHosts(hosts);
+        setNodes(nodes);
     }

     /**
      * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
      * Creates a new builder instance and sets the hosts that the client will send requests to.
+     * <p>
+     * Prefer this to {@link #builder(HttpHost...)} if you have metadata up front about the nodes.
+     * If you don't either one is fine.
      */
-    public static RestClientBuilder builder(HttpHost... hosts) {
-        return new RestClientBuilder(hosts);
+    public static RestClientBuilder builder(Node... nodes) {
+        return new RestClientBuilder(nodes == null ? null : Arrays.asList(nodes));
     }

     /**
-     * Replaces the hosts that the client communicates with.
-     * @see HttpHost
+     * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
+     * Creates a new builder instance and sets the nodes that the client will send requests to.
+     * <p>
+     * You can use this if you do not have metadata up front about the nodes. If you do, prefer
+     * {@link #builder(Node...)}.
+     * @see Node#Node(HttpHost)
      */
-    public synchronized void setHosts(HttpHost... hosts) {
-        if (hosts == null || hosts.length == 0) {
-            throw new IllegalArgumentException("hosts must not be null nor empty");
+    public static RestClientBuilder builder(HttpHost... hosts) {
+        return new RestClientBuilder(hostsToNodes(hosts));
+    }
+
+    /**
+     * Replaces the hosts with which the client communicates.
+     *
+     * @deprecated prefer {@link setNodes} because it allows you
+     * to set metadata for use with {@link NodeSelector}s
+     */
+    @Deprecated
+    public void setHosts(HttpHost... hosts) {
+        setNodes(hostsToNodes(hosts));
+    }
+
+    /**
+     * Replaces the nodes with which the client communicates.
+     */
+    public synchronized void setNodes(Collection<Node> nodes) {
+        if (nodes == null || nodes.isEmpty()) {
+            throw new IllegalArgumentException("nodes must not be null or empty");
         }
-        Set<HttpHost> httpHosts = new LinkedHashSet<>();
         AuthCache authCache = new BasicAuthCache();
-        for (HttpHost host : hosts) {
-            Objects.requireNonNull(host, "host cannot be null");
-            httpHosts.add(host);
-            authCache.put(host, new BasicScheme());
+
+        Map<HttpHost, Node> nodesByHost = new LinkedHashMap<>();
+        for (Node node : nodes) {
+            Objects.requireNonNull(node, "node cannot be null");
+            // TODO should we throw an IAE if we have two nodes with the same host?
+            nodesByHost.put(node.getHost(), node);
+            authCache.put(node.getHost(), new BasicScheme());
         }
-        this.hostTuple = new HostTuple<>(Collections.unmodifiableSet(httpHosts), authCache);
+        this.nodeTuple = new NodeTuple<>(
+                Collections.unmodifiableList(new ArrayList<>(nodesByHost.values())), authCache);
         this.blacklist.clear();
     }

+    private static List<Node> hostsToNodes(HttpHost[] hosts) {
+        if (hosts == null || hosts.length == 0) {
+            throw new IllegalArgumentException("hosts must not be null nor empty");
+        }
+        List<Node> nodes = new ArrayList<>(hosts.length);
+        for (int i = 0; i < hosts.length; i++) {
+            nodes.add(new Node(hosts[i]));
+        }
+        return nodes;
+    }
+
     /**
-     * Returns the configured hosts
+     * Get the list of nodes that the client knows about. The list is
+     * unmodifiable.
      */
-    public List<HttpHost> getHosts() {
-        return new ArrayList<>(hostTuple.hosts);
+    public List<Node> getNodes() {
+        return nodeTuple.nodes;
     }

     /**
@@ -434,7 +477,7 @@ public class RestClient implements Closeable {
         performRequestAsync(request, responseListener);
     }

-    void performRequestAsyncNoCatch(Request request, ResponseListener listener) {
+    void performRequestAsyncNoCatch(Request request, ResponseListener listener) throws IOException {
         Map<String, String> requestParams = new HashMap<>(request.getParameters());
         //ignore is a special parameter supported by the clients, shouldn't be sent to es
         String ignoreString = requestParams.remove("ignore");
@@ -466,40 +509,40 @@ public class RestClient implements Closeable {
         setHeaders(httpRequest, request.getOptions().getHeaders());
         FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(listener);
         long startTime = System.nanoTime();
-        performRequestAsync(startTime, nextHost(), httpRequest, ignoreErrorCodes,
+        performRequestAsync(startTime, nextNode(request.getOptions().getNodeSelector()), httpRequest, ignoreErrorCodes,
                 request.getOptions().getHttpAsyncResponseConsumerFactory(), failureTrackingResponseListener);
     }

-    private void performRequestAsync(final long startTime, final HostTuple<Iterator<HttpHost>> hostTuple, final HttpRequestBase request,
+    private void performRequestAsync(final long startTime, final NodeTuple<Iterator<Node>> nodeTuple, final HttpRequestBase request,
                                      final Set<Integer> ignoreErrorCodes,
                                      final HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory,
                                      final FailureTrackingResponseListener listener) {
-        final HttpHost host = hostTuple.hosts.next();
+        final Node node = nodeTuple.nodes.next();
         //we stream the request body if the entity allows for it
-        final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request);
+        final HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(node.getHost(), request);
         final HttpAsyncResponseConsumer<HttpResponse> asyncResponseConsumer =
                 httpAsyncResponseConsumerFactory.createHttpAsyncResponseConsumer();
         final HttpClientContext context = HttpClientContext.create();
-        context.setAuthCache(hostTuple.authCache);
+        context.setAuthCache(nodeTuple.authCache);
         client.execute(requestProducer, asyncResponseConsumer, context, new FutureCallback<HttpResponse>() {
             @Override
             public void completed(HttpResponse httpResponse) {
                 try {
-                    RequestLogger.logResponse(logger, request, host, httpResponse);
+                    RequestLogger.logResponse(logger, request, node.getHost(), httpResponse);
                     int statusCode = httpResponse.getStatusLine().getStatusCode();
-                    Response response = new Response(request.getRequestLine(), host, httpResponse);
+                    Response response = new Response(request.getRequestLine(), node.getHost(), httpResponse);
                     if (isSuccessfulResponse(statusCode) || ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) {
-                        onResponse(host);
+                        onResponse(node);
                         listener.onSuccess(response);
                     } else {
                         ResponseException responseException = new ResponseException(response);
                         if (isRetryStatus(statusCode)) {
                             //mark host dead and retry against next one
-                            onFailure(host);
+                            onFailure(node);
                             retryIfPossible(responseException);
                         } else {
                             //mark host alive and don't retry, as the error should be a request problem
-                            onResponse(host);
+                            onResponse(node);
                             listener.onDefinitiveFailure(responseException);
                         }
                     }
@@ -511,8 +554,8 @@ public class RestClient implements Closeable {
             @Override
             public void failed(Exception failure) {
                 try {
-                    RequestLogger.logFailedRequest(logger, request, host, failure);
-                    onFailure(host);
+                    RequestLogger.logFailedRequest(logger, request, node, failure);
+                    onFailure(node);
                     retryIfPossible(failure);
                 } catch(Exception e) {
                     listener.onDefinitiveFailure(e);
@@ -520,7 +563,7 @@ public class RestClient implements Closeable {
             }

             private void retryIfPossible(Exception exception) {
-                if (hostTuple.hosts.hasNext()) {
+                if (nodeTuple.nodes.hasNext()) {
                     //in case we are retrying, check whether maxRetryTimeout has been reached
                     long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
                     long timeout = maxRetryTimeoutMillis - timeElapsedMillis;
@@ -531,7 +574,7 @@ public class RestClient implements Closeable {
                 } else {
                     listener.trackFailure(exception);
                     request.reset();
-                    performRequestAsync(startTime, hostTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener);
+                    performRequestAsync(startTime, nodeTuple, request, ignoreErrorCodes, httpAsyncResponseConsumerFactory, listener);
                 }
             } else {
                 listener.onDefinitiveFailure(exception);
@@ -560,54 +603,103 @@ public class RestClient implements Closeable {
     }

     /**
-     * Returns an {@link Iterable} of hosts to be used for a request call.
-     * Ideally, the first host is retrieved from the iterable and used successfully for the request.
-     * Otherwise, after each failure the next host has to be retrieved from the iterator so that the request can be retried until
-     * there are no more hosts available to retry against. The maximum total of attempts is equal to the number of hosts in the iterable.
-     * The iterator returned will never be empty. In case there are no healthy hosts available, or dead ones to be be retried,
-     * one dead host gets returned so that it can be retried.
+     * Returns a non-empty {@link Iterator} of nodes to be used for a request
+     * that match the {@link NodeSelector}.
+     * <p>
+     * If there are no living nodes that match the {@link NodeSelector}
+     * this will return the dead node that matches the {@link NodeSelector}
+     * that is closest to being revived.
+     * @throws IOException if no nodes are available
      */
-    private HostTuple<Iterator<HttpHost>> nextHost() {
-        final HostTuple<Set<HttpHost>> hostTuple = this.hostTuple;
-        Collection<HttpHost> nextHosts = Collections.emptySet();
-        do {
-            Set<HttpHost> filteredHosts = new HashSet<>(hostTuple.hosts);
-            for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
-                if (entry.getValue().shallBeRetried() == false) {
-                    filteredHosts.remove(entry.getKey());
-                }
-            }
-            if (filteredHosts.isEmpty()) {
-                //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
-                List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
-                if (sortedHosts.size() > 0) {
-                    Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
-                        @Override
-                        public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
-                            return o1.getValue().compareTo(o2.getValue());
-                        }
-                    });
-                    HttpHost deadHost = sortedHosts.get(0).getKey();
-                    logger.trace("resurrecting host [" + deadHost + "]");
-                    nextHosts = Collections.singleton(deadHost);
-                }
-            } else {
-                List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
-                Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
-                nextHosts = rotatedHosts;
-            }
-        } while(nextHosts.isEmpty());
-        return new HostTuple<>(nextHosts.iterator(), hostTuple.authCache);
+    private NodeTuple<Iterator<Node>> nextNode(NodeSelector nodeSelector) throws IOException {
+        NodeTuple<List<Node>> nodeTuple = this.nodeTuple;
+        List<Node> hosts = selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector);
+        return new NodeTuple<>(hosts.iterator(), nodeTuple.authCache);
+    }
+
+    /**
+     * Select hosts to try. Package private for testing.
+     */
+    static List<Node> selectHosts(NodeTuple<List<Node>> nodeTuple,
+            Map<HttpHost, DeadHostState> blacklist, AtomicInteger lastNodeIndex,
+            NodeSelector nodeSelector) throws IOException {
+        /*
+         * Sort the nodes into living and dead lists.
+         */
+        List<Node> livingNodes = new ArrayList<>(nodeTuple.nodes.size() - blacklist.size());
+        List<DeadNode> deadNodes = new ArrayList<>(blacklist.size());
+        for (Node node : nodeTuple.nodes) {
+            DeadHostState deadness = blacklist.get(node.getHost());
+            if (deadness == null) {
+                livingNodes.add(node);
+                continue;
+            }
+            if (deadness.shallBeRetried()) {
+                livingNodes.add(node);
+                continue;
+            }
+            deadNodes.add(new DeadNode(node, deadness));
+        }
+
+        if (false == livingNodes.isEmpty()) {
+            /*
+             * Normal state: there is at least one living node. If the
+             * selector is ok with any over the living nodes then use them
+             * for the request.
+             */
+            List<Node> selectedLivingNodes = new ArrayList<>(livingNodes);
+            nodeSelector.select(selectedLivingNodes);
+            if (false == selectedLivingNodes.isEmpty()) {
+                /*
+                 * Rotate the list so subsequent requests will prefer the
+                 * nodes in a different order.
+                 */
+                Collections.rotate(selectedLivingNodes, lastNodeIndex.getAndIncrement());
+                return selectedLivingNodes;
+            }
+        }
+
+        /*
+         * Last resort: If there are no good nodes to use, either because
+         * the selector rejected all the living nodes or because there aren't
+         * any living ones. Either way, we want to revive a single dead node
+         * that the NodeSelectors are OK with. We do this by sorting the dead
+         * nodes by their revival time and passing them through the
+         * NodeSelector so it can have its say in which nodes are ok and their
+         * ordering. If the selector is ok with any of the nodes then use just
+         * the first one in the list because we only want to revive a single
+         * node.
+         */
+        if (false == deadNodes.isEmpty()) {
+            final List<DeadNode> selectedDeadNodes = new ArrayList<>(deadNodes);
+            /*
+             * We'd like NodeSelectors to remove items directly from deadNodes
+             * so we can find the minimum after it is filtered without having
+             * to compare many things. This saves us a sort on the unfiltered
+             * list.
+             */
+            nodeSelector.select(new Iterable<Node>() {
+                @Override
+                public Iterator<Node> iterator() {
+                    return new DeadNodeIteratorAdapter(selectedDeadNodes.iterator());
+                }
+            });
+            if (false == selectedDeadNodes.isEmpty()) {
+                return singletonList(Collections.min(selectedDeadNodes).node);
+            }
+        }
+        throw new IOException("NodeSelector [" + nodeSelector + "] rejected all nodes, "
+                + "living " + livingNodes + " and dead " + deadNodes);
     }

     /**
      * Called after each successful request call.
      * Receives as an argument the host that was used for the successful request.
      */
-    private void onResponse(HttpHost host) {
-        DeadHostState removedHost = this.blacklist.remove(host);
+    private void onResponse(Node node) {
+        DeadHostState removedHost = this.blacklist.remove(node.getHost());
         if (logger.isDebugEnabled() && removedHost != null) {
-            logger.debug("removed host [" + host + "] from blacklist");
+            logger.debug("removed [" + node + "] from blacklist");
         }
     }
@@ -615,20 +707,25 @@ public class RestClient implements Closeable {
      * Called after each failed attempt.
      * Receives as an argument the host that was used for the failed attempt.
      */
-    private void onFailure(HttpHost host) {
+    private void onFailure(Node node) {
        while(true) {
-            DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, new DeadHostState(DeadHostState.TimeSupplier.DEFAULT));
+            DeadHostState previousDeadHostState =
+                blacklist.putIfAbsent(node.getHost(), new DeadHostState(TimeSupplier.DEFAULT));
             if (previousDeadHostState == null) {
-                logger.debug("added host [" + host + "] to blacklist");
+                if (logger.isDebugEnabled()) {
+                    logger.debug("added [" + node + "] to blacklist");
+                }
                 break;
             }
-            if (blacklist.replace(host, previousDeadHostState,
-                    new DeadHostState(previousDeadHostState, DeadHostState.TimeSupplier.DEFAULT))) {
-                logger.debug("updated host [" + host + "] already in blacklist");
+            if (blacklist.replace(node.getHost(), previousDeadHostState,
+                    new DeadHostState(previousDeadHostState))) {
+                if (logger.isDebugEnabled()) {
+                    logger.debug("updated [" + node + "] already in blacklist");
+                }
                 break;
             }
         }
-        failureListener.onFailure(host);
+        failureListener.onFailure(node);
     }

     @Override
@@ -840,6 +937,11 @@ public class RestClient implements Closeable {
                 e.initCause(exception);
                 throw e;
             }
+            if (exception instanceof ConnectException) {
+                ConnectException e = new ConnectException(exception.getMessage());
+                e.initCause(exception);
+                throw e;
+            }
             if (exception instanceof IOException) {
                 throw new IOException(exception.getMessage(), exception);
             }
@@ -862,24 +964,73 @@ public class RestClient implements Closeable {
      */
     public static class FailureListener {
         /**
-         * Notifies that the host provided as argument has just failed
+         * Notifies that the node provided as argument has just failed
          */
-        public void onFailure(HttpHost host) {
-        }
+        public void onFailure(Node node) {}
+    }
+
+    /**
+     * {@link NodeTuple} enables the {@linkplain Node}s and {@linkplain AuthCache}
+     * to be set together in a thread safe, volatile way.
+     */
+    static class NodeTuple<T> {
+        final T nodes;
+        final AuthCache authCache;
+
+        NodeTuple(final T nodes, final AuthCache authCache) {
+            this.nodes = nodes;
+            this.authCache = authCache;
+        }
     }

     /**
-     * {@code HostTuple} enables the {@linkplain HttpHost}s and {@linkplain AuthCache} to be set together in a thread
-     * safe, volatile way.
+     * Contains a reference to a blacklisted node and the time until it is
+     * revived. We use this so we can do a single pass over the blacklist.
      */
-    private static class HostTuple<T> {
-        final T hosts;
-        final AuthCache authCache;
+    private static class DeadNode implements Comparable<DeadNode> {
+        final Node node;
+        final DeadHostState deadness;

-        HostTuple(final T hosts, final AuthCache authCache) {
-            this.hosts = hosts;
-            this.authCache = authCache;
+        DeadNode(Node node, DeadHostState deadness) {
+            this.node = node;
+            this.deadness = deadness;
+        }
+
+        @Override
+        public String toString() {
+            return node.toString();
+        }
+
+        @Override
+        public int compareTo(DeadNode rhs) {
+            return deadness.compareTo(rhs.deadness);
+        }
+    }
+
+    /**
+     * Adapts an <code>Iterator<DeadNodeAndRevival></code> into an
+     * <code>Iterator<Node></code>.
+     */
+    private static class DeadNodeIteratorAdapter implements Iterator<Node> {
+        private final Iterator<DeadNode> itr;
+
+        private DeadNodeIteratorAdapter(Iterator<DeadNode> itr) {
+            this.itr = itr;
+        }
+
+        @Override
+        public boolean hasNext() {
+            return itr.hasNext();
+        }
+
+        @Override
+        public Node next() {
+            return itr.next().node;
+        }
+
+        @Override
+        public void remove() {
+            itr.remove();
        }
     }
 }
```

RestClientBuilder.java

```
@@ -20,7 +20,6 @@
 package org.elasticsearch.client;

 import org.apache.http.Header;
-import org.apache.http.HttpHost;
 import org.apache.http.client.config.RequestConfig;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClientBuilder;
@@ -32,6 +31,7 @@ import javax.net.ssl.SSLContext;
 import java.security.AccessController;
 import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedAction;
+import java.util.List;
 import java.util.Objects;

 /**
@@ -48,7 +48,7 @@ public final class RestClientBuilder {
     private static final Header[] EMPTY_HEADERS = new Header[0];

-    private final HttpHost[] hosts;
+    private final List<Node> nodes;
     private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
     private Header[] defaultHeaders = EMPTY_HEADERS;
     private RestClient.FailureListener failureListener;
@@ -59,18 +59,18 @@ public final class RestClientBuilder {
     /**
      * Creates a new builder instance and sets the hosts that the client will send requests to.
      *
-     * @throws NullPointerException if {@code hosts} or any host is {@code null}.
-     * @throws IllegalArgumentException if {@code hosts} is empty.
+     * @throws IllegalArgumentException if {@code nodes} is {@code null} or empty.
      */
-    RestClientBuilder(HttpHost... hosts) {
-        Objects.requireNonNull(hosts, "hosts must not be null");
-        if (hosts.length == 0) {
-            throw new IllegalArgumentException("no hosts provided");
+    RestClientBuilder(List<Node> nodes) {
+        if (nodes == null || nodes.isEmpty()) {
+            throw new IllegalArgumentException("nodes must not be null or empty");
         }
-        for (HttpHost host : hosts) {
-            Objects.requireNonNull(host, "host cannot be null");
+        for (Node node : nodes) {
+            if (node == null) {
+                throw new IllegalArgumentException("node cannot be null");
+            }
         }
-        this.hosts = hosts;
+        this.nodes = nodes;
     }

     /**
@@ -186,7 +186,7 @@ public final class RestClientBuilder {
                 return createHttpClient();
             }
         });
-        RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, pathPrefix, failureListener);
+        RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, nodes, pathPrefix, failureListener);
         httpClient.start();
         return restClient;
     }
```

DeadHostStateTests.java

```
@@ -21,11 +21,15 @@ package org.elasticsearch.client;

 import java.util.concurrent.TimeUnit;

+import org.elasticsearch.client.DeadHostState.TimeSupplier;
+
 import static org.hamcrest.MatcherAssert.assertThat;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThan;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;

 public class DeadHostStateTests extends RestClientTestCase {
@@ -42,7 +46,7 @@ public class DeadHostStateTests extends RestClientTestCase {
         DeadHostState previous = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT);
         int iters = randomIntBetween(5, 30);
         for (int i = 0; i < iters; i++) {
-            DeadHostState deadHostState = new DeadHostState(previous, DeadHostState.TimeSupplier.DEFAULT);
+            DeadHostState deadHostState = new DeadHostState(previous);
             assertThat(deadHostState.getDeadUntilNanos(), greaterThan(previous.getDeadUntilNanos()));
             assertThat(deadHostState.getFailedAttempts(), equalTo(previous.getFailedAttempts() + 1));
             previous = deadHostState;
@@ -56,7 +60,7 @@ public class DeadHostStateTests extends RestClientTestCase {
             if (i == 0) {
                 deadHostStates[i] = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT);
             } else {
-                deadHostStates[i] = new DeadHostState(deadHostStates[i - 1], DeadHostState.TimeSupplier.DEFAULT);
+                deadHostStates[i] = new DeadHostState(deadHostStates[i - 1]);
             }
         }
         for (int k = 1; k < deadHostStates.length; k++) {
@@ -65,6 +69,17 @@ public class DeadHostStateTests extends RestClientTestCase {
         }
     }

+    public void testCompareToDifferingTimeSupplier() {
+        try {
+            new DeadHostState(TimeSupplier.DEFAULT).compareTo(
+                new DeadHostState(new ConfigurableTimeSupplier()));
+            fail("expected failure");
+        } catch (IllegalArgumentException e) {
+            assertEquals("can't compare DeadHostStates with different clocks [nanoTime != configured[0]]",
+                e.getMessage());
+        }
+    }
+
     public void testShallBeRetried() {
         ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier();
         DeadHostState deadHostState = null;
@@ -74,7 +89,7 @@ public class DeadHostStateTests extends RestClientTestCase {
             if (i == 0) {
                 deadHostState = new DeadHostState(timeSupplier);
             } else {
-                deadHostState = new DeadHostState(deadHostState, timeSupplier);
+                deadHostState = new DeadHostState(deadHostState);
             }
             for (int j = 0; j < expectedTimeoutSecond; j++) {
                 timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1);
@@ -94,25 +109,29 @@ public class DeadHostStateTests extends RestClientTestCase {
         DeadHostState previous = new DeadHostState(zeroTimeSupplier);
         for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) {
             assertThat(TimeUnit.NANOSECONDS.toSeconds(previous.getDeadUntilNanos()), equalTo(expectedTimeoutsSecond));
-            previous = new DeadHostState(previous, zeroTimeSupplier);
+            previous = new DeadHostState(previous);
         }

         //check that from here on the timeout does not increase
         int iters = randomIntBetween(5, 30);
         for (int i = 0; i < iters; i++) {
-            DeadHostState deadHostState = new DeadHostState(previous, zeroTimeSupplier);
+            DeadHostState deadHostState = new DeadHostState(previous);
             assertThat(TimeUnit.NANOSECONDS.toSeconds(deadHostState.getDeadUntilNanos()),
                 equalTo(EXPECTED_TIMEOUTS_SECONDS[EXPECTED_TIMEOUTS_SECONDS.length - 1]));
             previous = deadHostState;
         }
     }

-    private static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier {
+    static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier {
         long nanoTime;

         @Override
         public long nanoTime() {
             return nanoTime;
         }
+
+        @Override
+        public String toString() {
+            return "configured[" + nanoTime + "]";
+        }
     }
 }
```

HostsTrackingFailureListener.java

```
@@ -22,6 +22,7 @@ package org.elasticsearch.client;
 import org.apache.http.HttpHost;

 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;

 import static org.hamcrest.Matchers.containsInAnyOrder;
@@ -29,14 +30,22 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;

 /**
- * {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows to track when it gets called for which host.
+ * {@link RestClient.FailureListener} impl that allows to track when it gets called for which host.
  */
 class HostsTrackingFailureListener extends RestClient.FailureListener {
     private volatile Set<HttpHost> hosts = new HashSet<>();

     @Override
-    public void onFailure(HttpHost host) {
-        hosts.add(host);
+    public void onFailure(Node node) {
+        hosts.add(node.getHost());
+    }
+
+    void assertCalled(List<Node> nodes) {
+        HttpHost[] hosts = new HttpHost[nodes.size()];
+        for (int i = 0 ; i < nodes.size(); i++) {
+            hosts[i] = nodes.get(i).getHost();
+        }
+        assertCalled(hosts);
     }

     void assertCalled(HttpHost... hosts) {
@@ -48,4 +57,4 @@ class HostsTrackingFailureListener extends RestClient.FailureListener {
     void assertNotCalled() {
         assertEquals(0, hosts.size());
     }
 }
```

NodeSelectorTests.java (new file)

@ -0,0 +1,71 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Node.Roles;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.junit.Assert.assertEquals;
public class NodeSelectorTests extends RestClientTestCase {
public void testAny() {
List<Node> nodes = new ArrayList<>();
int size = between(2, 5);
for (int i = 0; i < size; i++) {
nodes.add(dummyNode(randomBoolean(), randomBoolean(), randomBoolean()));
}
List<Node> expected = new ArrayList<>(nodes);
NodeSelector.ANY.select(nodes);
assertEquals(expected, nodes);
}
public void testNotMasterOnly() {
Node masterOnly = dummyNode(true, false, false);
Node all = dummyNode(true, true, true);
Node masterAndData = dummyNode(true, true, false);
Node masterAndIngest = dummyNode(true, false, true);
Node coordinatingOnly = dummyNode(false, false, false);
Node ingestOnly = dummyNode(false, false, true);
Node data = dummyNode(false, true, randomBoolean());
List<Node> nodes = new ArrayList<>();
nodes.add(masterOnly);
nodes.add(all);
nodes.add(masterAndData);
nodes.add(masterAndIngest);
nodes.add(coordinatingOnly);
nodes.add(ingestOnly);
nodes.add(data);
Collections.shuffle(nodes, getRandom());
List<Node> expected = new ArrayList<>(nodes);
expected.remove(masterOnly);
NodeSelector.NOT_MASTER_ONLY.select(nodes);
assertEquals(expected, nodes);
}
private Node dummyNode(boolean master, boolean data, boolean ingest) {
return new Node(new HttpHost("dummy"), Collections.<HttpHost>emptySet(),
randomAsciiAlphanumOfLength(5), randomAsciiAlphanumOfLength(5),
new Roles(master, data, ingest));
}
}
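The built-in selectors cover the common cases, but the `select(Iterable<Node>)` contract supports arbitrary filters. A sketch of a data-node-only selector; it assumes the `Roles` getter `isData()` matches the `data` flag passed to `new Roles(master, data, ingest)` above, and it conservatively keeps nodes whose roles have not been sniffed yet:

```
import java.util.Iterator;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;

public final class DataNodeSelector implements NodeSelector {
    @Override
    public void select(Iterable<Node> nodes) {
        for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
            Node node = itr.next();
            // Keep nodes with unknown roles; dropping them could leave
            // nothing to talk to before the first sniff completes.
            if (node.getRoles() != null && false == node.getRoles().isData()) {
                itr.remove();
            }
        }
    }

    @Override
    public String toString() {
        return "DATA_ONLY"; // surfaces in "rejected all nodes" error messages
    }
}
```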


@ -0,0 +1,71 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Node.Roles;
import java.util.Arrays;
import java.util.HashSet;
import static java.util.Collections.singleton;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class NodeTests extends RestClientTestCase {
public void testToString() {
assertEquals("[host=http://1]", new Node(new HttpHost("1")).toString());
assertEquals("[host=http://1, roles=mdi]", new Node(new HttpHost("1"),
null, null, null, new Roles(true, true, true)).toString());
assertEquals("[host=http://1, version=ver]", new Node(new HttpHost("1"),
null, null, "ver", null).toString());
assertEquals("[host=http://1, name=nam]", new Node(new HttpHost("1"),
null, "nam", null, null).toString());
assertEquals("[host=http://1, bound=[http://1, http://2]]", new Node(new HttpHost("1"),
new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))), null, null, null).toString());
assertEquals("[host=http://1, bound=[http://1, http://2], name=nam, version=ver, roles=m]",
new Node(new HttpHost("1"), new HashSet<>(Arrays.asList(new HttpHost("1"), new HttpHost("2"))),
"nam", "ver", new Roles(true, false, false)).toString());
}
public void testEqualsAndHashCode() {
HttpHost host = new HttpHost(randomAsciiAlphanumOfLength(5));
Node node = new Node(host,
randomBoolean() ? null : singleton(host),
randomBoolean() ? null : randomAsciiAlphanumOfLength(5),
randomBoolean() ? null : randomAsciiAlphanumOfLength(5),
randomBoolean() ? null : new Roles(true, true, true));
assertFalse(node.equals(null));
assertTrue(node.equals(node));
assertEquals(node.hashCode(), node.hashCode());
Node copy = new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), node.getRoles());
assertTrue(node.equals(copy));
assertEquals(node.hashCode(), copy.hashCode());
assertFalse(node.equals(new Node(new HttpHost(host.toHostString() + "changed"), node.getBoundHosts(),
node.getName(), node.getVersion(), node.getRoles())));
assertFalse(node.equals(new Node(host, new HashSet<>(Arrays.asList(host, new HttpHost(host.toHostString() + "changed"))),
node.getName(), node.getVersion(), node.getRoles())));
assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName() + "changed", node.getVersion(), node.getRoles())));
assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion() + "changed", node.getRoles())));
assertFalse(node.equals(new Node(host, node.getBoundHosts(), node.getName(), node.getVersion(), new Roles(false, false, false))));
}
}
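For reference, the five-argument constructor exercised above takes the publish host, the set of bound hosts, and the sniffed name, version, and roles; `null` stands for metadata that has not been sniffed. A small usage sketch with made-up values:

```
import java.util.Arrays;
import java.util.HashSet;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.Node.Roles;

public class NodeExample {
    public static void main(String[] args) {
        Node node = new Node(
            new HttpHost("es1", 9200),                               // publish address
            new HashSet<>(Arrays.asList(new HttpHost("es1", 9200))), // bound addresses
            "es1",                                                   // node name
            "6.3.0",                                                 // version
            new Roles(true, false, false));                          // master-only
        // Prints: [host=http://es1:9200, bound=[http://es1:9200],
        //          name=es1, version=6.3.0, roles=m]
        System.out.println(node);
    }
}
```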


@ -114,6 +114,10 @@ public class RequestOptionsTests extends RestClientTestCase {
} }
} }
if (randomBoolean()) {
builder.setNodeSelector(mock(NodeSelector.class));
}
if (randomBoolean()) { if (randomBoolean()) {
builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); builder.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1));
} }
@ -127,12 +131,15 @@ public class RequestOptionsTests extends RestClientTestCase {
private static RequestOptions mutate(RequestOptions options) { private static RequestOptions mutate(RequestOptions options) {
RequestOptions.Builder mutant = options.toBuilder(); RequestOptions.Builder mutant = options.toBuilder();
int mutationType = between(0, 1); int mutationType = between(0, 2);
switch (mutationType) { switch (mutationType) {
case 0: case 0:
mutant.addHeader("extra", "m"); mutant.addHeader("extra", "m");
return mutant.build(); return mutant.build();
case 1: case 1:
mutant.setNodeSelector(mock(NodeSelector.class));
return mutant.build();
case 2:
mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5));
return mutant.build(); return mutant.build();
default: default:


@ -39,21 +39,42 @@ public class RestClientBuilderTests extends RestClientTestCase {
try { try {
RestClient.builder((HttpHost[])null); RestClient.builder((HttpHost[])null);
fail("should have failed"); fail("should have failed");
} catch(NullPointerException e) { } catch(IllegalArgumentException e) {
assertEquals("hosts must not be null", e.getMessage()); assertEquals("hosts must not be null nor empty", e.getMessage());
} }
try { try {
RestClient.builder(); RestClient.builder(new HttpHost[] {});
fail("should have failed"); fail("should have failed");
} catch(IllegalArgumentException e) { } catch(IllegalArgumentException e) {
assertEquals("no hosts provided", e.getMessage()); assertEquals("hosts must not be null nor empty", e.getMessage());
}
try {
RestClient.builder((Node[])null);
fail("should have failed");
} catch(IllegalArgumentException e) {
assertEquals("nodes must not be null or empty", e.getMessage());
}
try {
RestClient.builder(new Node[] {});
fail("should have failed");
} catch(IllegalArgumentException e) {
assertEquals("nodes must not be null or empty", e.getMessage());
}
try {
RestClient.builder(new Node(new HttpHost("localhost", 9200)), null);
fail("should have failed");
} catch(IllegalArgumentException e) {
assertEquals("node cannot be null", e.getMessage());
} }
try { try {
RestClient.builder(new HttpHost("localhost", 9200), null); RestClient.builder(new HttpHost("localhost", 9200), null);
fail("should have failed"); fail("should have failed");
} catch(NullPointerException e) { } catch(IllegalArgumentException e) {
assertEquals("host cannot be null", e.getMessage()); assertEquals("host cannot be null", e.getMessage());
} }
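These checks pair with the new `Node`-based builder entry point. A sketch of valid construction, mirroring the arguments the tests above reject:

```
import org.apache.http.HttpHost;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClient;

public class BuilderExample {
    public static RestClient build() {
        // Both the HttpHost and the Node overloads reject null or empty
        // arrays and null elements, as asserted above.
        return RestClient.builder(
                new Node(new HttpHost("localhost", 9200)),
                new Node(new HttpHost("localhost", 9201)))
            .build();
    }
}
```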


@ -29,19 +29,24 @@ import org.junit.Before;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import java.io.IOException; import java.io.IOException;
import java.net.ConnectException;
import java.net.InetAddress; import java.net.InetAddress;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import static java.util.Collections.singletonList;
import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes;
import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode;
import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/** /**
* Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
@ -50,31 +55,37 @@ import static org.junit.Assert.assertTrue;
public class RestClientMultipleHostsIntegTests extends RestClientTestCase { public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
private static HttpServer[] httpServers; private static HttpServer[] httpServers;
private static RestClient restClient; private static HttpHost[] httpHosts;
private static boolean stoppedFirstHost = false;
private static String pathPrefixWithoutLeadingSlash;
private static String pathPrefix; private static String pathPrefix;
private static RestClient restClient;
@BeforeClass @BeforeClass
public static void startHttpServer() throws Exception { public static void startHttpServer() throws Exception {
String pathPrefixWithoutLeadingSlash;
if (randomBoolean()) { if (randomBoolean()) {
pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiOfLengthBetween(1, 5); pathPrefixWithoutLeadingSlash = "testPathPrefix/" + randomAsciiLettersOfLengthBetween(1, 5);
pathPrefix = "/" + pathPrefixWithoutLeadingSlash; pathPrefix = "/" + pathPrefixWithoutLeadingSlash;
} else { } else {
pathPrefix = pathPrefixWithoutLeadingSlash = ""; pathPrefix = pathPrefixWithoutLeadingSlash = "";
} }
int numHttpServers = randomIntBetween(2, 4); int numHttpServers = randomIntBetween(2, 4);
httpServers = new HttpServer[numHttpServers]; httpServers = new HttpServer[numHttpServers];
HttpHost[] httpHosts = new HttpHost[numHttpServers]; httpHosts = new HttpHost[numHttpServers];
for (int i = 0; i < numHttpServers; i++) { for (int i = 0; i < numHttpServers; i++) {
HttpServer httpServer = createHttpServer(); HttpServer httpServer = createHttpServer();
httpServers[i] = httpServer; httpServers[i] = httpServer;
httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); httpHosts[i] = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
} }
restClient = buildRestClient();
}
private static RestClient buildRestClient() {
RestClientBuilder restClientBuilder = RestClient.builder(httpHosts); RestClientBuilder restClientBuilder = RestClient.builder(httpHosts);
if (pathPrefix.length() > 0) { if (pathPrefix.length() > 0) {
restClientBuilder.setPathPrefix((randomBoolean() ? "/" : "") + pathPrefixWithoutLeadingSlash); restClientBuilder.setPathPrefix((randomBoolean() ? "/" : "") + pathPrefixWithoutLeadingSlash);
} }
restClient = restClientBuilder.build(); return restClientBuilder.build();
} }
private static HttpServer createHttpServer() throws Exception { private static HttpServer createHttpServer() throws Exception {
@ -118,6 +129,9 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
if (httpServers.length > 1 && randomBoolean()) { if (httpServers.length > 1 && randomBoolean()) {
List<HttpServer> updatedHttpServers = new ArrayList<>(httpServers.length - 1); List<HttpServer> updatedHttpServers = new ArrayList<>(httpServers.length - 1);
int nodeIndex = randomInt(httpServers.length - 1); int nodeIndex = randomInt(httpServers.length - 1);
if (0 == nodeIndex) {
stoppedFirstHost = true;
}
for (int i = 0; i < httpServers.length; i++) { for (int i = 0; i < httpServers.length; i++) {
HttpServer httpServer = httpServers[i]; HttpServer httpServer = httpServers[i];
if (i == nodeIndex) { if (i == nodeIndex) {
@ -182,6 +196,35 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
} }
} }
/**
* Test host selector against a real server <strong>and</strong>
* test what happens when the only node it selects has been stopped.
*/
public void testNodeSelector() throws IOException {
Request request = new Request("GET", "/200");
RequestOptions.Builder options = request.getOptions().toBuilder();
options.setNodeSelector(firstPositionNodeSelector());
request.setOptions(options);
int rounds = between(1, 10);
for (int i = 0; i < rounds; i++) {
/*
* Run the request more than once to verify that the
* NodeSelector overrides the round robin behavior.
*/
if (stoppedFirstHost) {
try {
restClient.performRequest(request);
fail("expected to fail to connect");
} catch (ConnectException e) {
assertEquals("Connection refused", e.getMessage());
}
} else {
Response response = restClient.performRequest(request);
assertEquals(httpHosts[0], response.getHost());
}
}
}
private static class TestResponse { private static class TestResponse {
private final String method; private final String method;
private final int statusCode; private final int statusCode;
@ -203,4 +246,17 @@ public class RestClientMultipleHostsIntegTests extends RestClientTestCase {
throw new AssertionError("unexpected response " + response.getClass()); throw new AssertionError("unexpected response " + response.getClass());
} }
} }
private NodeSelector firstPositionNodeSelector() {
return new NodeSelector() {
@Override
public void select(Iterable<Node> nodes) {
for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
if (httpHosts[0] != itr.next().getHost()) {
itr.remove();
}
}
}
};
}
} }


@ -35,6 +35,7 @@ import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine; import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer; import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.elasticsearch.client.Node.Roles;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import org.mockito.invocation.InvocationOnMock; import org.mockito.invocation.InvocationOnMock;
@ -42,19 +43,24 @@ import org.mockito.stubbing.Answer;
import java.io.IOException; import java.io.IOException;
import java.net.SocketTimeoutException; import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set; import java.util.Set;
import java.util.concurrent.ExecutorService; import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.Future; import java.util.concurrent.Future;
import static java.util.Collections.singletonList;
import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode;
import static org.elasticsearch.client.RestClientTestUtil.randomErrorRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomErrorRetryStatusCode;
import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod;
import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode;
import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.hasItem;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat; import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertTrue;
@ -71,7 +77,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
private ExecutorService exec = Executors.newFixedThreadPool(1); private ExecutorService exec = Executors.newFixedThreadPool(1);
private RestClient restClient; private RestClient restClient;
private HttpHost[] httpHosts; private List<Node> nodes;
private HostsTrackingFailureListener failureListener; private HostsTrackingFailureListener failureListener;
@Before @Before
@ -108,13 +114,14 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
return null; return null;
} }
}); });
int numHosts = RandomNumbers.randomIntBetween(getRandom(), 2, 5); int numNodes = RandomNumbers.randomIntBetween(getRandom(), 2, 5);
httpHosts = new HttpHost[numHosts]; nodes = new ArrayList<>(numNodes);
for (int i = 0; i < numHosts; i++) { for (int i = 0; i < numNodes; i++) {
httpHosts[i] = new HttpHost("localhost", 9200 + i); nodes.add(new Node(new HttpHost("localhost", 9200 + i)));
} }
nodes = Collections.unmodifiableList(nodes);
failureListener = new HostsTrackingFailureListener(); failureListener = new HostsTrackingFailureListener();
restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, null, failureListener); restClient = new RestClient(httpClient, 10000, new Header[0], nodes, null, failureListener);
} }
/** /**
@ -128,9 +135,8 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
public void testRoundRobinOkStatusCodes() throws IOException { public void testRoundRobinOkStatusCodes() throws IOException {
int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
for (int i = 0; i < numIters; i++) { for (int i = 0; i < numIters; i++) {
Set<HttpHost> hostsSet = new HashSet<>(); Set<HttpHost> hostsSet = hostsSet();
Collections.addAll(hostsSet, httpHosts); for (int j = 0; j < nodes.size(); j++) {
for (int j = 0; j < httpHosts.length; j++) {
int statusCode = randomOkStatusCode(getRandom()); int statusCode = randomOkStatusCode(getRandom());
Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode); Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode);
assertEquals(statusCode, response.getStatusLine().getStatusCode()); assertEquals(statusCode, response.getStatusLine().getStatusCode());
@ -144,9 +150,8 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
public void testRoundRobinNoRetryErrors() throws IOException { public void testRoundRobinNoRetryErrors() throws IOException {
int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5); int numIters = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
for (int i = 0; i < numIters; i++) { for (int i = 0; i < numIters; i++) {
Set<HttpHost> hostsSet = new HashSet<>(); Set<HttpHost> hostsSet = hostsSet();
Collections.addAll(hostsSet, httpHosts); for (int j = 0; j < nodes.size(); j++) {
for (int j = 0; j < httpHosts.length; j++) {
String method = randomHttpMethod(getRandom()); String method = randomHttpMethod(getRandom());
int statusCode = randomErrorNoRetryStatusCode(getRandom()); int statusCode = randomErrorNoRetryStatusCode(getRandom());
try { try {
@ -185,10 +190,9 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
* the caller. It wraps the exception that contains the failed hosts. * the caller. It wraps the exception that contains the failed hosts.
*/ */
e = (ResponseException) e.getCause(); e = (ResponseException) e.getCause();
Set<HttpHost> hostsSet = new HashSet<>(); Set<HttpHost> hostsSet = hostsSet();
Collections.addAll(hostsSet, httpHosts);
//first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
failureListener.assertCalled(httpHosts); failureListener.assertCalled(nodes);
do { do {
Response response = e.getResponse(); Response response = e.getResponse();
assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusLine().getStatusCode()); assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusLine().getStatusCode());
@ -210,10 +214,9 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
* the caller. It wraps the exception that contains the failed hosts. * the caller. It wraps the exception that contains the failed hosts.
*/ */
e = (IOException) e.getCause(); e = (IOException) e.getCause();
Set<HttpHost> hostsSet = new HashSet<>(); Set<HttpHost> hostsSet = hostsSet();
Collections.addAll(hostsSet, httpHosts);
//first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
failureListener.assertCalled(httpHosts); failureListener.assertCalled(nodes);
do { do {
HttpHost httpHost = HttpHost.create(e.getMessage()); HttpHost httpHost = HttpHost.create(e.getMessage());
assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost));
@ -232,9 +235,8 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5); int numIters = RandomNumbers.randomIntBetween(getRandom(), 2, 5);
for (int i = 1; i <= numIters; i++) { for (int i = 1; i <= numIters; i++) {
//check that one different host is resurrected at each new attempt //check that one different host is resurrected at each new attempt
Set<HttpHost> hostsSet = new HashSet<>(); Set<HttpHost> hostsSet = hostsSet();
Collections.addAll(hostsSet, httpHosts); for (int j = 0; j < nodes.size(); j++) {
for (int j = 0; j < httpHosts.length; j++) {
retryEndpoint = randomErrorRetryEndpoint(); retryEndpoint = randomErrorRetryEndpoint();
try { try {
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint); restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
@ -308,6 +310,58 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
} }
} }
public void testNodeSelector() throws IOException {
NodeSelector firstPositionOnly = new NodeSelector() {
@Override
public void select(Iterable<Node> restClientNodes) {
boolean found = false;
for (Iterator<Node> itr = restClientNodes.iterator(); itr.hasNext();) {
if (nodes.get(0) == itr.next()) {
found = true;
} else {
itr.remove();
}
}
assertTrue(found);
}
};
int rounds = between(1, 10);
for (int i = 0; i < rounds; i++) {
/*
* Run the request more than once to verify that the
* NodeSelector overrides the round robin behavior.
*/
Request request = new Request("GET", "/200");
RequestOptions.Builder options = request.getOptions().toBuilder();
options.setNodeSelector(firstPositionOnly);
request.setOptions(options);
Response response = restClient.performRequest(request);
assertEquals(nodes.get(0).getHost(), response.getHost());
}
}
public void testSetNodes() throws IOException {
List<Node> newNodes = new ArrayList<>(nodes.size());
for (int i = 0; i < nodes.size(); i++) {
Roles roles = i == 0 ? new Roles(false, true, true) : new Roles(true, false, false);
newNodes.add(new Node(nodes.get(i).getHost(), null, null, null, roles));
}
restClient.setNodes(newNodes);
int rounds = between(1, 10);
for (int i = 0; i < rounds; i++) {
/*
* Run the request more than once to verify that the
* NodeSelector overrides the round robin behavior.
*/
Request request = new Request("GET", "/200");
RequestOptions.Builder options = request.getOptions().toBuilder();
options.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
request.setOptions(options);
Response response = restClient.performRequest(request);
assertEquals(newNodes.get(0).getHost(), response.getHost());
}
}
private static String randomErrorRetryEndpoint() { private static String randomErrorRetryEndpoint() {
switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) { switch(RandomNumbers.randomIntBetween(getRandom(), 0, 3)) {
case 0: case 0:
@ -321,4 +375,16 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
} }
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
/**
* Build a mutable {@link Set} containing all the {@link Node#getHost() hosts}
* in use by the test.
*/
private Set<HttpHost> hostsSet() {
Set<HttpHost> hosts = new HashSet<>();
for (Node node : nodes) {
hosts.add(node.getHost());
}
return hosts;
}
} }


@ -65,6 +65,7 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.Future; import java.util.concurrent.Future;
import static java.util.Collections.singletonList;
import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes;
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes; import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes;
@ -94,7 +95,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
private ExecutorService exec = Executors.newFixedThreadPool(1); private ExecutorService exec = Executors.newFixedThreadPool(1);
private RestClient restClient; private RestClient restClient;
private Header[] defaultHeaders; private Header[] defaultHeaders;
private HttpHost httpHost; private Node node;
private CloseableHttpAsyncClient httpClient; private CloseableHttpAsyncClient httpClient;
private HostsTrackingFailureListener failureListener; private HostsTrackingFailureListener failureListener;
@ -108,7 +109,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable { public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0];
HttpClientContext context = (HttpClientContext) invocationOnMock.getArguments()[2]; HttpClientContext context = (HttpClientContext) invocationOnMock.getArguments()[2];
assertThat(context.getAuthCache().get(httpHost), instanceOf(BasicScheme.class)); assertThat(context.getAuthCache().get(node.getHost()), instanceOf(BasicScheme.class));
final FutureCallback<HttpResponse> futureCallback = final FutureCallback<HttpResponse> futureCallback =
(FutureCallback<HttpResponse>) invocationOnMock.getArguments()[3]; (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[3];
HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest(); HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest();
@ -146,9 +147,10 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}); });
defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default");
httpHost = new HttpHost("localhost", 9200); node = new Node(new HttpHost("localhost", 9200));
failureListener = new HostsTrackingFailureListener(); failureListener = new HostsTrackingFailureListener();
restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, null, failureListener); restClient = new RestClient(httpClient, 10000, defaultHeaders,
singletonList(node), null, failureListener);
} }
/** /**
@ -244,7 +246,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) { if (errorStatusCode <= 500 || expectedIgnores.contains(errorStatusCode)) {
failureListener.assertNotCalled(); failureListener.assertNotCalled();
} else { } else {
failureListener.assertCalled(httpHost); failureListener.assertCalled(singletonList(node));
} }
} }
} }
@ -259,14 +261,14 @@ public class RestClientSingleHostTests extends RestClientTestCase {
} catch(IOException e) { } catch(IOException e) {
assertThat(e, instanceOf(ConnectTimeoutException.class)); assertThat(e, instanceOf(ConnectTimeoutException.class));
} }
failureListener.assertCalled(httpHost); failureListener.assertCalled(singletonList(node));
try { try {
performRequest(method, "/soe"); performRequest(method, "/soe");
fail("request should have failed"); fail("request should have failed");
} catch(IOException e) { } catch(IOException e) {
assertThat(e, instanceOf(SocketTimeoutException.class)); assertThat(e, instanceOf(SocketTimeoutException.class));
} }
failureListener.assertCalled(httpHost); failureListener.assertCalled(singletonList(node));
} }
} }


@ -22,14 +22,23 @@ package org.elasticsearch.client;
import org.apache.http.Header; import org.apache.http.Header;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.elasticsearch.client.DeadHostStateTests.ConfigurableTimeSupplier;
import org.elasticsearch.client.RestClient.NodeTuple;
import java.io.IOException; import java.io.IOException;
import java.net.URI; import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import static java.util.Collections.singletonList;
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
@ -43,9 +52,9 @@ import static org.mockito.Mockito.verify;
public class RestClientTests extends RestClientTestCase { public class RestClientTests extends RestClientTestCase {
public void testCloseIsIdempotent() throws IOException { public void testCloseIsIdempotent() throws IOException {
HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)}; List<Node> nodes = singletonList(new Node(new HttpHost("localhost", 9200)));
CloseableHttpAsyncClient closeableHttpAsyncClient = mock(CloseableHttpAsyncClient.class); CloseableHttpAsyncClient closeableHttpAsyncClient = mock(CloseableHttpAsyncClient.class);
RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], hosts, null, null); RestClient restClient = new RestClient(closeableHttpAsyncClient, 1_000, new Header[0], nodes, null, null);
restClient.close(); restClient.close();
verify(closeableHttpAsyncClient, times(1)).close(); verify(closeableHttpAsyncClient, times(1)).close();
restClient.close(); restClient.close();
@ -225,6 +234,7 @@ public class RestClientTests extends RestClientTestCase {
} }
} }
@Deprecated
public void testSetHostsWrongArguments() throws IOException { public void testSetHostsWrongArguments() throws IOException {
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
restClient.setHosts((HttpHost[]) null); restClient.setHosts((HttpHost[]) null);
@ -241,45 +251,75 @@ public class RestClientTests extends RestClientTestCase {
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
restClient.setHosts((HttpHost) null); restClient.setHosts((HttpHost) null);
fail("setHosts should have failed"); fail("setHosts should have failed");
} catch (NullPointerException e) { } catch (IllegalArgumentException e) {
assertEquals("host cannot be null", e.getMessage()); assertEquals("host cannot be null", e.getMessage());
} }
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201));
fail("setHosts should have failed"); fail("setHosts should have failed");
} catch (NullPointerException e) { } catch (IllegalArgumentException e) {
assertEquals("host cannot be null", e.getMessage()); assertEquals("host cannot be null", e.getMessage());
} }
} }
public void testSetHostsPreservesOrdering() throws Exception { public void testSetNodesWrongArguments() throws IOException {
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
HttpHost[] hosts = randomHosts(); restClient.setNodes(null);
restClient.setHosts(hosts); fail("setNodes should have failed");
assertEquals(Arrays.asList(hosts), restClient.getHosts()); } catch (IllegalArgumentException e) {
assertEquals("nodes must not be null or empty", e.getMessage());
}
try (RestClient restClient = createRestClient()) {
restClient.setNodes(Collections.<Node>emptyList());
fail("setNodes should have failed");
} catch (IllegalArgumentException e) {
assertEquals("nodes must not be null or empty", e.getMessage());
}
try (RestClient restClient = createRestClient()) {
restClient.setNodes(Collections.singletonList((Node) null));
fail("setNodes should have failed");
} catch (NullPointerException e) {
assertEquals("node cannot be null", e.getMessage());
}
try (RestClient restClient = createRestClient()) {
restClient.setNodes(Arrays.asList(
new Node(new HttpHost("localhost", 9200)),
null,
new Node(new HttpHost("localhost", 9201))));
fail("setNodes should have failed");
} catch (NullPointerException e) {
assertEquals("node cannot be null", e.getMessage());
} }
} }
private static HttpHost[] randomHosts() { public void testSetNodesPreservesOrdering() throws Exception {
int numHosts = randomIntBetween(1, 10); try (RestClient restClient = createRestClient()) {
HttpHost[] hosts = new HttpHost[numHosts]; List<Node> nodes = randomNodes();
for (int i = 0; i < hosts.length; i++) { restClient.setNodes(nodes);
hosts[i] = new HttpHost("host-" + i, 9200); assertEquals(nodes, restClient.getNodes());
} }
return hosts;
} }
public void testSetHostsDuplicatedHosts() throws Exception { private static List<Node> randomNodes() {
int numNodes = randomIntBetween(1, 10);
List<Node> nodes = new ArrayList<>(numNodes);
for (int i = 0; i < numNodes; i++) {
nodes.add(new Node(new HttpHost("host-" + i, 9200)));
}
return nodes;
}
public void testSetNodesDuplicatedHosts() throws Exception {
try (RestClient restClient = createRestClient()) { try (RestClient restClient = createRestClient()) {
int numHosts = randomIntBetween(1, 10); int numNodes = randomIntBetween(1, 10);
HttpHost[] hosts = new HttpHost[numHosts]; List<Node> nodes = new ArrayList<>(numNodes);
HttpHost host = new HttpHost("host", 9200); Node node = new Node(new HttpHost("host", 9200));
for (int i = 0; i < hosts.length; i++) { for (int i = 0; i < numNodes; i++) {
hosts[i] = host; nodes.add(node);
} }
restClient.setHosts(hosts); restClient.setNodes(nodes);
assertEquals(1, restClient.getHosts().size()); assertEquals(1, restClient.getNodes().size());
assertEquals(host, restClient.getHosts().get(0)); assertEquals(node, restClient.getNodes().get(0));
} }
} }
@ -300,8 +340,143 @@ public class RestClientTests extends RestClientTestCase {
} }
} }
private static RestClient createRestClient() { public void testSelectHosts() throws IOException {
HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)}; Node n1 = new Node(new HttpHost("1"), null, null, "1", null);
return new RestClient(mock(CloseableHttpAsyncClient.class), randomIntBetween(1_000, 30_000), new Header[]{}, hosts, null, null); Node n2 = new Node(new HttpHost("2"), null, null, "2", null);
Node n3 = new Node(new HttpHost("3"), null, null, "3", null);
NodeSelector not1 = new NodeSelector() {
@Override
public void select(Iterable<Node> nodes) {
for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
if ("1".equals(itr.next().getVersion())) {
itr.remove();
}
}
}
@Override
public String toString() {
return "NOT 1";
}
};
NodeSelector noNodes = new NodeSelector() {
@Override
public void select(Iterable<Node> nodes) {
for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
itr.next();
itr.remove();
}
}
@Override
public String toString() {
return "NONE";
}
};
NodeTuple<List<Node>> nodeTuple = new NodeTuple<>(Arrays.asList(n1, n2, n3), null);
Map<HttpHost, DeadHostState> emptyBlacklist = Collections.emptyMap();
// Normal cases where the node selector doesn't reject all living nodes
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, emptyBlacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, emptyBlacklist, not1);
/*
* Try a NodeSelector that excludes all nodes. This should
* throw an exception
*/
{
String message = "NodeSelector [NONE] rejected all nodes, living ["
+ "[host=http://1, version=1], [host=http://2, version=2], "
+ "[host=http://3, version=3]] and dead []";
assertEquals(message, assertSelectAllRejected(nodeTuple, emptyBlacklist, noNodes));
}
// Mark all the nodes dead for a few test cases
{
ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier();
Map<HttpHost, DeadHostState> blacklist = new HashMap<>();
blacklist.put(n1.getHost(), new DeadHostState(timeSupplier));
blacklist.put(n2.getHost(), new DeadHostState(new DeadHostState(timeSupplier)));
blacklist.put(n3.getHost(), new DeadHostState(new DeadHostState(new DeadHostState(timeSupplier))));
/*
* selectHosts will revive a single host regardless of the
* blacklist timeout, picking the node that is closest to being
* revived among those the NodeSelector accepts.
*/
assertEquals(singletonList(n1), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), NodeSelector.ANY));
assertEquals(singletonList(n2), RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(), not1));
/*
* Try a NodeSelector that excludes all nodes. This should
* return a failure, but a different failure than when the
* blacklist is empty so that the caller knows that all of
* their nodes are blacklisted AND blocked.
*/
String message = "NodeSelector [NONE] rejected all nodes, living [] and dead ["
+ "[host=http://1, version=1], [host=http://2, version=2], "
+ "[host=http://3, version=3]]";
assertEquals(message, assertSelectAllRejected(nodeTuple, blacklist, noNodes));
/*
* Now lets wind the clock forward, past the timeout for one of
* the dead nodes. We should return it.
*/
timeSupplier.nanoTime = new DeadHostState(timeSupplier).getDeadUntilNanos();
assertSelectLivingHosts(Arrays.asList(n1), nodeTuple, blacklist, NodeSelector.ANY);
/*
* But if the NodeSelector rejects that node then we'll pick the
* first one that the NodeSelector doesn't reject.
*/
assertSelectLivingHosts(Arrays.asList(n2), nodeTuple, blacklist, not1);
/*
* If we wind the clock way into the future, past any of the
* blacklist timeouts then we function as though the nodes aren't
* in the blacklist at all.
*/
timeSupplier.nanoTime += DeadHostState.MAX_CONNECTION_TIMEOUT_NANOS;
assertSelectLivingHosts(Arrays.asList(n1, n2, n3), nodeTuple, blacklist, NodeSelector.ANY);
assertSelectLivingHosts(Arrays.asList(n2, n3), nodeTuple, blacklist, not1);
}
} }
private void assertSelectLivingHosts(List<Node> expectedNodes, NodeTuple<List<Node>> nodeTuple,
Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) throws IOException {
int iterations = 1000;
AtomicInteger lastNodeIndex = new AtomicInteger(0);
assertEquals(expectedNodes, RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
// Calling it again rotates the set of results
for (int i = 1; i < iterations; i++) {
Collections.rotate(expectedNodes, 1);
assertEquals("iteration " + i, expectedNodes,
RestClient.selectHosts(nodeTuple, blacklist, lastNodeIndex, nodeSelector));
}
}
/**
* Assert that {@link RestClient#selectHosts} fails on the provided arguments.
* @return the message in the exception thrown by the failure
*/
private String assertSelectAllRejected(NodeTuple<List<Node>> nodeTuple,
Map<HttpHost, DeadHostState> blacklist, NodeSelector nodeSelector) {
try {
RestClient.selectHosts(nodeTuple, blacklist, new AtomicInteger(0), nodeSelector);
throw new AssertionError("expected selectHosts to fail");
} catch (IOException e) {
return e.getMessage();
}
}
private static RestClient createRestClient() {
List<Node> nodes = Collections.singletonList(new Node(new HttpHost("localhost", 9200)));
return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000),
new Header[] {}, nodes, null, null);
}
} }
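testSelectHosts walks the selection policy: living nodes that pass the selector win; when none remain, the dead node closest to revival that the selector accepts is retried; when the selector rejects everything, the request fails with one of the messages asserted above. A simplified, self-contained model of that policy, not the client's actual implementation (which also rotates results for round robin):

```
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;

public class SelectionModel {
    /**
     * Prefer living nodes the selector accepts; otherwise revive the
     * accepted dead node whose timeout expires soonest. Returns null
     * when the selector rejects every node, which the real client
     * reports as an IOException.
     */
    static Node choose(List<Node> living, Map<Node, Long> deadUntilNanos, NodeSelector selector) {
        List<Node> candidates = new ArrayList<>(living);
        selector.select(candidates);
        if (false == candidates.isEmpty()) {
            return candidates.get(0);
        }
        List<Node> dead = new ArrayList<>(deadUntilNanos.keySet());
        selector.select(dead);
        Node soonest = null;
        for (Node node : dead) {
            if (soonest == null || deadUntilNanos.get(node) < deadUntilNanos.get(soonest)) {
                soonest = node;
            }
        }
        return soonest;
    }
}
```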


@ -37,6 +37,9 @@ import org.apache.http.ssl.SSLContextBuilder;
import org.apache.http.ssl.SSLContexts; import org.apache.http.ssl.SSLContexts;
import org.apache.http.util.EntityUtils; import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.client.Request; import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
@ -72,6 +75,19 @@ import java.util.concurrent.CountDownLatch;
*/ */
@SuppressWarnings("unused") @SuppressWarnings("unused")
public class RestClientDocumentation { public class RestClientDocumentation {
private static final String TOKEN = "DUMMY";
// tag::rest-client-options-singleton
private static final RequestOptions COMMON_OPTIONS;
static {
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder();
builder.addHeader("Authorization", "Bearer " + TOKEN); // <1>
builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); // <2>
builder.setHttpAsyncResponseConsumerFactory( // <3>
new HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
COMMON_OPTIONS = builder.build();
}
// end::rest-client-options-singleton
@SuppressWarnings("unused") @SuppressWarnings("unused")
public void testUsage() throws IOException, InterruptedException { public void testUsage() throws IOException, InterruptedException {
@ -104,7 +120,7 @@ public class RestClientDocumentation {
RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http")); RestClientBuilder builder = RestClient.builder(new HttpHost("localhost", 9200, "http"));
builder.setFailureListener(new RestClient.FailureListener() { builder.setFailureListener(new RestClient.FailureListener() {
@Override @Override
public void onFailure(HttpHost host) { public void onFailure(Node node) {
// <1> // <1>
} }
}); });
@ -172,22 +188,14 @@ public class RestClientDocumentation {
//tag::rest-client-body-shorter //tag::rest-client-body-shorter
request.setJsonEntity("{\"json\":\"text\"}"); request.setJsonEntity("{\"json\":\"text\"}");
//end::rest-client-body-shorter //end::rest-client-body-shorter
{ //tag::rest-client-options-set-singleton
//tag::rest-client-headers request.setOptions(COMMON_OPTIONS);
RequestOptions.Builder options = request.getOptions().toBuilder(); //end::rest-client-options-set-singleton
options.addHeader("Accept", "text/plain"); //tag::rest-client-options-customize
options.addHeader("Cache-Control", "no-cache"); RequestOptions.Builder options = COMMON_OPTIONS.toBuilder();
request.setOptions(options); options.addHeader("cats", "knock things off of other things");
//end::rest-client-headers request.setOptions(options);
} //end::rest-client-options-customize
{
//tag::rest-client-response-consumer
RequestOptions.Builder options = request.getOptions().toBuilder();
options.setHttpAsyncResponseConsumerFactory(
new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(30 * 1024 * 1024));
request.setOptions(options);
//end::rest-client-response-consumer
}
} }
{ {
HttpEntity[] documents = new HttpEntity[10]; HttpEntity[] documents = new HttpEntity[10];


@ -26,31 +26,36 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.http.HttpEntity; import org.apache.http.HttpEntity;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response; import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.Node.Roles;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.net.URI; import java.net.URI;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.Objects; import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
/** /**
* Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back.
* Compatible with elasticsearch 5.x and 2.x. * Compatible with elasticsearch 2.x+.
*/ */
public final class ElasticsearchHostsSniffer implements HostsSniffer { public final class ElasticsearchNodesSniffer implements NodesSniffer {
private static final Log logger = LogFactory.getLog(ElasticsearchHostsSniffer.class); private static final Log logger = LogFactory.getLog(ElasticsearchNodesSniffer.class);
public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1);
private final RestClient restClient; private final RestClient restClient;
private final Map<String, String> sniffRequestParams; private final Request request;
private final Scheme scheme; private final Scheme scheme;
private final JsonFactory jsonFactory = new JsonFactory(); private final JsonFactory jsonFactory = new JsonFactory();
@ -62,8 +67,8 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
* that is also provided to {@link Sniffer#builder(RestClient)}, so that the hosts are set to the same * that is also provided to {@link Sniffer#builder(RestClient)}, so that the hosts are set to the same
* client that was used to fetch them. * client that was used to fetch them.
*/ */
public ElasticsearchHostsSniffer(RestClient restClient) { public ElasticsearchNodesSniffer(RestClient restClient) {
this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchHostsSniffer.Scheme.HTTP); this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchNodesSniffer.Scheme.HTTP);
} }
/** /**
@ -77,30 +82,32 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
* that have responded within this timeout will be returned. * that have responded within this timeout will be returned.
* @param scheme the scheme to associate sniffed nodes with (as it is not returned by elasticsearch) * @param scheme the scheme to associate sniffed nodes with (as it is not returned by elasticsearch)
*/ */
public ElasticsearchHostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) { public ElasticsearchNodesSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) {
this.restClient = Objects.requireNonNull(restClient, "restClient cannot be null"); this.restClient = Objects.requireNonNull(restClient, "restClient cannot be null");
if (sniffRequestTimeoutMillis < 0) { if (sniffRequestTimeoutMillis < 0) {
throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0"); throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0");
} }
this.sniffRequestParams = Collections.<String, String>singletonMap("timeout", sniffRequestTimeoutMillis + "ms"); this.request = new Request("GET", "/_nodes/http");
request.addParameter("timeout", sniffRequestTimeoutMillis + "ms");
this.scheme = Objects.requireNonNull(scheme, "scheme cannot be null"); this.scheme = Objects.requireNonNull(scheme, "scheme cannot be null");
} }
/** /**
* Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts * Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts
*/ */
public List<HttpHost> sniffHosts() throws IOException { @Override
Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams); public List<Node> sniff() throws IOException {
return readHosts(response.getEntity()); Response response = restClient.performRequest(request);
return readHosts(response.getEntity(), scheme, jsonFactory);
} }
private List<HttpHost> readHosts(HttpEntity entity) throws IOException { static List<Node> readHosts(HttpEntity entity, Scheme scheme, JsonFactory jsonFactory) throws IOException {
try (InputStream inputStream = entity.getContent()) { try (InputStream inputStream = entity.getContent()) {
JsonParser parser = jsonFactory.createParser(inputStream); JsonParser parser = jsonFactory.createParser(inputStream);
if (parser.nextToken() != JsonToken.START_OBJECT) { if (parser.nextToken() != JsonToken.START_OBJECT) {
throw new IOException("expected data to start with an object"); throw new IOException("expected data to start with an object");
} }
List<HttpHost> hosts = new ArrayList<>(); List<Node> nodes = new ArrayList<>();
while (parser.nextToken() != JsonToken.END_OBJECT) { while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.START_OBJECT) { if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
if ("nodes".equals(parser.getCurrentName())) { if ("nodes".equals(parser.getCurrentName())) {
@ -108,10 +115,9 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
JsonToken token = parser.nextToken(); JsonToken token = parser.nextToken();
assert token == JsonToken.START_OBJECT; assert token == JsonToken.START_OBJECT;
String nodeId = parser.getCurrentName(); String nodeId = parser.getCurrentName();
HttpHost sniffedHost = readHost(nodeId, parser, this.scheme); Node node = readNode(nodeId, parser, scheme);
if (sniffedHost != null) { if (node != null) {
logger.trace("adding node [" + nodeId + "]"); nodes.add(node);
hosts.add(sniffedHost);
} }
} }
} else { } else {
@ -119,13 +125,31 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
} }
} }
} }
return hosts; return nodes;
} }
} }
private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException { private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
HttpHost httpHost = null; HttpHost publishedHost = null;
/*
* We sniff the bound hosts so we can look up the node based on any
* address on which it is listening. This is useful in Elasticsearch's
* test framework where we sometimes publish ipv6 addresses but the
* tests contact the node on ipv4.
*/
Set<HttpHost> boundHosts = new HashSet<>();
String name = null;
String version = null;
String fieldName = null; String fieldName = null;
// Used to read roles from 5.0+
boolean sawRoles = false;
boolean master = false;
boolean data = false;
boolean ingest = false;
// Used to read roles from 2.x
Boolean masterAttribute = null;
Boolean dataAttribute = null;
boolean clientAttribute = false;
while (parser.nextToken() != JsonToken.END_OBJECT) { while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
fieldName = parser.getCurrentName(); fieldName = parser.getCurrentName();
@ -133,9 +157,27 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
if ("http".equals(fieldName)) { if ("http".equals(fieldName)) {
while (parser.nextToken() != JsonToken.END_OBJECT) { while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) { if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) {
URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); URI publishAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), publishedHost = new HttpHost(publishAddressAsURI.getHost(), publishAddressAsURI.getPort(),
boundAddressAsURI.getScheme()); publishAddressAsURI.getScheme());
} else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) {
while (parser.nextToken() != JsonToken.END_ARRAY) {
URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
boundHosts.add(new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(),
boundAddressAsURI.getScheme()));
}
} else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
parser.skipChildren();
}
}
} else if ("attributes".equals(fieldName)) {
while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "master".equals(parser.getCurrentName())) {
masterAttribute = toBoolean(parser.getValueAsString());
} else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "data".equals(parser.getCurrentName())) {
dataAttribute = toBoolean(parser.getValueAsString());
} else if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "client".equals(parser.getCurrentName())) {
clientAttribute = toBoolean(parser.getValueAsString());
} else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
parser.skipChildren(); parser.skipChildren();
} }
@ -143,14 +185,55 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
} else { } else {
parser.skipChildren(); parser.skipChildren();
} }
} else if (parser.currentToken() == JsonToken.START_ARRAY) {
if ("roles".equals(fieldName)) {
sawRoles = true;
while (parser.nextToken() != JsonToken.END_ARRAY) {
switch (parser.getText()) {
case "master":
master = true;
break;
case "data":
data = true;
break;
case "ingest":
ingest = true;
break;
default:
logger.warn("unknown role [" + parser.getText() + "] on node [" + nodeId + "]");
}
}
} else {
parser.skipChildren();
}
} else if (parser.currentToken().isScalarValue()) {
if ("version".equals(fieldName)) {
version = parser.getText();
} else if ("name".equals(fieldName)) {
name = parser.getText();
}
} }
} }
//http section is not present if http is not enabled on the node, ignore such nodes //http section is not present if http is not enabled on the node, ignore such nodes
if (httpHost == null) { if (publishedHost == null) {
logger.debug("skipping node [" + nodeId + "] with http disabled"); logger.debug("skipping node [" + nodeId + "] with http disabled");
return null; return null;
} else {
logger.trace("adding node [" + nodeId + "]");
if (version.startsWith("2.")) {
/*
* 2.x doesn't send roles, instead we try to read them from
* attributes.
*/
master = masterAttribute == null ? false == clientAttribute : masterAttribute;
data = dataAttribute == null ? false == clientAttribute : dataAttribute;
} else {
assert sawRoles : "didn't see roles for [" + nodeId + "]";
}
assert boundHosts.contains(publishedHost) :
"[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts";
return new Node(publishedHost, boundHosts, name, version, new Roles(master, data, ingest));
} }
return httpHost;
} }
public enum Scheme { public enum Scheme {
@ -167,4 +250,15 @@ public final class ElasticsearchHostsSniffer implements HostsSniffer {
return name; return name;
} }
} }
private static boolean toBoolean(String string) {
switch (string) {
case "true":
return true;
case "false":
return false;
default:
throw new IllegalArgumentException("[" + string + "] is not a valid boolean");
}
}
} }
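To make the parsing above concrete, here is a hand-written `/_nodes/http` fragment fed through the parser. The JSON values are illustrative, not captured from a real cluster, and the class sits in `org.elasticsearch.client.sniff` because `readHosts` is package-private:

```
package org.elasticsearch.client.sniff;

import com.fasterxml.jackson.core.JsonFactory;
import java.util.List;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Node;

public class ReadHostsExample {
    public static void main(String[] args) throws Exception {
        String json =
              "{\"nodes\":{\"uZ1\":{"
            + "\"name\":\"node-1\",\"version\":\"6.3.0\","
            + "\"roles\":[\"master\",\"data\",\"ingest\"],"
            + "\"http\":{"
            + "\"bound_address\":[\"127.0.0.1:9200\"],"
            + "\"publish_address\":\"127.0.0.1:9200\"}}}}";
        HttpEntity entity = new StringEntity(json, ContentType.APPLICATION_JSON);
        List<Node> nodes = ElasticsearchNodesSniffer.readHosts(
            entity, ElasticsearchNodesSniffer.Scheme.HTTP, new JsonFactory());
        // One node: [host=http://127.0.0.1:9200, bound=[http://127.0.0.1:9200],
        // name=node-1, version=6.3.0, roles=mdi]
        System.out.println(nodes);
    }
}
```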


@@ -19,7 +19,7 @@
package org.elasticsearch.client.sniff;
-import org.apache.http.HttpHost;
+import org.elasticsearch.client.Node;
import java.io.IOException;
import java.util.List;
@@ -27,9 +27,9 @@ import java.util.List;
/**
 * Responsible for sniffing the http hosts
 */
-public interface HostsSniffer {
+public interface NodesSniffer {
/**
- * Returns the sniffed http hosts
+ * Returns the sniffed Elasticsearch nodes.
 */
-List<HttpHost> sniffHosts() throws IOException;
+List<Node> sniff() throws IOException;
}
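Since `NodesSniffer` is now a single-method interface returning `List<Node>`, custom implementations stay small. A hedged sketch of a fixed-list implementation; the class name `StaticNodesSniffer` is invented for illustration and is not part of this commit:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.sniff.NodesSniffer;

import java.util.ArrayList;
import java.util.List;

// Hypothetical helper: serves a fixed node list, e.g. for tests
// or for clusters with a static topology.
class StaticNodesSniffer implements NodesSniffer {
    private final List<Node> nodes = new ArrayList<>();

    StaticNodesSniffer(HttpHost... hosts) {
        for (HttpHost host : hosts) {
            nodes.add(new Node(host)); // metadata-free Node, like MockNodesSniffer below
        }
    }

    @Override
    public List<Node> sniff() {
        return nodes;
    }
}
```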


@@ -20,6 +20,7 @@
package org.elasticsearch.client.sniff;
import org.apache.http.HttpHost;
+import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClient;
import java.util.Objects;
@@ -54,7 +55,7 @@ public class SniffOnFailureListener extends RestClient.FailureListener {
}
@Override
-public void onFailure(HttpHost host) {
+public void onFailure(Node node) {
if (sniffer == null) {
throw new IllegalStateException("sniffer was not set, unable to sniff on failure");
}
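With `onFailure` now receiving the failed `Node`, wiring sniff-on-failure still follows the pattern described in the Sniffer javadoc below. A sketch, with an arbitrary example delay; host and port are illustrative:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.SniffOnFailureListener;
import org.elasticsearch.client.sniff.Sniffer;

public class SniffOnFailureExample {
    public static void main(String[] args) {
        // The listener must be registered when the client is built; the Sniffer
        // is then set on it lazily, as the Sniffer javadoc below describes.
        SniffOnFailureListener listener = new SniffOnFailureListener();
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http"))
                .setFailureListener(listener)
                .build();
        Sniffer sniffer = Sniffer.builder(restClient)
                .setSniffAfterFailureDelayMillis(30000) // 30s is an arbitrary example value
                .build();
        listener.setSniffer(sniffer);
    }
}
```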


@@ -21,7 +21,7 @@ package org.elasticsearch.client.sniff;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.http.HttpHost;
+import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
@@ -29,6 +29,7 @@ import java.io.Closeable;
import java.io.IOException;
import java.security.AccessController;
import java.security.PrivilegedAction;
+import java.util.Collection;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@@ -43,7 +44,7 @@ import java.util.concurrent.atomic.AtomicReference;
/**
 * Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of
 * {@link RestClient}. Must be created via {@link SnifferBuilder}, which allows to set all of the different options or rely on defaults.
- * A background task fetches the nodes through the {@link HostsSniffer} and sets them to the {@link RestClient} instance.
+ * A background task fetches the nodes through the {@link NodesSniffer} and sets them to the {@link RestClient} instance.
 * It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to
 * {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation needs to be lazily set to the
 * previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}.
@@ -53,7 +54,7 @@ public class Sniffer implements Closeable {
private static final Log logger = LogFactory.getLog(Sniffer.class);
private static final String SNIFFER_THREAD_NAME = "es_rest_client_sniffer";
-private final HostsSniffer hostsSniffer;
+private final NodesSniffer nodesSniffer;
private final RestClient restClient;
private final long sniffIntervalMillis;
private final long sniffAfterFailureDelayMillis;
@@ -61,12 +62,12 @@ public class Sniffer implements Closeable {
private final AtomicBoolean initialized = new AtomicBoolean(false);
private volatile ScheduledTask nextScheduledTask;
-Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) {
-this(restClient, hostsSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay);
+Sniffer(RestClient restClient, NodesSniffer nodesSniffer, long sniffInterval, long sniffAfterFailureDelay) {
+this(restClient, nodesSniffer, new DefaultScheduler(), sniffInterval, sniffAfterFailureDelay);
}
-Sniffer(RestClient restClient, HostsSniffer hostsSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) {
-this.hostsSniffer = hostsSniffer;
+Sniffer(RestClient restClient, NodesSniffer nodesSniffer, Scheduler scheduler, long sniffInterval, long sniffAfterFailureDelay) {
+this.nodesSniffer = nodesSniffer;
this.restClient = restClient;
this.sniffIntervalMillis = sniffInterval;
this.sniffAfterFailureDelayMillis = sniffAfterFailureDelay;
@@ -205,14 +206,14 @@ public class Sniffer implements Closeable {
}
final void sniff() throws IOException {
-List<HttpHost> sniffedHosts = hostsSniffer.sniffHosts();
+List<Node> sniffedNodes = nodesSniffer.sniff();
if (logger.isDebugEnabled()) {
-logger.debug("sniffed hosts: " + sniffedHosts);
+logger.debug("sniffed nodes: " + sniffedNodes);
}
-if (sniffedHosts.isEmpty()) {
-logger.warn("no hosts to set, hosts will be updated at the next sniffing round");
+if (sniffedNodes.isEmpty()) {
+logger.warn("no nodes to set, nodes will be updated at the next sniffing round");
} else {
-restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()]));
+restClient.setNodes(sniffedNodes);
}
}
@@ -227,7 +228,8 @@ public class Sniffer implements Closeable {
/**
 * Returns a new {@link SnifferBuilder} to help with {@link Sniffer} creation.
 *
- * @param restClient the client that gets its hosts set (via {@link RestClient#setHosts(HttpHost...)}) once they are fetched
+ * @param restClient the client that gets its hosts set (via
+ *                   {@link RestClient#setNodes(Collection)}) once they are fetched
 * @return a new instance of {@link SnifferBuilder}
 */
public static SnifferBuilder builder(RestClient restClient) {
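As `sniff()` above shows, each round fetches nodes through the `NodesSniffer` and replaces the client's node set wholesale, skipping empty results. A lifecycle sketch follows; `setSniffIntervalMillis` is the builder's interval setter (not shown in this hunk), and closing the Sniffer before the RestClient is a usage assumption based on the Closeable contract, not something this diff mandates:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer;

import java.io.IOException;

public class SnifferLifecycleExample {
    public static void main(String[] args) throws IOException {
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build();
        // With no NodesSniffer configured, the builder falls back to ElasticsearchNodesSniffer.
        Sniffer sniffer = Sniffer.builder(restClient)
                .setSniffIntervalMillis(60000) // re-sniff every minute (example value)
                .build();
        // ... issue requests through restClient ...
        sniffer.close();    // stop the background sniffing task first (assumed ordering)
        restClient.close(); // then release the client
    }
}
```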


@@ -34,7 +34,7 @@ public final class SnifferBuilder {
private final RestClient restClient;
private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL;
private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY;
-private HostsSniffer hostsSniffer;
+private NodesSniffer nodesSniffer;
/**
 * Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch
@@ -69,13 +69,13 @@ public final class SnifferBuilder {
}
/**
- * Sets the {@link HostsSniffer} to be used to read hosts. A default instance of {@link ElasticsearchHostsSniffer}
- * is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchHostsSniffer},
+ * Sets the {@link NodesSniffer} to be used to read hosts. A default instance of {@link ElasticsearchNodesSniffer}
+ * is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchNodesSniffer},
 * or to provide a different implementation (e.g. in case hosts need to taken from a different source).
 */
-public SnifferBuilder setHostsSniffer(HostsSniffer hostsSniffer) {
-Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null");
-this.hostsSniffer = hostsSniffer;
+public SnifferBuilder setNodesSniffer(NodesSniffer nodesSniffer) {
+Objects.requireNonNull(nodesSniffer, "nodesSniffer cannot be null");
+this.nodesSniffer = nodesSniffer;
return this;
}
@@ -83,9 +83,9 @@ public final class SnifferBuilder {
 * Creates the {@link Sniffer} based on the provided configuration.
 */
public Sniffer build() {
-if (hostsSniffer == null) {
-this.hostsSniffer = new ElasticsearchHostsSniffer(restClient);
+if (nodesSniffer == null) {
+this.nodesSniffer = new ElasticsearchNodesSniffer(restClient);
}
-return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
+return new Sniffer(restClient, nodesSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
}
}
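Putting the renamed builder hook to use; `StaticNodesSniffer` is the hypothetical implementation sketched after the `NodesSniffer` interface above, and the node addresses are invented:

```java
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer;

public class CustomSnifferExample {
    public static void main(String[] args) {
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build();
        // Any NodesSniffer works here; the builder only falls back to
        // ElasticsearchNodesSniffer when none is provided.
        Sniffer sniffer = Sniffer.builder(restClient)
                .setNodesSniffer(new StaticNodesSniffer(
                        new HttpHost("node-1", 9200, "http"),
                        new HttpHost("node-2", 9200, "http")))
                .build();
    }
}
```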


@@ -0,0 +1,109 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.sniff;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClientTestCase;
import org.elasticsearch.client.Node.Roles;
import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer.Scheme;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import com.fasterxml.jackson.core.JsonFactory;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasSize;
import static org.junit.Assert.assertThat;
/**
* Test parsing the response from the {@code /_nodes/http} API from fixed
* versions of Elasticsearch.
*/
public class ElasticsearchNodesSnifferParseTests extends RestClientTestCase {
private void checkFile(String file, Node... expected) throws IOException {
InputStream in = Thread.currentThread().getContextClassLoader().getResourceAsStream(file);
if (in == null) {
throw new IllegalArgumentException("Couldn't find [" + file + "]");
}
try {
HttpEntity entity = new InputStreamEntity(in, ContentType.APPLICATION_JSON);
List<Node> nodes = ElasticsearchNodesSniffer.readHosts(entity, Scheme.HTTP, new JsonFactory());
// Use these assertions because the error messages are nicer than hasItems.
assertThat(nodes, hasSize(expected.length));
for (Node expectedNode : expected) {
assertThat(nodes, hasItem(expectedNode));
}
} finally {
in.close();
}
}
public void test2x() throws IOException {
checkFile("2.0.0_nodes_http.json",
node(9200, "m1", "2.0.0", true, false, false),
node(9202, "m2", "2.0.0", true, true, false),
node(9201, "m3", "2.0.0", true, false, false),
node(9205, "d1", "2.0.0", false, true, false),
node(9204, "d2", "2.0.0", false, true, false),
node(9203, "d3", "2.0.0", false, true, false),
node(9207, "c1", "2.0.0", false, false, false),
node(9206, "c2", "2.0.0", false, false, false));
}
public void test5x() throws IOException {
checkFile("5.0.0_nodes_http.json",
node(9200, "m1", "5.0.0", true, false, true),
node(9201, "m2", "5.0.0", true, true, true),
node(9202, "m3", "5.0.0", true, false, true),
node(9203, "d1", "5.0.0", false, true, true),
node(9204, "d2", "5.0.0", false, true, true),
node(9205, "d3", "5.0.0", false, true, true),
node(9206, "c1", "5.0.0", false, false, true),
node(9207, "c2", "5.0.0", false, false, true));
}
public void test6x() throws IOException {
checkFile("6.0.0_nodes_http.json",
node(9200, "m1", "6.0.0", true, false, true),
node(9201, "m2", "6.0.0", true, true, true),
node(9202, "m3", "6.0.0", true, false, true),
node(9203, "d1", "6.0.0", false, true, true),
node(9204, "d2", "6.0.0", false, true, true),
node(9205, "d3", "6.0.0", false, true, true),
node(9206, "c1", "6.0.0", false, false, true),
node(9207, "c2", "6.0.0", false, false, true));
}
private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) {
HttpHost host = new HttpHost("127.0.0.1", port);
Set<HttpHost> boundHosts = new HashSet<>(2);
boundHosts.add(host);
boundHosts.add(new HttpHost("[::1]", port));
return new Node(host, boundHosts, name, version, new Roles(master, data, ingest));
}
}


@@ -30,6 +30,7 @@ import com.sun.net.httpserver.HttpServer;
import org.apache.http.Consts;
import org.apache.http.HttpHost;
import org.apache.http.client.methods.HttpGet;
+import org.elasticsearch.client.Node;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
@@ -44,10 +45,10 @@ import java.io.StringWriter;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
-import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -59,17 +60,17 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
-public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
+public class ElasticsearchNodesSnifferTests extends RestClientTestCase {
private int sniffRequestTimeout;
-private ElasticsearchHostsSniffer.Scheme scheme;
+private ElasticsearchNodesSniffer.Scheme scheme;
private SniffResponse sniffResponse;
private HttpServer httpServer;
@Before
public void startHttpServer() throws IOException {
this.sniffRequestTimeout = RandomNumbers.randomIntBetween(getRandom(), 1000, 10000);
-this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchHostsSniffer.Scheme.values());
+this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchNodesSniffer.Scheme.values());
if (rarely()) {
this.sniffResponse = SniffResponse.buildFailure();
} else {
@@ -86,7 +87,7 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
public void testConstructorValidation() throws IOException {
try {
-new ElasticsearchHostsSniffer(null, 1, ElasticsearchHostsSniffer.Scheme.HTTP);
+new ElasticsearchNodesSniffer(null, 1, ElasticsearchNodesSniffer.Scheme.HTTP);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("restClient cannot be null", e.getMessage());
@@ -94,14 +95,14 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
try (RestClient restClient = RestClient.builder(httpHost).build()) {
try {
-new ElasticsearchHostsSniffer(restClient, 1, null);
+new ElasticsearchNodesSniffer(restClient, 1, null);
fail("should have failed");
} catch (NullPointerException e) {
assertEquals(e.getMessage(), "scheme cannot be null");
}
try {
-new ElasticsearchHostsSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0),
-ElasticsearchHostsSniffer.Scheme.HTTP);
+new ElasticsearchNodesSniffer(restClient, RandomNumbers.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0),
+ElasticsearchNodesSniffer.Scheme.HTTP);
fail("should have failed");
} catch (IllegalArgumentException e) {
assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0");
@@ -112,17 +113,13 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
public void testSniffNodes() throws IOException {
HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
try (RestClient restClient = RestClient.builder(httpHost).build()) {
-ElasticsearchHostsSniffer sniffer = new ElasticsearchHostsSniffer(restClient, sniffRequestTimeout, scheme);
+ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer(restClient, sniffRequestTimeout, scheme);
try {
-List<HttpHost> sniffedHosts = sniffer.sniffHosts();
+List<Node> sniffedNodes = sniffer.sniff();
if (sniffResponse.isFailure) {
fail("sniffNodes should have failed");
}
-assertThat(sniffedHosts.size(), equalTo(sniffResponse.hosts.size()));
-Iterator<HttpHost> responseHostsIterator = sniffResponse.hosts.iterator();
-for (HttpHost sniffedHost : sniffedHosts) {
-assertEquals(sniffedHost, responseHostsIterator.next());
-}
+assertEquals(sniffResponse.result, sniffedNodes);
} catch(ResponseException e) {
Response response = e.getResponse();
if (sniffResponse.isFailure) {
@@ -173,9 +170,9 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
}
}
-private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme scheme) throws IOException {
+private static SniffResponse buildSniffResponse(ElasticsearchNodesSniffer.Scheme scheme) throws IOException {
int numNodes = RandomNumbers.randomIntBetween(getRandom(), 1, 5);
-List<HttpHost> hosts = new ArrayList<>(numNodes);
+List<Node> nodes = new ArrayList<>(numNodes);
JsonFactory jsonFactory = new JsonFactory();
StringWriter writer = new StringWriter();
JsonGenerator generator = jsonFactory.createGenerator(writer);
@@ -190,6 +187,23 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
generator.writeObjectFieldStart("nodes");
for (int i = 0; i < numNodes; i++) {
String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10);
String host = "host" + i;
int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299);
HttpHost publishHost = new HttpHost(host, port, scheme.toString());
Set<HttpHost> boundHosts = new HashSet<>();
boundHosts.add(publishHost);
if (randomBoolean()) {
int bound = between(1, 5);
for (int b = 0; b < bound; b++) {
boundHosts.add(new HttpHost(host + b, port, scheme.toString()));
}
}
Node node = new Node(publishHost, boundHosts, randomAsciiAlphanumOfLength(5),
randomAsciiAlphanumOfLength(5),
new Node.Roles(randomBoolean(), randomBoolean(), randomBoolean()));
generator.writeObjectFieldStart(nodeId);
if (getRandom().nextBoolean()) {
generator.writeObjectFieldStart("bogus_object");
@@ -203,44 +217,45 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
}
boolean isHttpEnabled = rarely() == false;
if (isHttpEnabled) {
-String host = "host" + i;
-int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299);
-HttpHost httpHost = new HttpHost(host, port, scheme.toString());
-hosts.add(httpHost);
+nodes.add(node);
generator.writeObjectFieldStart("http");
-if (getRandom().nextBoolean()) {
-generator.writeArrayFieldStart("bound_address");
-generator.writeString("[fe80::1]:" + port);
-generator.writeString("[::1]:" + port);
-generator.writeString("127.0.0.1:" + port);
-generator.writeEndArray();
-}
+generator.writeArrayFieldStart("bound_address");
+for (HttpHost bound : boundHosts) {
+generator.writeString(bound.toHostString());
+}
+generator.writeEndArray();
if (getRandom().nextBoolean()) {
generator.writeObjectFieldStart("bogus_object");
generator.writeEndObject();
}
-generator.writeStringField("publish_address", httpHost.toHostString());
+generator.writeStringField("publish_address", publishHost.toHostString());
if (getRandom().nextBoolean()) {
generator.writeNumberField("max_content_length_in_bytes", 104857600);
}
generator.writeEndObject();
}
-if (getRandom().nextBoolean()) {
-String[] roles = {"master", "data", "ingest"};
-int numRoles = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
-Set<String> nodeRoles = new HashSet<>(numRoles);
-for (int j = 0; j < numRoles; j++) {
-String role;
-do {
-role = RandomPicks.randomFrom(getRandom(), roles);
-} while(nodeRoles.add(role) == false);
-}
-generator.writeArrayFieldStart("roles");
-for (String nodeRole : nodeRoles) {
-generator.writeString(nodeRole);
-}
-generator.writeEndArray();
-}
+List<String> roles = Arrays.asList(new String[] {"master", "data", "ingest"});
+Collections.shuffle(roles, getRandom());
+generator.writeArrayFieldStart("roles");
+for (String role : roles) {
+if ("master".equals(role) && node.getRoles().isMasterEligible()) {
+generator.writeString("master");
+}
+if ("data".equals(role) && node.getRoles().isData()) {
+generator.writeString("data");
+}
+if ("ingest".equals(role) && node.getRoles().isIngest()) {
+generator.writeString("ingest");
+}
+}
+generator.writeEndArray();
+generator.writeFieldName("version");
+generator.writeString(node.getVersion());
+generator.writeFieldName("name");
+generator.writeString(node.getName());
int numAttributes = RandomNumbers.randomIntBetween(getRandom(), 0, 3);
Map<String, String> attributes = new HashMap<>(numAttributes);
for (int j = 0; j < numAttributes; j++) {
@@ -260,18 +275,18 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
generator.writeEndObject();
generator.writeEndObject();
generator.close();
-return SniffResponse.buildResponse(writer.toString(), hosts);
+return SniffResponse.buildResponse(writer.toString(), nodes);
}
private static class SniffResponse {
private final String nodesInfoBody;
private final int nodesInfoResponseCode;
-private final List<HttpHost> hosts;
+private final List<Node> result;
private final boolean isFailure;
-SniffResponse(String nodesInfoBody, List<HttpHost> hosts, boolean isFailure) {
+SniffResponse(String nodesInfoBody, List<Node> result, boolean isFailure) {
this.nodesInfoBody = nodesInfoBody;
-this.hosts = hosts;
+this.result = result;
this.isFailure = isFailure;
if (isFailure) {
this.nodesInfoResponseCode = randomErrorResponseCode();
@@ -281,11 +296,11 @@ public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
}
static SniffResponse buildFailure() {
-return new SniffResponse("", Collections.<HttpHost>emptyList(), true);
+return new SniffResponse("", Collections.<Node>emptyList(), true);
}
-static SniffResponse buildResponse(String nodesInfoBody, List<HttpHost> hosts) {
-return new SniffResponse(nodesInfoBody, hosts, false);
+static SniffResponse buildResponse(String nodesInfoBody, List<Node> nodes) {
+return new SniffResponse(nodesInfoBody, nodes, false);
}
}


@@ -20,16 +20,17 @@
package org.elasticsearch.client.sniff;
import org.apache.http.HttpHost;
+import org.elasticsearch.client.Node;
import java.util.Collections;
import java.util.List;
/**
- * Mock implementation of {@link HostsSniffer}. Useful to prevent any connection attempt while testing builders etc.
+ * Mock implementation of {@link NodesSniffer}. Useful to prevent any connection attempt while testing builders etc.
 */
-class MockHostsSniffer implements HostsSniffer {
+class MockNodesSniffer implements NodesSniffer {
@Override
-public List<HttpHost> sniffHosts() {
-return Collections.singletonList(new HttpHost("localhost", 9200));
+public List<Node> sniff() {
+return Collections.singletonList(new Node(new HttpHost("localhost", 9200)));
}
}


@@ -20,6 +20,7 @@
package org.elasticsearch.client.sniff;
import org.apache.http.HttpHost;
+import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientTestCase;
@@ -46,7 +47,7 @@ public class SniffOnFailureListenerTests extends RestClientTestCase {
}
try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
-try (Sniffer sniffer = Sniffer.builder(restClient).setHostsSniffer(new MockHostsSniffer()).build()) {
+try (Sniffer sniffer = Sniffer.builder(restClient).setNodesSniffer(new MockNodesSniffer()).build()) {
listener.setSniffer(sniffer);
try {
listener.setSniffer(sniffer);
@@ -54,7 +55,7 @@ public class SniffOnFailureListenerTests extends RestClientTestCase {
} catch(IllegalStateException e) {
assertEquals("sniffer can only be set once", e.getMessage());
}
-listener.onFailure(new HttpHost("localhost", 9200));
+listener.onFailure(new Node(new HttpHost("localhost", 9200)));
}
}
}


@@ -61,10 +61,10 @@ public class SnifferBuilderTests extends RestClientTestCase {
try {
-Sniffer.builder(client).setHostsSniffer(null);
+Sniffer.builder(client).setNodesSniffer(null);
fail("should have failed");
} catch(NullPointerException e) {
-assertEquals("hostsSniffer cannot be null", e.getMessage());
+assertEquals("nodesSniffer cannot be null", e.getMessage());
}
@@ -80,7 +80,7 @@ public class SnifferBuilderTests extends RestClientTestCase {
builder.setSniffAfterFailureDelayMillis(RandomNumbers.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
}
if (getRandom().nextBoolean()) {
-builder.setHostsSniffer(new MockHostsSniffer());
+builder.setNodesSniffer(new MockNodesSniffer());
}
try (Sniffer sniffer = builder.build()) {


@@ -20,11 +20,11 @@
package org.elasticsearch.client.sniff;
import org.apache.http.HttpHost;
+import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientTestCase;
import org.elasticsearch.client.sniff.Sniffer.DefaultScheduler;
import org.elasticsearch.client.sniff.Sniffer.Scheduler;
-import org.mockito.Matchers;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -62,6 +62,7 @@ import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyCollectionOf;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
@@ -71,12 +72,12 @@ import static org.mockito.Mockito.when;
public class SnifferTests extends RestClientTestCase {
/**
- * Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link HostsSniffer} implementation
+ * Tests the {@link Sniffer#sniff()} method in isolation. Verifies that it uses the {@link NodesSniffer} implementation
 * to retrieve nodes and set them (when not empty) to the provided {@link RestClient} instance.
 */
public void testSniff() throws IOException {
-HttpHost initialHost = new HttpHost("localhost", 9200);
-try (RestClient restClient = RestClient.builder(initialHost).build()) {
+Node initialNode = new Node(new HttpHost("localhost", 9200));
+try (RestClient restClient = RestClient.builder(initialNode).build()) {
Scheduler noOpScheduler = new Scheduler() {
@Override
public Future<?> schedule(Sniffer.Task task, long delayMillis) {
@@ -88,53 +89,53 @@ public class SnifferTests extends RestClientTestCase {
}
};
-CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
+CountingNodesSniffer nodesSniffer = new CountingNodesSniffer();
int iters = randomIntBetween(5, 30);
-try (Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 1000L, -1)){
+try (Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 1000L, -1)){
{
-assertEquals(1, restClient.getHosts().size());
-HttpHost httpHost = restClient.getHosts().get(0);
-assertEquals("localhost", httpHost.getHostName());
-assertEquals(9200, httpHost.getPort());
+assertEquals(1, restClient.getNodes().size());
+Node node = restClient.getNodes().get(0);
+assertEquals("localhost", node.getHost().getHostName());
+assertEquals(9200, node.getHost().getPort());
}
int emptyList = 0;
int failures = 0;
int runs = 0;
-List<HttpHost> lastHosts = Collections.singletonList(initialHost);
+List<Node> lastNodes = Collections.singletonList(initialNode);
for (int i = 0; i < iters; i++) {
try {
runs++;
sniffer.sniff();
-if (hostsSniffer.failures.get() > failures) {
+if (nodesSniffer.failures.get() > failures) {
failures++;
-fail("should have failed given that hostsSniffer says it threw an exception");
-} else if (hostsSniffer.emptyList.get() > emptyList) {
+fail("should have failed given that nodesSniffer says it threw an exception");
+} else if (nodesSniffer.emptyList.get() > emptyList) {
emptyList++;
-assertEquals(lastHosts, restClient.getHosts());
+assertEquals(lastNodes, restClient.getNodes());
} else {
-assertNotEquals(lastHosts, restClient.getHosts());
-List<HttpHost> expectedHosts = CountingHostsSniffer.buildHosts(runs);
-assertEquals(expectedHosts, restClient.getHosts());
-lastHosts = restClient.getHosts();
+assertNotEquals(lastNodes, restClient.getNodes());
+List<Node> expectedNodes = CountingNodesSniffer.buildNodes(runs);
+assertEquals(expectedNodes, restClient.getNodes());
+lastNodes = restClient.getNodes();
}
} catch(IOException e) {
-if (hostsSniffer.failures.get() > failures) {
+if (nodesSniffer.failures.get() > failures) {
failures++;
assertEquals("communication breakdown", e.getMessage());
}
}
}
-assertEquals(hostsSniffer.emptyList.get(), emptyList);
-assertEquals(hostsSniffer.failures.get(), failures);
-assertEquals(hostsSniffer.runs.get(), runs);
+assertEquals(nodesSniffer.emptyList.get(), emptyList);
+assertEquals(nodesSniffer.failures.get(), failures);
+assertEquals(nodesSniffer.runs.get(), runs);
}
}
}
/**
- * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link HostsSniffer}.
+ * Test multiple sniffing rounds by mocking the {@link Scheduler} as well as the {@link NodesSniffer}.
 * Simulates the ordinary behaviour of {@link Sniffer} when sniffing on failure is not enabled.
- * The {@link CountingHostsSniffer} doesn't make any network connection but may throw exception or return no hosts, which makes
+ * The {@link CountingNodesSniffer} doesn't make any network connection but may throw exception or return no nodes, which makes
 * it possible to verify that errors are properly handled and don't affect subsequent runs and their scheduling.
 * The {@link Scheduler} implementation submits rather than scheduling tasks, meaning that it doesn't respect the requested sniff
 * delays while allowing to assert that the requested delays for each requested run and the following one are the expected values.
@@ -143,7 +144,7 @@ public class SnifferTests extends RestClientTestCase {
final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
RestClient restClient = mock(RestClient.class);
-CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
+CountingNodesSniffer nodesSniffer = new CountingNodesSniffer();
final int iters = randomIntBetween(30, 100);
final Set<Future<?>> futures = new CopyOnWriteArraySet<>();
final CountDownLatch completionLatch = new CountDownLatch(1);
@@ -185,7 +186,7 @@ public class SnifferTests extends RestClientTestCase {
}
};
try {
-new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
+new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
assertTrue("timeout waiting for sniffing rounds to be completed", completionLatch.await(1000, TimeUnit.MILLISECONDS));
assertEquals(iters, futures.size());
//the last future is the only one that may not be completed yet, as the count down happens
@@ -200,10 +201,10 @@ public class SnifferTests extends RestClientTestCase {
executor.shutdown();
assertTrue(executor.awaitTermination(1000, TimeUnit.MILLISECONDS));
}
-int totalRuns = hostsSniffer.runs.get();
+int totalRuns = nodesSniffer.runs.get();
assertEquals(iters, totalRuns);
-int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
-verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
+int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get();
+verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class));
verifyNoMoreInteractions(restClient);
}
@@ -234,7 +235,7 @@ public class SnifferTests extends RestClientTestCase {
}
};
-Sniffer sniffer = new Sniffer(restClient, new MockHostsSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay);
+Sniffer sniffer = new Sniffer(restClient, new MockNodesSniffer(), scheduler, sniffInterval, sniffAfterFailureDelay);
assertEquals(0, shutdown.get());
int iters = randomIntBetween(3, 10);
for (int i = 1; i <= iters; i++) {
@@ -246,7 +247,7 @@ public class SnifferTests extends RestClientTestCase {
public void testSniffOnFailureNotInitialized() {
RestClient restClient = mock(RestClient.class);
-CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
+CountingNodesSniffer nodesSniffer = new CountingNodesSniffer();
long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
final AtomicInteger scheduleCalls = new AtomicInteger(0);
@@ -262,15 +263,15 @@ public class SnifferTests extends RestClientTestCase {
}
};
-Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
+Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
for (int i = 0; i < 10; i++) {
sniffer.sniffOnFailure();
}
assertEquals(1, scheduleCalls.get());
-int totalRuns = hostsSniffer.runs.get();
+int totalRuns = nodesSniffer.runs.get();
assertEquals(0, totalRuns);
-int setHostsRuns = totalRuns - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
-verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
+int setNodesRuns = totalRuns - nodesSniffer.failures.get() - nodesSniffer.emptyList.get();
+verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class));
verifyNoMoreInteractions(restClient);
}
@@ -281,7 +282,7 @@ public class SnifferTests extends RestClientTestCase {
 */
public void testSniffOnFailure() throws Exception {
RestClient restClient = mock(RestClient.class);
-CountingHostsSniffer hostsSniffer = new CountingHostsSniffer();
+CountingNodesSniffer nodesSniffer = new CountingNodesSniffer();
final AtomicBoolean initializing = new AtomicBoolean(true);
final long sniffInterval = randomLongBetween(1, Long.MAX_VALUE);
final long sniffAfterFailureDelay = randomLongBetween(1, Long.MAX_VALUE);
@@ -351,7 +352,7 @@ public class SnifferTests extends RestClientTestCase {
public void shutdown() {
}
};
-final Sniffer sniffer = new Sniffer(restClient, hostsSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
+final Sniffer sniffer = new Sniffer(restClient, nodesSniffer, scheduler, sniffInterval, sniffAfterFailureDelay);
assertTrue("timeout waiting for sniffer to get initialized", initializingLatch.await(1000, TimeUnit.MILLISECONDS));
ExecutorService onFailureExecutor = Executors.newFixedThreadPool(randomIntBetween(5, 20));
@@ -413,9 +414,9 @@ public class SnifferTests extends RestClientTestCase {
}
assertEquals(onFailureTasks.size(), cancelledTasks);
-assertEquals(completedTasks, hostsSniffer.runs.get());
-int setHostsRuns = hostsSniffer.runs.get() - hostsSniffer.failures.get() - hostsSniffer.emptyList.get();
-verify(restClient, times(setHostsRuns)).setHosts(Matchers.<HttpHost>anyVararg());
+assertEquals(completedTasks, nodesSniffer.runs.get());
+int setNodesRuns = nodesSniffer.runs.get() - nodesSniffer.failures.get() - nodesSniffer.emptyList.get();
+verify(restClient, times(setNodesRuns)).setNodes(anyCollectionOf(Node.class));
verifyNoMoreInteractions(restClient);
} finally {
executor.shutdown();
@@ -446,7 +447,7 @@ public class SnifferTests extends RestClientTestCase {
public void testTaskCancelling() throws Exception {
RestClient restClient = mock(RestClient.class);
-HostsSniffer hostsSniffer = mock(HostsSniffer.class);
+NodesSniffer nodesSniffer = mock(NodesSniffer.class);
Scheduler noOpScheduler = new Scheduler() {
@Override
public Future<?> schedule(Sniffer.Task task, long delayMillis) {
@@ -457,7 +458,7 @@ public class SnifferTests extends RestClientTestCase {
public void shutdown() {
}
};
-Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
+Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L);
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
try {
int numIters = randomIntBetween(50, 100);
@@ -540,18 +541,18 @@ public class SnifferTests extends RestClientTestCase {
}
/**
- * Mock {@link HostsSniffer} implementation used for testing, which most of the times return a fixed host.
- * It rarely throws exception or return an empty list of hosts, to make sure that such situations are properly handled.
+ * Mock {@link NodesSniffer} implementation used for testing, which most of the times return a fixed node.
+ * It rarely throws exception or return an empty list of nodes, to make sure that such situations are properly handled.
 * It also asserts that it never gets called concurrently, based on the assumption that only one sniff run can be run
 * at a given point in time.
 */
-private static class CountingHostsSniffer implements HostsSniffer {
+private static class CountingNodesSniffer implements NodesSniffer {
private final AtomicInteger runs = new AtomicInteger(0);
private final AtomicInteger failures = new AtomicInteger(0);
private final AtomicInteger emptyList = new AtomicInteger(0);
@Override
-public List<HttpHost> sniffHosts() throws IOException {
+public List<Node> sniff() throws IOException {
int run = runs.incrementAndGet();
if (rarely()) {
failures.incrementAndGet();
@@ -562,24 +563,23 @@ public class SnifferTests extends RestClientTestCase {
emptyList.incrementAndGet();
return Collections.emptyList();
}
-return buildHosts(run);
+return buildNodes(run);
}
-private static List<HttpHost> buildHosts(int run) {
+private static List<Node> buildNodes(int run) {
int size = run % 5 + 1;
assert size > 0;
-List<HttpHost> hosts = new ArrayList<>(size);
+List<Node> nodes = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
-hosts.add(new HttpHost("sniffed-" + run, 9200 + i));
+nodes.add(new Node(new HttpHost("sniffed-" + run, 9200 + i)));
}
-return hosts;
+return nodes;
}
}
-@SuppressWarnings("unchecked")
public void testDefaultSchedulerSchedule() {
RestClient restClient = mock(RestClient.class);
-HostsSniffer hostsSniffer = mock(HostsSniffer.class);
+NodesSniffer nodesSniffer = mock(NodesSniffer.class);
Scheduler noOpScheduler = new Scheduler() {
@Override
public Future<?> schedule(Sniffer.Task task, long delayMillis) {
@@ -591,7 +591,7 @@ public class SnifferTests extends RestClientTestCase {
}
};
-Sniffer sniffer = new Sniffer(restClient, hostsSniffer, noOpScheduler, 0L, 0L);
+Sniffer sniffer = new Sniffer(restClient, nodesSniffer, noOpScheduler, 0L, 0L);
Sniffer.Task task = sniffer.new Task(randomLongBetween(1, Long.MAX_VALUE));
ScheduledExecutorService scheduledExecutorService = mock(ScheduledExecutorService.class);


@@ -20,9 +20,10 @@
package org.elasticsearch.client.sniff.documentation;
import org.apache.http.HttpHost;
+import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClient;
-import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer;
-import org.elasticsearch.client.sniff.HostsSniffer;
+import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer;
+import org.elasticsearch.client.sniff.NodesSniffer;
import org.elasticsearch.client.sniff.SniffOnFailureListener;
import org.elasticsearch.client.sniff.Sniffer;
@@ -91,12 +92,12 @@ public class SnifferDocumentation {
RestClient restClient = RestClient.builder(
new HttpHost("localhost", 9200, "http"))
.build();
-HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer(
+NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer(
restClient,
-ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT,
-ElasticsearchHostsSniffer.Scheme.HTTPS);
+ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT,
+ElasticsearchNodesSniffer.Scheme.HTTPS);
Sniffer sniffer = Sniffer.builder(restClient)
-.setHostsSniffer(hostsSniffer).build();
+.setNodesSniffer(nodesSniffer).build();
//end::sniffer-https
}
{
@@ -104,28 +105,28 @@ public class SnifferDocumentation {
RestClient restClient = RestClient.builder(
new HttpHost("localhost", 9200, "http"))
.build();
-HostsSniffer hostsSniffer = new ElasticsearchHostsSniffer(
+NodesSniffer nodesSniffer = new ElasticsearchNodesSniffer(
restClient,
TimeUnit.SECONDS.toMillis(5),
-ElasticsearchHostsSniffer.Scheme.HTTP);
+ElasticsearchNodesSniffer.Scheme.HTTP);
Sniffer sniffer = Sniffer.builder(restClient)
-.setHostsSniffer(hostsSniffer).build();
+.setNodesSniffer(nodesSniffer).build();
//end::sniff-request-timeout
}
{
-//tag::custom-hosts-sniffer
+//tag::custom-nodes-sniffer
RestClient restClient = RestClient.builder(
new HttpHost("localhost", 9200, "http"))
.build();
-HostsSniffer hostsSniffer = new HostsSniffer() {
+NodesSniffer nodesSniffer = new NodesSniffer() {
@Override
-public List<HttpHost> sniffHosts() throws IOException {
+public List<Node> sniff() throws IOException {
return null; // <1>
}
};
Sniffer sniffer = Sniffer.builder(restClient)
-.setHostsSniffer(hostsSniffer).build();
+.setNodesSniffer(nodesSniffer).build();
-//end::custom-hosts-sniffer
+//end::custom-nodes-sniffer
}
}
}


@@ -0,0 +1,141 @@
{
"cluster_name" : "elasticsearch",
"nodes" : {
"qYUZ_8bTRwODPxukDlFw6Q" : {
"name" : "d2",
"transport_address" : "127.0.0.1:9304",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "2.0.0",
"build" : "de54438",
"http_address" : "127.0.0.1:9204",
"attributes" : {
"master" : "false"
},
"http" : {
"bound_address" : [ "127.0.0.1:9204", "[::1]:9204" ],
"publish_address" : "127.0.0.1:9204",
"max_content_length_in_bytes" : 104857600
}
},
"Yej5UVNgR2KgBjUFHOQpCw" : {
"name" : "c1",
"transport_address" : "127.0.0.1:9307",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "2.0.0",
"build" : "de54438",
"http_address" : "127.0.0.1:9207",
"attributes" : {
"data" : "false",
"master" : "false"
},
"http" : {
"bound_address" : [ "127.0.0.1:9207", "[::1]:9207" ],
"publish_address" : "127.0.0.1:9207",
"max_content_length_in_bytes" : 104857600
}
},
"mHttJwhwReangKEx9EGuAg" : {
"name" : "m3",
"transport_address" : "127.0.0.1:9301",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "2.0.0",
"build" : "de54438",
"http_address" : "127.0.0.1:9201",
"attributes" : {
"data" : "false",
"master" : "true"
},
"http" : {
"bound_address" : [ "127.0.0.1:9201", "[::1]:9201" ],
"publish_address" : "127.0.0.1:9201",
"max_content_length_in_bytes" : 104857600
}
},
"6Erdptt_QRGLxMiLi9mTkg" : {
"name" : "c2",
"transport_address" : "127.0.0.1:9306",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "2.0.0",
"build" : "de54438",
"http_address" : "127.0.0.1:9206",
"attributes" : {
"data" : "false",
"client" : "true"
},
"http" : {
"bound_address" : [ "127.0.0.1:9206", "[::1]:9206" ],
"publish_address" : "127.0.0.1:9206",
"max_content_length_in_bytes" : 104857600
}
},
"mLRCZBypTiys6e8KY5DMnA" : {
"name" : "m1",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "2.0.0",
"build" : "de54438",
"http_address" : "127.0.0.1:9200",
"attributes" : {
"data" : "false"
},
"http" : {
"bound_address" : [ "127.0.0.1:9200", "[::1]:9200" ],
"publish_address" : "127.0.0.1:9200",
"max_content_length_in_bytes" : 104857600
}
},
"pVqOhytXQwetsZVzCBppYw" : {
"name" : "m2",
"transport_address" : "127.0.0.1:9302",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "2.0.0",
"build" : "de54438",
"http_address" : "127.0.0.1:9202",
"http" : {
"bound_address" : [ "127.0.0.1:9202", "[::1]:9202" ],
"publish_address" : "127.0.0.1:9202",
"max_content_length_in_bytes" : 104857600
}
},
"ARyzVfpJSw2a9TOIUpbsBA" : {
"name" : "d1",
"transport_address" : "127.0.0.1:9305",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "2.0.0",
"build" : "de54438",
"http_address" : "127.0.0.1:9205",
"attributes" : {
"master" : "false"
},
"http" : {
"bound_address" : [ "127.0.0.1:9205", "[::1]:9205" ],
"publish_address" : "127.0.0.1:9205",
"max_content_length_in_bytes" : 104857600
}
},
"2Hpid-g5Sc2BKCevhN6VQw" : {
"name" : "d3",
"transport_address" : "127.0.0.1:9303",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "2.0.0",
"build" : "de54438",
"http_address" : "127.0.0.1:9203",
"attributes" : {
"master" : "false"
},
"http" : {
"bound_address" : [ "127.0.0.1:9203", "[::1]:9203" ],
"publish_address" : "127.0.0.1:9203",
"max_content_length_in_bytes" : 104857600
}
}
}
}
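This 2.x fixture exercises the attribute fallback in `ElasticsearchNodesSniffer` above: 2.x nodes advertise no `roles` array, so `master` and `data` are derived from the `master`, `data`, and `client` attributes. A worked sketch for node `m1` (whose only attribute is `"data" : "false"`) follows; the `clientAttribute` default of `false` mirrors what the parser presumably declares outside the hunk shown here and is an assumption:

```java
public class TwoXRolesFallbackExample {
    public static void main(String[] args) {
        // Node "m1" above: attributes contain only "data" : "false".
        Boolean masterAttribute = null;        // no "master" attribute present
        Boolean dataAttribute = Boolean.FALSE; // toBoolean("false")
        boolean clientAttribute = false;       // no "client" attribute; assumed default

        // The fallback from the ElasticsearchNodesSniffer hunk earlier in this diff:
        boolean master = masterAttribute == null ? false == clientAttribute : masterAttribute; // -> true
        boolean data = dataAttribute == null ? false == clientAttribute : dataAttribute;       // -> false
        boolean ingest = false; // 2.x has no ingest role

        // Matches ElasticsearchNodesSnifferParseTests.test2x: node(9200, "m1", "2.0.0", true, false, false)
        System.out.println("master=" + master + " data=" + data + " ingest=" + ingest);
    }
}
```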


@@ -0,0 +1,169 @@
{
"_nodes" : {
"total" : 8,
"successful" : 8,
"failed" : 0
},
"cluster_name" : "test",
"nodes" : {
"DXz_rhcdSF2xJ96qyjaLVw" : {
"name" : "m1",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "5.0.0",
"build_hash" : "253032b",
"roles" : [
"master",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9200",
"127.0.0.1:9200"
],
"publish_address" : "127.0.0.1:9200",
"max_content_length_in_bytes" : 104857600
}
},
"53Mi6jYdRgeR1cdyuoNfQQ" : {
"name" : "m2",
"transport_address" : "127.0.0.1:9301",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "5.0.0",
"build_hash" : "253032b",
"roles" : [
"master",
"data",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9201",
"127.0.0.1:9201"
],
"publish_address" : "127.0.0.1:9201",
"max_content_length_in_bytes" : 104857600
}
},
"XBIghcHiRlWP9c4vY6rETw" : {
"name" : "c2",
"transport_address" : "127.0.0.1:9307",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "5.0.0",
"build_hash" : "253032b",
"roles" : [
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9207",
"127.0.0.1:9207"
],
"publish_address" : "127.0.0.1:9207",
"max_content_length_in_bytes" : 104857600
}
},
"cFM30FlyS8K1njH_bovwwQ" : {
"name" : "d1",
"transport_address" : "127.0.0.1:9303",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "5.0.0",
"build_hash" : "253032b",
"roles" : [
"data",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9203",
"127.0.0.1:9203"
],
"publish_address" : "127.0.0.1:9203",
"max_content_length_in_bytes" : 104857600
}
},
"eoVUVRGNRDyyOapqIcrsIA" : {
"name" : "d2",
"transport_address" : "127.0.0.1:9304",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "5.0.0",
"build_hash" : "253032b",
"roles" : [
"data",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9204",
"127.0.0.1:9204"
],
"publish_address" : "127.0.0.1:9204",
"max_content_length_in_bytes" : 104857600
}
},
"xPN76uDcTP-DyXaRzPg2NQ" : {
"name" : "c1",
"transport_address" : "127.0.0.1:9306",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "5.0.0",
"build_hash" : "253032b",
"roles" : [
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9206",
"127.0.0.1:9206"
],
"publish_address" : "127.0.0.1:9206",
"max_content_length_in_bytes" : 104857600
}
},
"RY0oW2d7TISEqazk-U4Kcw" : {
"name" : "d3",
"transport_address" : "127.0.0.1:9305",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "5.0.0",
"build_hash" : "253032b",
"roles" : [
"data",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9205",
"127.0.0.1:9205"
],
"publish_address" : "127.0.0.1:9205",
"max_content_length_in_bytes" : 104857600
}
},
"tU0rXEZmQ9GsWfn2TQ4kow" : {
"name" : "m3",
"transport_address" : "127.0.0.1:9302",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "5.0.0",
"build_hash" : "253032b",
"roles" : [
"master",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9202",
"127.0.0.1:9202"
],
"publish_address" : "127.0.0.1:9202",
"max_content_length_in_bytes" : 104857600
}
}
}
}


@ -0,0 +1,169 @@
{
"_nodes" : {
"total" : 8,
"successful" : 8,
"failed" : 0
},
"cluster_name" : "test",
"nodes" : {
"FX9npqGQSL2mOGF8Zkf3hw" : {
"name" : "m2",
"transport_address" : "127.0.0.1:9301",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "6.0.0",
"build_hash" : "8f0685b",
"roles" : [
"master",
"data",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9201",
"127.0.0.1:9201"
],
"publish_address" : "127.0.0.1:9201",
"max_content_length_in_bytes" : 104857600
}
},
"jmUqzYLGTbWCg127kve3Tg" : {
"name" : "d1",
"transport_address" : "127.0.0.1:9303",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "6.0.0",
"build_hash" : "8f0685b",
"roles" : [
"data",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9203",
"127.0.0.1:9203"
],
"publish_address" : "127.0.0.1:9203",
"max_content_length_in_bytes" : 104857600
}
},
"soBU6bzvTOqdLxPstSbJ2g" : {
"name" : "d3",
"transport_address" : "127.0.0.1:9305",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "6.0.0",
"build_hash" : "8f0685b",
"roles" : [
"data",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9205",
"127.0.0.1:9205"
],
"publish_address" : "127.0.0.1:9205",
"max_content_length_in_bytes" : 104857600
}
},
"mtYDAhURTP6twdmNAkMnOg" : {
"name" : "m3",
"transport_address" : "127.0.0.1:9302",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "6.0.0",
"build_hash" : "8f0685b",
"roles" : [
"master",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9202",
"127.0.0.1:9202"
],
"publish_address" : "127.0.0.1:9202",
"max_content_length_in_bytes" : 104857600
}
},
"URxHiUQPROOt1G22Ev6lXw" : {
"name" : "c2",
"transport_address" : "127.0.0.1:9307",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "6.0.0",
"build_hash" : "8f0685b",
"roles" : [
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9207",
"127.0.0.1:9207"
],
"publish_address" : "127.0.0.1:9207",
"max_content_length_in_bytes" : 104857600
}
},
"_06S_kWoRqqFR8Z8CS3JRw" : {
"name" : "c1",
"transport_address" : "127.0.0.1:9306",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "6.0.0",
"build_hash" : "8f0685b",
"roles" : [
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9206",
"127.0.0.1:9206"
],
"publish_address" : "127.0.0.1:9206",
"max_content_length_in_bytes" : 104857600
}
},
"QZE5Bd6DQJmnfVs2dglOvA" : {
"name" : "d2",
"transport_address" : "127.0.0.1:9304",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "6.0.0",
"build_hash" : "8f0685b",
"roles" : [
"data",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9204",
"127.0.0.1:9204"
],
"publish_address" : "127.0.0.1:9204",
"max_content_length_in_bytes" : 104857600
}
},
"_3mTXg6dSweZn5ReB2fQqw" : {
"name" : "m1",
"transport_address" : "127.0.0.1:9300",
"host" : "127.0.0.1",
"ip" : "127.0.0.1",
"version" : "6.0.0",
"build_hash" : "8f0685b",
"roles" : [
"master",
"ingest"
],
"http" : {
"bound_address" : [
"[::1]:9200",
"127.0.0.1:9200"
],
"publish_address" : "127.0.0.1:9200",
"max_content_length_in_bytes" : 104857600
}
}
}
}


@ -0,0 +1,4 @@
The `*_node_http.json` files were created by locally spinning up toy clusters
with a few nodes in different configurations at various versions. They are
for testing `ElasticsearchNodesSniffer` against different versions of
Elasticsearch.


@ -144,3 +144,13 @@ include-tagged::{doc-tests}/MiscellaneousDocumentationIT.java[rest-high-level-cl
In the rest of this documentation about the Java High Level Client, the `RestHighLevelClient` instance
will be referenced as `client`.

[[java-rest-high-getting-started-request-options]]
=== RequestOptions
All APIs in the `RestHighLevelClient` accept a `RequestOptions` which you can
use to customize the request in ways that won't change how Elasticsearch
executes the request. For example, this is the place where you'd specify a
`NodeSelector` to control which node receives the request. See the
<<java-rest-low-usage-request-options,low level client documentation>> for
more examples of customizing the options.
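
For example, a request might carry a `NodeSelector` like this (a minimal
sketch; `RequestOptions.DEFAULT` and the `search` overload shown here are
assumptions for illustration, not part of this change):

["source","java"]
--------------------------------------------------
RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); // DEFAULT assumed
builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY); // keep requests off dedicated masters
RequestOptions options = builder.build();
// pass the options along with any high level request, e.g. (illustrative):
SearchResponse response = client.search(new SearchRequest("posts"), options);
--------------------------------------------------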


@ -55,7 +55,7 @@ dependencies {
Once a `RestClient` instance has been created as shown in <<java-rest-low-usage-initialization>>,
a `Sniffer` can be associated with it. The `Sniffer` will make use of the provided `RestClient`
to periodically (every 5 minutes by default) fetch the list of current nodes from the cluster
-and update them by calling `RestClient#setHosts`.
+and update them by calling `RestClient#setNodes`.

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
@ -105,7 +105,7 @@ on failure is not enabled like explained above.
The Elasticsearch Nodes Info api doesn't return the protocol to use when
connecting to the nodes but only their `host:port` pair, hence `http`
is used by default. In case `https` should be used instead, the
-`ElasticsearchHostsSniffer` instance has to be manually created and provided
+`ElasticsearchNodesSniffer` instance has to be manually created and provided
as follows:

["source","java",subs="attributes,callouts,macros"]
@ -125,12 +125,12 @@ cluster, the ones that have responded until then.
include-tagged::{doc-tests}/SnifferDocumentation.java[sniff-request-timeout]
--------------------------------------------------

-Also, a custom `HostsSniffer` implementation can be provided for advanced
-use-cases that may require fetching the hosts from external sources rather
+Also, a custom `NodesSniffer` implementation can be provided for advanced
+use-cases that may require fetching the `Node`s from external sources rather
than from Elasticsearch:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/SnifferDocumentation.java[custom-hosts-sniffer]
+include-tagged::{doc-tests}/SnifferDocumentation.java[custom-nodes-sniffer]
--------------------------------------------------
<1> Fetch the hosts from the external source
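
A minimal inline sketch of such a custom `NodesSniffer` (the fixed host list
here stands in for a real external source):

["source","java"]
--------------------------------------------------
NodesSniffer externalSniffer = new NodesSniffer() {
    @Override
    public List<Node> sniff() throws IOException {
        // A real implementation would query the external source here
        return Collections.singletonList(new Node(new HttpHost("localhost", 9200)));
    }
};
Sniffer sniffer = Sniffer.builder(restClient)
        .setNodesSniffer(externalSniffer).build();
--------------------------------------------------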


@ -271,24 +271,51 @@ a `ContentType` of `application/json`.
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter]
--------------------------------------------------

-And you can add one or more headers to send with the request:
+[[java-rest-low-usage-request-options]]
==== RequestOptions

The `RequestOptions` class holds parts of the request that should be shared
between many requests in the same application. You can make a singleton
instance and share it between all requests:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-headers]
+include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-singleton]
--------------------------------------------------
<1> Add any headers needed by all requests.
<2> Set a `NodeSelector`.
<3> Customize the response consumer.

`addHeader` is for headers that are required for authorization or to work with
a proxy in front of Elasticsearch. There is no need to set the `Content-Type`
header because the client will automatically set that from the `HttpEntity`
attached to the request.

You can set the `NodeSelector`, which controls which nodes will receive
requests. `NodeSelector.NOT_MASTER_ONLY` is a good choice.
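
A minimal sketch of such a singleton (`RequestOptions.DEFAULT` and the
`Authorization` value are assumptions for illustration):

["source","java"]
--------------------------------------------------
private static final RequestOptions COMMON_OPTIONS;
static {
    RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); // DEFAULT assumed
    builder.addHeader("Authorization", "Bearer " + TOKEN);  // hypothetical token shared by all requests
    builder.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);  // keep requests off dedicated master nodes
    COMMON_OPTIONS = builder.build();
}
--------------------------------------------------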
You can also customize the response consumer used to buffer the asynchronous
responses. The default consumer will buffer up to 100MB of response on the
JVM heap. If the response is larger, the request will fail. You could,
for example, lower the maximum size, which might be useful if you are running
-in a heap constrained environment:
+in a heap-constrained environment like the example above.

Once you've created the singleton, you can use it when making requests:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-response-consumer]
+include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-set-singleton]
--------------------------------------------------

You can also customize these options on a per-request basis. For example, this
adds an extra header:

["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-options-customize]
--------------------------------------------------
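
Continuing the sketch above, per-request customization copies the shared
options and layers changes on top (the `X-Opaque-Id` header is illustrative):

["source","java"]
--------------------------------------------------
Request request = new Request("GET", "/_cluster/health");
RequestOptions.Builder options = COMMON_OPTIONS.toBuilder(); // start from the shared options
options.addHeader("X-Opaque-Id", "health-check");            // add one request-specific header
request.setOptions(options);
Response response = restClient.performRequest(request);
--------------------------------------------------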
==== Multiple parallel asynchronous actions

The client is quite happy to execute many actions in parallel. The following


@ -197,6 +197,24 @@ header. The warnings must match exactly. Using it looks like this:
      id: 1
....
If the arguments to `do` include `node_selector`, then the request is only
sent to nodes that match the `node_selector`. Currently only the `version`
selector is supported; it uses the same logic as the `version` field in
`skip`. It looks like this:
....
"test id":
- skip:
features: node_selector
- do:
node_selector:
version: " - 6.9.99"
index:
index: test-weird-index-中文
type: weird.type
id: 1
body: { foo: bar }
....
=== `set`


@ -21,6 +21,7 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks;
dependencies {
    compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
    compile "org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}"
    compile "org.elasticsearch:elasticsearch-nio:${version}"
    compile "org.elasticsearch:elasticsearch:${version}"
    compile "org.elasticsearch:elasticsearch-cli:${version}"


@ -22,6 +22,8 @@ package org.elasticsearch.test.rest.yaml;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.elasticsearch.Version;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;

@ -49,22 +51,29 @@ public final class ClientYamlDocsTestClient extends ClientYamlTestClient {
        super(restSpec, restClient, hosts, esVersion, masterVersion);
    }

-    public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, HttpEntity entity, Map<String, String> headers)
-            throws IOException {
+    @Override
+    public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, HttpEntity entity,
+            Map<String, String> headers, NodeSelector nodeSelector) throws IOException {
        if ("raw".equals(apiName)) {
-            // Raw requests are bit simpler....
+            // Raw requests don't use the rest spec at all and are configured entirely by their parameters
            Map<String, String> queryStringParams = new HashMap<>(params);
            String method = Objects.requireNonNull(queryStringParams.remove("method"), "Method must be set to use raw request");
            String path = "/" + Objects.requireNonNull(queryStringParams.remove("path"), "Path must be set to use raw request");
-            // And everything else is a url parameter!
+            Request request = new Request(method, path);
            // All other parameters are url parameters
            for (Map.Entry<String, String> param : queryStringParams.entrySet()) {
                request.addParameter(param.getKey(), param.getValue());
            }
            request.setEntity(entity);
            setOptions(request, headers, nodeSelector);
            try {
-                Response response = restClient.performRequest(method, path, queryStringParams, entity);
+                Response response = restClient.performRequest(request);
                return new ClientYamlTestResponse(response);
            } catch (ResponseException e) {
                throw new ClientYamlTestResponseException(e);
            }
        }
-        return super.callApi(apiName, params, entity, headers);
+        return super.callApi(apiName, params, entity, headers, nodeSelector);
    }
}


@ -19,15 +19,16 @@
package org.elasticsearch.test.rest.yaml;

import com.carrotsearch.randomizedtesting.RandomizedTest;
-import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.entity.ContentType;
-import org.apache.http.message.BasicHeader;
import org.apache.http.util.EntityUtils;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;

@ -85,8 +86,8 @@ public class ClientYamlTestClient {
    /**
     * Calls an api with the provided parameters and body
     */
-    public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, HttpEntity entity, Map<String, String> headers)
-            throws IOException {
+    public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, HttpEntity entity,
+            Map<String, String> headers, NodeSelector nodeSelector) throws IOException {
        ClientYamlSuiteRestApi restApi = restApi(apiName);

@ -171,22 +172,33 @@ public class ClientYamlTestClient {
            requestPath = finalPath.toString();
        }

-        Header[] requestHeaders = new Header[headers.size()];
-        int index = 0;
-        for (Map.Entry<String, String> header : headers.entrySet()) {
-            logger.debug("Adding header {} with value {}", header.getKey(), header.getValue());
-            requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue());
-        }
        logger.debug("calling api [{}]", apiName);
        Request request = new Request(requestMethod, requestPath);
        for (Map.Entry<String, String> param : queryStringParams.entrySet()) {
            request.addParameter(param.getKey(), param.getValue());
        }
        request.setEntity(entity);
        setOptions(request, headers, nodeSelector);
        try {
-            Response response = restClient.performRequest(requestMethod, requestPath, queryStringParams, entity, requestHeaders);
+            Response response = restClient.performRequest(request);
            return new ClientYamlTestResponse(response);
        } catch(ResponseException e) {
            throw new ClientYamlTestResponseException(e);
        }
    }

    protected static void setOptions(Request request, Map<String, String> headers, NodeSelector nodeSelector) {
        RequestOptions.Builder options = request.getOptions().toBuilder();
        for (Map.Entry<String, String> header : headers.entrySet()) {
            logger.debug("Adding header {} with value {}", header.getKey(), header.getValue());
            options.addHeader(header.getKey(), header.getValue());
        }
        options.setNodeSelector(nodeSelector);
        request.setOptions(options);
    }

    private static boolean sendBodyAsSourceParam(List<String> supportedMethods, String contentType, long contentLength) {
        if (false == supportedMethods.contains(HttpGet.METHOD_NAME)) {
            // The API doesn't claim to support GET anyway

@ -25,6 +25,7 @@ import org.apache.http.entity.ContentType;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentBuilder;

@ -68,6 +69,15 @@ public class ClientYamlTestExecutionContext {
     */
    public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies,
            Map<String, String> headers) throws IOException {
        return callApi(apiName, params, bodies, headers, NodeSelector.ANY);
    }

    /**
     * Calls an elasticsearch api with the parameters and request body provided as arguments.
     * Saves the obtained response in the execution context.
     */
    public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies,
            Map<String, String> headers, NodeSelector nodeSelector) throws IOException {
        //makes a copy of the parameters before modifying them for this specific request
        Map<String, String> requestParams = new HashMap<>(params);
        requestParams.putIfAbsent("error_trace", "true"); // By default ask for error traces, this may be overridden by params

@ -87,7 +97,7 @@ public class ClientYamlTestExecutionContext {
        HttpEntity entity = createEntity(bodies, requestHeaders);
        try {
-            response = callApiInternal(apiName, requestParams, entity, requestHeaders);
+            response = callApiInternal(apiName, requestParams, entity, requestHeaders, nodeSelector);
            return response;
        } catch(ClientYamlTestResponseException e) {
            response = e.getRestTestResponse();

@ -153,9 +163,9 @@ public class ClientYamlTestExecutionContext {
    }

    // pkg-private for testing
-    ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params,
-            HttpEntity entity, Map<String, String> headers) throws IOException {
-        return clientYamlTestClient.callApi(apiName, params, entity, headers);
+    ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params, HttpEntity entity,
+            Map<String, String> headers, NodeSelector nodeSelector) throws IOException {
+        return clientYamlTestClient.callApi(apiName, params, entity, headers, nodeSelector);
    }

    /**


@ -22,9 +22,11 @@ package org.elasticsearch.test.rest.yaml;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import org.apache.http.HttpHost;
import org.elasticsearch.Version;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.PathUtils;

@ -48,11 +50,20 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

/**
- * Runs a suite of yaml tests shared with all the official Elasticsearch clients against against an elasticsearch cluster.
+ * Runs a suite of yaml tests shared with all the official Elasticsearch
+ * clients against an elasticsearch cluster.
+ * <p>
+ * <strong>IMPORTANT</strong>: These tests sniff the cluster for metadata
+ * and hosts on startup and replace the list of hosts that they are
+ * configured to use with the list sniffed from the cluster. So you can't
+ * control which nodes receive the request by providing the right list of
+ * nodes in the <code>tests.rest.cluster</code> system property. Instead
+ * the tests must explicitly use `node_selector`s.
 */
public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase {

@ -110,6 +121,11 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase {
    @Before
    public void initAndResetContext() throws Exception {
        if (restTestExecutionContext == null) {
            // Sniff host metadata in case we need it in the yaml tests
            List<Node> nodesWithMetadata = sniffHostMetadata(adminClient());
            client().setNodes(nodesWithMetadata);
            adminClient().setNodes(nodesWithMetadata);
            assert adminExecutionContext == null;
            assert blacklistPathMatchers == null;
            final ClientYamlSuiteRestSpec restSpec = ClientYamlSuiteRestSpec.load(SPEC_PATH);

@ -381,4 +397,15 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase {
    protected boolean randomizeContentType() {
        return true;
    }

    /**
     * Sniff the cluster for host metadata.
     */
    private List<Node> sniffHostMetadata(RestClient client) throws IOException {
        ElasticsearchNodesSniffer.Scheme scheme =
                ElasticsearchNodesSniffer.Scheme.valueOf(getProtocol().toUpperCase(Locale.ROOT));
        ElasticsearchNodesSniffer sniffer = new ElasticsearchNodesSniffer(
                client, ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme);
        return sniffer.sniff();
    }
}


@ -40,6 +40,7 @@ public final class Features {
"default_shards", "default_shards",
"embedded_stash_key", "embedded_stash_key",
"headers", "headers",
"node_selector",
"stash_in_key", "stash_in_key",
"stash_in_path", "stash_in_path",
"stash_path_replace", "stash_path_replace",


@ -1,24 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Parses YAML test {@link org.elasticsearch.test.rest.yaml.section.ClientYamlTestSuite}s containing
* {@link org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection}s.
*/
package org.elasticsearch.test.rest.yaml.parser;


@ -24,6 +24,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.elasticsearch.client.NodeSelector;

import static java.util.Collections.unmodifiableMap;

/**
@ -35,6 +37,7 @@ public class ApiCallSection {
    private final Map<String, String> params = new HashMap<>();
    private final Map<String, String> headers = new HashMap<>();
    private final List<Map<String, Object>> bodies = new ArrayList<>();
    private NodeSelector nodeSelector = NodeSelector.ANY;

    public ApiCallSection(String api) {
        this.api = api;

@ -76,4 +79,18 @@ public class ApiCallSection {
    public boolean hasBody() {
        return bodies.size() > 0;
    }

    /**
     * Selects the node on which to run this request.
     */
    public NodeSelector getNodeSelector() {
        return nodeSelector;
    }

    /**
     * Set the selector that decides which node can run this request.
     */
    public void setNodeSelector(NodeSelector nodeSelector) {
        this.nodeSelector = nodeSelector;
    }
}


@ -18,6 +18,7 @@
 */
package org.elasticsearch.test.rest.yaml.section;

import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;

@ -91,6 +92,12 @@ public class ClientYamlTestSection implements Comparable<ClientYamlTestSection>
                    + "runners that do not support the [warnings] section can skip the test at line ["
                    + doSection.getLocation().lineNumber + "]");
            }
            if (NodeSelector.ANY != doSection.getApiCallSection().getNodeSelector()
                    && false == skipSection.getFeatures().contains("node_selector")) {
                throw new IllegalArgumentException("Attempted to add a [do] with a [node_selector] section without a corresponding "
                    + "[skip] so runners that do not support the [node_selector] section can skip the test at line ["
                    + doSection.getLocation().lineNumber + "]");
            }
        }
        this.executableSections.add(executableSection);
    }


@ -21,6 +21,8 @@ package org.elasticsearch.test.rest.yaml.section;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;

@ -38,9 +40,11 @@ import org.elasticsearch.test.rest.yaml.ClientYamlTestResponseException;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.TreeMap;
import java.util.regex.Matcher;

@ -85,6 +89,7 @@ public class DoSection implements ExecutableSection {
        DoSection doSection = new DoSection(parser.getTokenLocation());
        ApiCallSection apiCallSection = null;
        NodeSelector nodeSelector = NodeSelector.ANY;
        Map<String, String> headers = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
        List<String> expectedWarnings = new ArrayList<>();

@ -121,6 +126,18 @@ public class DoSection implements ExecutableSection {
                        headers.put(headerName, parser.text());
                    }
                }
            } else if ("node_selector".equals(currentFieldName)) {
                String selectorName = null;
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    if (token == XContentParser.Token.FIELD_NAME) {
                        selectorName = parser.currentName();
                    } else if (token.isValue()) {
                        NodeSelector newSelector = buildNodeSelector(
                                parser.getTokenLocation(), selectorName, parser.text());
                        nodeSelector = nodeSelector == NodeSelector.ANY ?
                                newSelector : new ComposeNodeSelector(nodeSelector, newSelector);
                    }
                }
            } else if (currentFieldName != null) { // must be part of API call then
                apiCallSection = new ApiCallSection(currentFieldName);
                String paramName = null;

@ -153,6 +170,7 @@ public class DoSection implements ExecutableSection {
                throw new IllegalArgumentException("client call section is mandatory within a do section");
            }
            apiCallSection.addHeaders(headers);
            apiCallSection.setNodeSelector(nodeSelector);
            doSection.setApiCallSection(apiCallSection);
            doSection.setExpectedWarningHeaders(unmodifiableList(expectedWarnings));
        } finally {

@ -222,7 +240,7 @@ public class DoSection implements ExecutableSection {
        try {
            ClientYamlTestResponse response = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(),
-                apiCallSection.getBodies(), apiCallSection.getHeaders());
+                apiCallSection.getBodies(), apiCallSection.getHeaders(), apiCallSection.getNodeSelector());
            if (Strings.hasLength(catchParam)) {
                String catchStatusCode;
                if (catches.containsKey(catchParam)) {

@ -349,4 +367,61 @@ public class DoSection implements ExecutableSection {
                not(equalTo(408)),
                not(equalTo(409)))));
    }

    private static NodeSelector buildNodeSelector(XContentLocation location, String name, String value) {
        switch (name) {
        case "version":
            Version[] range = SkipSection.parseVersionRange(value);
            return new NodeSelector() {
                @Override
                public void select(Iterable<Node> nodes) {
                    for (Iterator<Node> itr = nodes.iterator(); itr.hasNext();) {
                        Node node = itr.next();
                        if (node.getVersion() == null) {
                            throw new IllegalStateException("expected [version] metadata to be set but got " + node);
                        }
                        Version version = Version.fromString(node.getVersion());
                        if (false == (version.onOrAfter(range[0]) && version.onOrBefore(range[1]))) {
                            itr.remove();
                        }
                    }
                }

                @Override
                public String toString() {
                    return "version between [" + range[0] + "] and [" + range[1] + "]";
                }
            };
        default:
            throw new IllegalArgumentException("unknown node_selector [" + name + "]");
        }
    }

    /**
     * Selector that composes two selectors, running the "right" most selector
     * first and then running the "left" selector on the results of the "right"
     * selector.
     */
    private static class ComposeNodeSelector implements NodeSelector {
        private final NodeSelector lhs;
        private final NodeSelector rhs;

        private ComposeNodeSelector(NodeSelector lhs, NodeSelector rhs) {
            this.lhs = Objects.requireNonNull(lhs, "lhs is required");
            this.rhs = Objects.requireNonNull(rhs, "rhs is required");
        }

        @Override
        public void select(Iterable<Node> nodes) {
            rhs.select(nodes);
            lhs.select(nodes);
        }

        @Override
        public String toString() {
            // . as in haskell's "compose" operator
            return lhs + "." + rhs;
        }
    }
}


@ -153,7 +153,7 @@ public class SkipSection {
        return EMPTY.equals(this);
    }

-    private Version[] parseVersionRange(String versionRange) {
+    static Version[] parseVersionRange(String versionRange) {
        if (versionRange == null) {
            return new Version[] { null, null };
        }


@ -20,6 +20,7 @@
package org.elasticsearch.test.rest.yaml;

import org.apache.http.HttpEntity;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

@ -36,8 +37,7 @@ public class ClientYamlTestExecutionContextTests extends ESTestCase {
        new ClientYamlTestExecutionContext(null, randomBoolean()) {
            @Override
            ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params,
-                    HttpEntity entity,
-                    Map<String, String> headers) {
+                    HttpEntity entity, Map<String, String> headers, NodeSelector nodeSelector) {
                headersRef.set(headers);
                return null;
            }


@ -20,6 +20,7 @@
package org.elasticsearch.test.rest.yaml.section;

import org.elasticsearch.Version;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;

@ -35,11 +36,12 @@ import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentParserTestCase {
-    public void testAddingDoWithoutWarningWithoutSkip() {
+    public void testAddingDoWithoutSkips() {
        int lineNumber = between(1, 10000);
        ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test");
        section.setSkipSection(SkipSection.EMPTY);
        DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0));
        doSection.setApiCallSection(new ApiCallSection("test"));
        section.addExecutableSection(doSection);
    }

@ -49,6 +51,7 @@ public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentPa
        section.setSkipSection(new SkipSection(null, singletonList("warnings"), null));
        DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0));
        doSection.setExpectedWarningHeaders(singletonList("foo"));
        doSection.setApiCallSection(new ApiCallSection("test"));
        section.addExecutableSection(doSection);
    }

@ -58,11 +61,37 @@ public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentPa
        section.setSkipSection(new SkipSection(null, singletonList("yaml"), null));
        DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0));
        doSection.setExpectedWarningHeaders(singletonList("foo"));
        doSection.setApiCallSection(new ApiCallSection("test"));
        Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection));
        assertEquals("Attempted to add a [do] with a [warnings] section without a corresponding [skip] so runners that do not support the"
            + " [warnings] section can skip the test at line [" + lineNumber + "]", e.getMessage());
    }

    public void testAddingDoWithNodeSelectorWithSkip() {
        int lineNumber = between(1, 10000);
        ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test");
        section.setSkipSection(new SkipSection(null, singletonList("node_selector"), null));
        DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0));
        ApiCallSection apiCall = new ApiCallSection("test");
        apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
        doSection.setApiCallSection(apiCall);
        section.addExecutableSection(doSection);
    }

    public void testAddingDoWithNodeSelectorWithSkipButNotNodeSelector() {
        int lineNumber = between(1, 10000);
        ClientYamlTestSection section = new ClientYamlTestSection(new XContentLocation(0, 0), "test");
        section.setSkipSection(new SkipSection(null, singletonList("yaml"), null));
        DoSection doSection = new DoSection(new XContentLocation(lineNumber, 0));
        ApiCallSection apiCall = new ApiCallSection("test");
        apiCall.setNodeSelector(NodeSelector.NOT_MASTER_ONLY);
        doSection.setApiCallSection(apiCall);
        Exception e = expectThrows(IllegalArgumentException.class, () -> section.addExecutableSection(doSection));
        assertEquals("Attempted to add a [do] with a [node_selector] section without a corresponding"
            + " [skip] so runners that do not support the [node_selector] section can skip the test at"
            + " line [" + lineNumber + "]", e.getMessage());
    }

    public void testWrongIndentation() throws Exception {
        {
            XContentParser parser = createParser(YamlXContent.yamlXContent,


@ -19,24 +19,36 @@
package org.elasticsearch.test.rest.yaml.section;

import org.apache.http.HttpHost;
import org.elasticsearch.Version;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.NodeSelector;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.yaml.YamlXContent;
import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext;
import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse;
import org.hamcrest.MatcherAssert;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase {

@ -497,7 +509,40 @@ public class DoSectionTests extends AbstractClientYamlTestFragmentParserTestCase
        assertThat(doSection.getApiCallSection(), notNullValue());
        assertThat(doSection.getExpectedWarningHeaders(), equalTo(singletonList(
            "just one entry this time")));
    }

    public void testNodeSelector() throws IOException {
        parser = createParser(YamlXContent.yamlXContent,
            "node_selector:\n" +
            "    version: 5.2.0-6.0.0\n" +
            "indices.get_field_mapping:\n" +
            "    index: test_index"
        );

        DoSection doSection = DoSection.parse(parser);
        assertNotSame(NodeSelector.ANY, doSection.getApiCallSection().getNodeSelector());
        Node v170 = nodeWithVersion("1.7.0");
        Node v521 = nodeWithVersion("5.2.1");
        Node v550 = nodeWithVersion("5.5.0");
        Node v612 = nodeWithVersion("6.1.2");
        List<Node> nodes = new ArrayList<>();
        nodes.add(v170);
        nodes.add(v521);
        nodes.add(v550);
        nodes.add(v612);
        doSection.getApiCallSection().getNodeSelector().select(nodes);
        assertEquals(Arrays.asList(v521, v550), nodes);
        ClientYamlTestExecutionContext context = mock(ClientYamlTestExecutionContext.class);
        ClientYamlTestResponse mockResponse = mock(ClientYamlTestResponse.class);
        when(context.callApi("indices.get_field_mapping", singletonMap("index", "test_index"),
            emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector())).thenReturn(mockResponse);
        doSection.execute(context);
        verify(context).callApi("indices.get_field_mapping", singletonMap("index", "test_index"),
            emptyList(), emptyMap(), doSection.getApiCallSection().getNodeSelector());
    }

    private Node nodeWithVersion(String version) {
        return new Node(new HttpHost("dummy"), null, null, version, null);
    }

    private void assertJsonEquals(Map<String, Object> actual, String expected) throws IOException {


@ -17,8 +17,7 @@ import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
-import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer;
-import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer.Scheme;
+import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer;
import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;

@ -303,11 +302,12 @@ public class HttpExporter extends Exporter {
        if (sniffingEnabled) {
            final List<String> hosts = HOST_SETTING.getConcreteSettingForNamespace(config.name()).get(config.settings());
            // createHosts(config) ensures that all schemes are the same for all hosts!
-            final Scheme scheme = hosts.get(0).startsWith("https") ? Scheme.HTTPS : Scheme.HTTP;
-            final ElasticsearchHostsSniffer hostsSniffer =
-                new ElasticsearchHostsSniffer(client, ElasticsearchHostsSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme);
+            final ElasticsearchNodesSniffer.Scheme scheme = hosts.get(0).startsWith("https") ?
+                ElasticsearchNodesSniffer.Scheme.HTTPS : ElasticsearchNodesSniffer.Scheme.HTTP;
+            final ElasticsearchNodesSniffer hostsSniffer =
+                new ElasticsearchNodesSniffer(client, ElasticsearchNodesSniffer.DEFAULT_SNIFF_REQUEST_TIMEOUT, scheme);

-            sniffer = Sniffer.builder(client).setHostsSniffer(hostsSniffer).build();
+            sniffer = Sniffer.builder(client).setNodesSniffer(hostsSniffer).build();

            // inform the sniffer whenever there's a node failure
            listener.setSniffer(sniffer);

View file

@ -8,6 +8,7 @@ package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Logger;
import org.apache.lucene.util.SetOnce; import org.apache.lucene.util.SetOnce;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
@ -76,7 +77,8 @@ class NodeFailureListener extends RestClient.FailureListener {
} }
@Override @Override
public void onFailure(final HttpHost host) { public void onFailure(final Node node) {
HttpHost host = node.getHost();
logger.warn("connection failed to node at [{}://{}:{}]", host.getSchemeName(), host.getHostName(), host.getPort()); logger.warn("connection failed to node at [{}://{}:{}]", host.getSchemeName(), host.getHostName(), host.getPort());
final HttpResource resource = this.resource.get(); final HttpResource resource = this.resource.get();
@ -90,4 +92,4 @@ class NodeFailureListener extends RestClient.FailureListener {
} }
} }
} }


@ -8,6 +8,7 @@ package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer;

@ -44,8 +45,6 @@ import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;
import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyMapOf;
-import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.atMost;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.inOrder;

@ -300,7 +299,7 @@ public class HttpExporterTests extends ESTestCase {
        final StringEntity entity = new StringEntity("{}", ContentType.APPLICATION_JSON);

        when(response.getEntity()).thenReturn(entity);
-        when(client.performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class))).thenReturn(response);
+        when(client.performRequest(any(Request.class))).thenReturn(response);

        try (Sniffer sniffer = HttpExporter.createSniffer(config, client, listener)) {
            assertThat(sniffer, not(nullValue()));

@ -309,7 +308,7 @@ public class HttpExporterTests extends ESTestCase {
        }

        // it's a race whether it triggers this at all
-        verify(client, atMost(1)).performRequest(eq("get"), eq("/_nodes/http"), anyMapOf(String.class, String.class));
+        verify(client, atMost(1)).performRequest(any(Request.class));
        verifyNoMoreInteractions(client, listener);
    }

View file

@ -7,6 +7,7 @@ package org.elasticsearch.xpack.monitoring.exporter.http;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import org.apache.lucene.util.SetOnce.AlreadySetException; import org.apache.lucene.util.SetOnce.AlreadySetException;
import org.elasticsearch.client.Node;
import org.elasticsearch.client.sniff.Sniffer; import org.elasticsearch.client.sniff.Sniffer;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
@ -21,7 +22,7 @@ public class NodeFailureListenerTests extends ESTestCase {
private final Sniffer sniffer = mock(Sniffer.class); private final Sniffer sniffer = mock(Sniffer.class);
private final HttpResource resource = new MockHttpResource(getTestName(), false); private final HttpResource resource = new MockHttpResource(getTestName(), false);
private final HttpHost host = new HttpHost("localhost", 9200); private final Node node = new Node(new HttpHost("localhost", 9200));
private final NodeFailureListener listener = new NodeFailureListener(); private final NodeFailureListener listener = new NodeFailureListener();
@ -44,7 +45,7 @@ public class NodeFailureListenerTests extends ESTestCase {
public void testSnifferNotifiedOnFailure() { public void testSnifferNotifiedOnFailure() {
listener.setSniffer(sniffer); listener.setSniffer(sniffer);
listener.onFailure(host); listener.onFailure(node);
verify(sniffer).sniffOnFailure(); verify(sniffer).sniffOnFailure();
} }
@ -52,7 +53,7 @@ public class NodeFailureListenerTests extends ESTestCase {
public void testResourceNotifiedOnFailure() { public void testResourceNotifiedOnFailure() {
listener.setResource(resource); listener.setResource(resource);
listener.onFailure(host); listener.onFailure(node);
assertTrue(resource.isDirty()); assertTrue(resource.isDirty());
} }
@ -64,7 +65,7 @@ public class NodeFailureListenerTests extends ESTestCase {
listener.setResource(optionalResource); listener.setResource(optionalResource);
listener.setSniffer(optionalSniffer); listener.setSniffer(optionalSniffer);
listener.onFailure(host); listener.onFailure(node);
if (optionalResource != null) { if (optionalResource != null) {
assertTrue(resource.isDirty()); assertTrue(resource.isDirty());