mirror of https://github.com/elastic/elasticsearch.git (synced 2025-06-28 09:28:55 -04:00)
Update several references to TransportVersion.toString to use toReleaseVersion (#107902)
parent a6a29d0946
commit 638a45009c
39 changed files with 113 additions and 105 deletions
docs/changelog/107902.yaml (new file, +5)
@@ -0,0 +1,5 @@
+pr: 107902
+summary: Update several references to `TransportVersion.toString` to use `toReleaseVersion`
+area: Infra/Core
+type: bug
+issues: []
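The changelog entry above states the intent. As a rough illustration of the pattern applied in the hunks below, here is a minimal Java sketch (not code from this commit; it only assumes the Elasticsearch server classes such as org.elasticsearch.TransportVersion are on the classpath): messages that previously interpolated a TransportVersion directly printed its internal numeric id (for example 8070099), while toReleaseVersion() prints the matching release string (for example 8.7.0).

import org.elasticsearch.TransportVersion;

// Hypothetical helper, for illustration only; the class and method names are not from the commit.
class ReleaseVersionMessageSketch {
    static String unsupportedVersionMessage(TransportVersion remote, TransportVersion minimum) {
        // Before this commit, such messages concatenated the TransportVersion itself
        // (effectively toString(), i.e. the numeric id); after it they call toReleaseVersion().
        return "Received message from unsupported version: ["
            + remote.toReleaseVersion()
            + "] minimal compatible version is: ["
            + minimum.toReleaseVersion()
            + "]";
    }
}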
@@ -208,8 +208,8 @@ public class MultiSearchTemplateIT extends ESIntegTestCase {
         String expectedCause = Strings.format(
             "[fail_before_current_version] was released first in version %s, failed compatibility "
                 + "check trying to send it to node with version %s",
-            FailBeforeCurrentVersionQueryBuilder.FUTURE_VERSION,
-            TransportVersions.MINIMUM_CCS_VERSION
+            FailBeforeCurrentVersionQueryBuilder.FUTURE_VERSION.toReleaseVersion(),
+            TransportVersions.MINIMUM_CCS_VERSION.toReleaseVersion()
         );
         String actualCause = ex.getCause().getMessage();
         assertEquals(expectedCause, actualCause);
@@ -37,6 +37,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResp
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.matchesRegex;

 /**
  * Full integration test of the template query plugin.
@@ -441,10 +442,13 @@ public class SearchTemplateIT extends ESSingleNodeTestCase {
         );
         assertThat(primary.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled."));

-        String expectedCause = "[fail_before_current_version] was released first in version XXXXXXX, failed compatibility check trying to"
-            + " send it to node with version XXXXXXX";
-        String actualCause = underlying.getMessage().replaceAll("\\d{7,}", "XXXXXXX");
-        assertEquals(expectedCause, actualCause);
+        assertThat(
+            underlying.getMessage(),
+            matchesRegex(
+                "\\[fail_before_current_version] was released first in version .+,"
+                    + " failed compatibility check trying to send it to node with version .+"
+            )
+        );
     }

     public static void assertHitCount(SearchTemplateRequestBuilder requestBuilder, long expectedHitCount) {
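A short sketch (an assumption for illustration, not part of the commit) of why the assertion above moves from exact equality plus a replaceAll("\\d{7,}", "XXXXXXX") placeholder to matchesRegex: once the message carries release strings such as 8.15.0 rather than seven-digit numeric ids, masking long digit runs no longer normalizes the version, so the test matches the fixed text around the versions with a regex instead.

import java.util.regex.Pattern;

// Standalone illustration; the message below uses made-up release numbers.
class ReleaseVersionRegexSketch {
    public static void main(String[] args) {
        // The same pattern the test passes to matchesRegex(...).
        Pattern expected = Pattern.compile(
            "\\[fail_before_current_version] was released first in version .+,"
                + " failed compatibility check trying to send it to node with version .+"
        );
        String message = "[fail_before_current_version] was released first in version 8.15.0,"
            + " failed compatibility check trying to send it to node with version 8.13.0";
        System.out.println(expected.matcher(message).matches()); // prints: true
    }
}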
@@ -8,8 +8,6 @@

 package org.elasticsearch.action.admin.cluster.snapshots.features;

-import org.elasticsearch.TransportVersion;
-import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.master.MasterNodeRequest;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -20,14 +18,7 @@ import java.io.IOException;
 /** Request for resetting feature state */
 public class ResetFeatureStateRequest extends MasterNodeRequest<ResetFeatureStateRequest> {

-    private static final TransportVersion FEATURE_RESET_ON_MASTER = TransportVersions.V_7_14_0;
-
     public static ResetFeatureStateRequest fromStream(StreamInput in) throws IOException {
-        if (in.getTransportVersion().before(FEATURE_RESET_ON_MASTER)) {
-            throw new IllegalStateException(
-                "feature reset is not available in a cluster that have nodes with version before " + FEATURE_RESET_ON_MASTER
-            );
-        }
         return new ResetFeatureStateRequest(in);
     }

@@ -39,11 +30,6 @@ public class ResetFeatureStateRequest extends MasterNodeRequest<ResetFeatureStat

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().before(FEATURE_RESET_ON_MASTER)) {
-            throw new IllegalStateException(
-                "feature reset is not available in a cluster that have nodes with version before " + FEATURE_RESET_ON_MASTER
-            );
-        }
         super.writeTo(out);
     }

@@ -65,9 +65,9 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
         if (in.getTransportVersion().before(TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED)) {
             throw new UnsupportedOperationException(
                 "ResolveClusterAction requires at least Transport Version "
-                    + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED
+                    + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED.toReleaseVersion()
                     + " but was "
-                    + in.getTransportVersion()
+                    + in.getTransportVersion().toReleaseVersion()
             );
         }
         this.names = in.readStringArray();
@@ -81,9 +81,9 @@ public class ResolveClusterActionRequest extends ActionRequest implements Indice
         if (out.getTransportVersion().before(TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED)) {
             throw new UnsupportedOperationException(
                 "ResolveClusterAction requires at least Transport Version "
-                    + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED
+                    + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED.toReleaseVersion()
                     + " but was "
-                    + out.getTransportVersion()
+                    + out.getTransportVersion().toReleaseVersion()
             );
         }
         out.writeStringArray(names);
@@ -47,9 +47,9 @@ public class ResolveClusterActionResponse extends ActionResponse implements ToXC
         if (out.getTransportVersion().before(TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED)) {
             throw new UnsupportedOperationException(
                 "ResolveClusterAction requires at least Transport Version "
-                    + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED
+                    + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED.toReleaseVersion()
                     + " but was "
-                    + out.getTransportVersion()
+                    + out.getTransportVersion().toReleaseVersion()
             );
         }
         out.writeMap(infoMap, StreamOutput::writeWriteable);
@@ -68,9 +68,9 @@ public class ResolveClusterInfo implements Writeable {
         if (out.getTransportVersion().before(TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED)) {
             throw new UnsupportedOperationException(
                 "ResolveClusterAction requires at least Transport Version "
-                    + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED
+                    + TransportVersions.RESOLVE_CLUSTER_ENDPOINT_ADDED.toReleaseVersion()
                     + " but was "
-                    + out.getTransportVersion()
+                    + out.getTransportVersion().toReleaseVersion()
             );
         }
         out.writeBoolean(connected);
@@ -75,7 +75,7 @@ public class IndicesShardStoresRequest extends MasterNodeReadRequest<IndicesShar
                 "support for maxConcurrentShardRequests=["
                     + maxConcurrentShardRequests
                     + "] was added in version [8.8.0], cannot send this request using transport version ["
-                    + out.getTransportVersion()
+                    + out.getTransportVersion().toReleaseVersion()
                     + "]"
             );
         } // else just drop the value and use the default behaviour
@@ -139,7 +139,7 @@ public final class TransportSearchHelper {
                 "["
                     + writeableRequest.getClass()
                     + "] is not compatible with version "
-                    + TransportVersions.MINIMUM_CCS_VERSION
+                    + TransportVersions.MINIMUM_CCS_VERSION.toReleaseVersion()
                     + " and the '"
                     + SearchService.CCS_VERSION_CHECK_SETTING.getKey()
                     + "' setting is enabled.",
@@ -341,7 +341,7 @@ public class NodeJoinExecutor implements ClusterStateTaskExecutor<JoinTask> {
     private static void blockForbiddenVersions(TransportVersion joiningTransportVersion) {
         if (FORBIDDEN_VERSIONS.contains(joiningTransportVersion)) {
             throw new IllegalStateException(
-                "A node with transport version " + joiningTransportVersion + " is forbidden from joining this cluster"
+                "A node with transport version " + joiningTransportVersion.toReleaseVersion() + " is forbidden from joining this cluster"
             );
         }
     }
@@ -427,9 +427,9 @@ public class NodeJoinExecutor implements ClusterStateTaskExecutor<JoinTask> {
         if (joiningCompatibilityVersions.transportVersion().before(minClusterTransportVersion)) {
             throw new IllegalStateException(
                 "node with transport version ["
-                    + joiningCompatibilityVersions.transportVersion()
+                    + joiningCompatibilityVersions.transportVersion().toReleaseVersion()
                     + "] may not join a cluster with minimum transport version ["
-                    + minClusterTransportVersion
+                    + minClusterTransportVersion.toReleaseVersion()
                     + "]"
             );
         }
@@ -456,7 +456,11 @@ public class PublicationTransportHandler {

                 final ReleasableBytesReference bytes = serializedDiffs.get(connection.getTransportVersion());
                 assert bytes != null
-                    : "failed to find serialized diff for node " + destination + " of version [" + connection.getTransportVersion() + "]";
+                    : "failed to find serialized diff for node "
+                        + destination
+                        + " of version ["
+                        + connection.getTransportVersion().toReleaseVersion()
+                        + "]";

                 // acquire a ref to the context just in case we need to try again with the full cluster state
                 if (tryIncRef() == false) {
@@ -67,9 +67,9 @@ public final class VersionCheckingStreamOutput extends StreamOutput {
                 "["
                     + namedWriteable.getWriteableName()
                     + "] was released first in version "
-                    + namedWriteable.getMinimalSupportedVersion()
+                    + namedWriteable.getMinimalSupportedVersion().toReleaseVersion()
                     + ", failed compatibility check trying to send it to node with version "
-                    + getTransportVersion()
+                    + getTransportVersion().toReleaseVersion()
             );
         }
     }
@@ -307,7 +307,7 @@ public final class SearchHit implements Writeable, ToXContentObject, RefCounted
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeVInt(rank);
         } else if (rank != NO_RANK) {
-            throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion() + "]");
+            throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion().toReleaseVersion() + "]");
         }
         out.writeOptionalText(id);
         if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
@@ -299,7 +299,9 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_9_X)) {
             out.writeCollection(subSearchSourceBuilders);
         } else if (out.getTransportVersion().before(TransportVersions.V_8_4_0) && subSearchSourceBuilders.size() >= 2) {
-            throw new IllegalArgumentException("cannot serialize [sub_searches] to version [" + out.getTransportVersion() + "]");
+            throw new IllegalArgumentException(
+                "cannot serialize [sub_searches] to version [" + out.getTransportVersion().toReleaseVersion() + "]"
+            );
         } else {
             out.writeOptionalNamedWriteable(query());
         }
@@ -346,8 +348,10 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R
         if (out.getTransportVersion().before(TransportVersions.V_8_7_0)) {
             if (knnSearch.size() > 1) {
                 throw new IllegalArgumentException(
-                    "Versions before 8070099 don't support multiple [knn] search clauses and search was sent to ["
-                        + out.getTransportVersion()
+                    "Versions before ["
+                        + TransportVersions.V_8_7_0.toReleaseVersion()
+                        + "] don't support multiple [knn] search clauses and search was sent to ["
+                        + out.getTransportVersion().toReleaseVersion()
                         + "]"
                 );
             }
@@ -359,7 +363,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeOptionalNamedWriteable(rankBuilder);
         } else if (rankBuilder != null) {
-            throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion() + "]");
+            throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion().toReleaseVersion() + "]");
         }
     }

@@ -141,8 +141,10 @@ public final class DfsSearchResult extends SearchPhaseResult {
             if (knnResults != null && knnResults.size() > 1) {
                 throw new IllegalArgumentException(
                     "Cannot serialize multiple KNN results to nodes using previous transport version ["
-                        + out.getTransportVersion()
-                        + "], minimum required transport version is [8070099]"
+                        + out.getTransportVersion().toReleaseVersion()
+                        + "], minimum required transport version is ["
+                        + TransportVersions.V_8_7_0.toReleaseVersion()
+                        + "]"
                 );
             }
             out.writeOptionalWriteable(knnResults == null || knnResults.isEmpty() ? null : knnResults.get(0));
@@ -456,7 +456,7 @@ public final class QuerySearchResult extends SearchPhaseResult {
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeOptionalNamedWriteable(rankShardResult);
         } else if (rankShardResult != null) {
-            throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion() + "]");
+            throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion().toReleaseVersion() + "]");
         }
     }

@@ -246,9 +246,9 @@ public class InboundDecoder implements Releasable {
             if (TransportVersion.isCompatible(remoteVersion) == false) {
                 throw new IllegalStateException(
                     "Received message from unsupported version: ["
-                        + remoteVersion
+                        + remoteVersion.toReleaseVersion()
                         + "] minimal compatible version is: ["
-                        + TransportVersions.MINIMUM_COMPATIBLE
+                        + TransportVersions.MINIMUM_COMPATIBLE.toReleaseVersion()
                         + "]"
                 );
             }
@@ -49,9 +49,9 @@ public class VersionCheckingStreamOutputTests extends ESTestCase {
         );
         assertEquals(
             "[test_writable] was released first in version "
-                + TransportVersion.current()
+                + TransportVersion.current().toReleaseVersion()
                 + ", failed compatibility check trying to send it to node with version "
-                + streamVersion,
+                + streamVersion.toReleaseVersion(),
             e.getMessage()
         );
     }
@@ -477,9 +477,9 @@ public class InboundDecoderTests extends ESTestCase {
         } catch (IllegalStateException expected) {
             assertEquals(
                 "Received message from unsupported version: ["
-                    + invalid
+                    + invalid.toReleaseVersion()
                     + "] minimal compatible version is: ["
-                    + TransportVersions.MINIMUM_COMPATIBLE
+                    + TransportVersions.MINIMUM_COMPATIBLE.toReleaseVersion()
                     + "]",
                 expected.getMessage()
             );
@@ -22,7 +22,7 @@ import java.io.IOException;
 public class FailBeforeCurrentVersionQueryBuilder extends DummyQueryBuilder {

     public static final String NAME = "fail_before_current_version";
-    public static final int FUTURE_VERSION = TransportVersion.current().id() + 11_111;
+    public static final TransportVersion FUTURE_VERSION = TransportVersion.fromId(TransportVersion.current().id() + 11_111);

     public FailBeforeCurrentVersionQueryBuilder(StreamInput in) throws IOException {
         super(in);
@@ -49,6 +49,6 @@ public class FailBeforeCurrentVersionQueryBuilder extends DummyQueryBuilder {
     public TransportVersion getMinimalSupportedVersion() {
         // this is what causes the failure - it always reports a version in the future, so it is never compatible with
         // current or minimum CCS TransportVersion
-        return new TransportVersion(FUTURE_VERSION);
+        return FUTURE_VERSION;
     }
 }
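A brief hedged sketch (not from the commit) of the relationship between the two hunks above and the MultiSearchTemplateIT hunk earlier in this commit: FUTURE_VERSION changes from a raw int id to a TransportVersion built with TransportVersion.fromId, which is what lets the test call FUTURE_VERSION.toReleaseVersion() when building its expected error message.

import org.elasticsearch.TransportVersion;

// Hypothetical stand-alone mirror of that relationship, for illustration only.
class FutureVersionSketch {
    // A plain int id has no toReleaseVersion(); wrapping it via fromId makes the mapping available.
    static final TransportVersion FUTURE_VERSION = TransportVersion.fromId(TransportVersion.current().id() + 11_111);

    static String expectedCausePrefix() {
        return "[fail_before_current_version] was released first in version " + FUTURE_VERSION.toReleaseVersion();
    }
}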
@@ -71,8 +71,7 @@ public final class SourceDestValidator {
         + "alias [{0}], license is not active";
     public static final String REMOTE_SOURCE_INDICES_NOT_SUPPORTED = "remote source indices are not supported";
     public static final String REMOTE_CLUSTERS_TRANSPORT_TOO_OLD =
-        "remote clusters are expected to run at least transport version [{0}] (reason: [{1}]),"
-            + " but the following clusters were too old: [{2}]";
+        "remote clusters are expected to run at least version [{0}] (reason: [{1}])," + " but the following clusters were too old: [{2}]";
     public static final String PIPELINE_MISSING = "Pipeline with id [{0}] could not be found";

     private final IndexNameExpressionResolver indexNameExpressionResolver;
@@ -491,12 +490,12 @@ public final class SourceDestValidator {
         if (oldRemoteClusterVersions.isEmpty() == false) {
             context.addValidationError(
                 REMOTE_CLUSTERS_TRANSPORT_TOO_OLD,
-                minExpectedVersion,
+                minExpectedVersion.toReleaseVersion(),
                 reason,
                 oldRemoteClusterVersions.entrySet()
                     .stream()
                     .sorted(comparingByKey()) // sort to have a deterministic order among clusters in the resulting string
-                    .map(e -> e.getKey() + " (" + e.getValue() + ")")
+                    .map(e -> e.getKey() + " (" + e.getValue().toReleaseVersion() + ")")
                     .collect(joining(", "))
             );
         }
@@ -111,9 +111,9 @@ public final class GetUserPrivilegesResponse extends ActionResponse {
         } else if (hasRemoteIndicesPrivileges()) {
             throw new IllegalArgumentException(
                 "versions of Elasticsearch before ["
-                    + TransportVersions.V_8_8_0
+                    + TransportVersions.V_8_8_0.toReleaseVersion()
                     + "] can't handle remote indices privileges and attempted to send to ["
-                    + out.getTransportVersion()
+                    + out.getTransportVersion().toReleaseVersion()
                     + "]"
             );
         }
@@ -226,9 +226,9 @@ public final class Authentication implements ToXContentObject {
         if (isCrossClusterAccess() && olderVersion.before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY)) {
             throw new IllegalArgumentException(
                 "versions of Elasticsearch before ["
-                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                     + "] can't handle cross cluster access authentication and attempted to rewrite for ["
-                    + olderVersion
+                    + olderVersion.toReleaseVersion()
                     + "]"
             );
         }
@@ -576,9 +576,9 @@ public final class Authentication implements ToXContentObject {
         if (isCrossClusterAccess && out.getTransportVersion().before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY)) {
             throw new IllegalArgumentException(
                 "versions of Elasticsearch before ["
-                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                     + "] can't handle cross cluster access authentication and attempted to send to ["
-                    + out.getTransportVersion()
+                    + out.getTransportVersion().toReleaseVersion()
                     + "]"
             );
         }
@@ -1368,9 +1368,9 @@ public final class Authentication implements ToXContentObject {
             () -> "Cross cluster access authentication has authentication field in metadata ["
                 + authenticationFromMetadata
                 + "] that may require a rewrite from version ["
-                + effectiveSubjectVersion
+                + effectiveSubjectVersion.toReleaseVersion()
                 + "] to ["
-                + olderVersion
+                + olderVersion.toReleaseVersion()
                 + "]"
         );
         final Map<String, Object> rewrittenMetadata = new HashMap<>(metadata);
@@ -82,8 +82,8 @@ public class RemoteClusterMinimumVersionValidationTests extends ESTestCase {
             ctx -> assertThat(
                 ctx.getValidationException().validationErrors(),
                 contains(
-                    "remote clusters are expected to run at least transport version [7110099] (reason: [some reason]), "
-                        + "but the following clusters were too old: [cluster-A (7100099)]"
+                    "remote clusters are expected to run at least version [7.11.0] (reason: [some reason]), "
+                        + "but the following clusters were too old: [cluster-A (7.10.0)]"
                 )
             )
         )
@@ -100,8 +100,8 @@ public class RemoteClusterMinimumVersionValidationTests extends ESTestCase {
             ctx -> assertThat(
                 ctx.getValidationException().validationErrors(),
                 contains(
-                    "remote clusters are expected to run at least transport version [7120099] (reason: [some reason]), "
-                        + "but the following clusters were too old: [cluster-A (7100099), cluster-B (7110099)]"
+                    "remote clusters are expected to run at least version [7.12.0] (reason: [some reason]), "
+                        + "but the following clusters were too old: [cluster-A (7.10.0), cluster-B (7.11.0)]"
                 )
             )
         )
@@ -100,9 +100,9 @@ public class GetUserPrivilegesResponseTests extends ESTestCase {
             ex.getMessage(),
             containsString(
                 "versions of Elasticsearch before ["
-                    + TransportVersions.V_8_8_0
+                    + TransportVersions.V_8_8_0.toReleaseVersion()
                     + "] can't handle remote indices privileges and attempted to send to ["
-                    + version
+                    + version.toReleaseVersion()
                     + "]"
             )
         );
@@ -84,7 +84,9 @@ public class TransportTermsEnumActionTests extends ESSingleNodeTestCase {
         assertThat(
             ex.getCause().getCause().getMessage(),
             containsString(
-                "was released first in version " + version + ", failed compatibility check trying to send it to node with version"
+                "was released first in version "
+                    + version.toReleaseVersion()
+                    + ", failed compatibility check trying to send it to node with version"
             )
         );
     }
@@ -318,7 +318,7 @@ public class TransportStartDatafeedAction extends TransportMasterNodeAction<Star
             throw ExceptionsHelper.badRequestException(
                 Messages.getMessage(
                     REMOTE_CLUSTERS_TRANSPORT_TOO_OLD,
-                    minVersion.toString(),
+                    minVersion.toReleaseVersion(),
                     reason,
                     Strings.collectionToCommaDelimitedString(clustersTooOld)
                 )
@@ -138,7 +138,7 @@ public class TransportStartDatafeedActionTests extends ESTestCase {
         assertThat(
             ex.getMessage(),
             containsString(
-                "remote clusters are expected to run at least transport version [7110099] (reason: [runtime_mappings]), "
+                "remote clusters are expected to run at least version [7.11.0] (reason: [runtime_mappings]), "
                     + "but the following clusters were too old: [old_cluster_1]"
             )
         );
@@ -322,8 +322,8 @@ public class ApiKeyService {
             // Creating API keys with roles which define remote indices privileges is not allowed in a mixed cluster.
             listener.onFailure(
                 new IllegalArgumentException(
-                    "all nodes must have transport version ["
-                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                    "all nodes must have version ["
+                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                         + "] or higher to support remote indices privileges for API keys"
                 )
             );
@@ -333,8 +333,8 @@ public class ApiKeyService {
             && request.getType() == ApiKey.Type.CROSS_CLUSTER) {
             listener.onFailure(
                 new IllegalArgumentException(
-                    "all nodes must have transport version ["
-                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                    "all nodes must have version ["
+                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                         + "] or higher to support creating cross cluster API keys"
                 )
             );
@@ -381,8 +381,8 @@ public class ApiKeyService {
         // creating/updating API keys with restrictions is not allowed in a mixed cluster.
         if (transportVersion.before(WORKFLOWS_RESTRICTION_VERSION)) {
             return new IllegalArgumentException(
-                "all nodes must have transport version ["
-                    + WORKFLOWS_RESTRICTION_VERSION
+                "all nodes must have version ["
+                    + WORKFLOWS_RESTRICTION_VERSION.toReleaseVersion()
                     + "] or higher to support restrictions for API keys"
             );
         }
@@ -492,8 +492,8 @@ public class ApiKeyService {
             // Updating API keys with roles which define remote indices privileges is not allowed in a mixed cluster.
             listener.onFailure(
                 new IllegalArgumentException(
-                    "all nodes must have transport version ["
-                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                    "all nodes must have version ["
+                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                         + "] or higher to support remote indices privileges for API keys"
                 )
             );
@@ -79,8 +79,8 @@ public class CrossClusterAccessAuthenticationService {
             withRequestProcessingFailure(
                 authcContext,
                 new IllegalArgumentException(
-                    "all nodes must have transport version ["
-                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                    "all nodes must have version ["
+                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                         + "] or higher to support cross cluster requests through the dedicated remote cluster port"
                 ),
                 listener
@@ -259,8 +259,8 @@ public class NativeRolesStore implements BiConsumer<Set<String>, ActionListener<
             && clusterService.state().getMinTransportVersion().before(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY)) {
             listener.onFailure(
                 new IllegalStateException(
-                    "all nodes must have transport version ["
-                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                    "all nodes must have version ["
+                        + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                         + "] or higher to support remote indices privileges"
                 )
             );
@@ -318,7 +318,7 @@ public class SecurityServerTransportInterceptor implements TransportInterceptor
                 "Settings for remote cluster ["
                     + remoteClusterAlias
                     + "] indicate cross cluster access headers should be sent but target cluster version ["
-                    + connection.getTransportVersion()
+                    + connection.getTransportVersion().toReleaseVersion()
                     + "] does not support receiving them"
             );
         }
@@ -2710,8 +2710,8 @@ public class ApiKeyServiceTests extends ESTestCase {
         assertThat(
             e.getMessage(),
             containsString(
-                "all nodes must have transport version ["
-                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                "all nodes must have version ["
+                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                     + "] or higher to support creating cross cluster API keys"
             )
         );
@@ -2856,8 +2856,8 @@ public class ApiKeyServiceTests extends ESTestCase {
         assertThat(
             e1.getMessage(),
             containsString(
-                "all nodes must have transport version ["
-                    + WORKFLOWS_RESTRICTION_VERSION
+                "all nodes must have version ["
+                    + WORKFLOWS_RESTRICTION_VERSION.toReleaseVersion()
                     + "] or higher to support restrictions for API keys"
             )
         );
@@ -2874,8 +2874,8 @@ public class ApiKeyServiceTests extends ESTestCase {
         assertThat(
             e2.getMessage(),
             containsString(
-                "all nodes must have transport version ["
-                    + WORKFLOWS_RESTRICTION_VERSION
+                "all nodes must have version ["
+                    + WORKFLOWS_RESTRICTION_VERSION.toReleaseVersion()
                     + "] or higher to support restrictions for API keys"
             )
         );
@@ -108,8 +108,8 @@ public class CrossClusterAccessAuthenticationServiceTests extends ESTestCase {
         assertThat(
             actual.getCause().getCause().getMessage(),
             equalTo(
-                "all nodes must have transport version ["
-                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                "all nodes must have version ["
+                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                     + "] or higher to support cross cluster requests through the dedicated remote cluster port"
             )
         );
@@ -435,8 +435,8 @@ public class NativeRolesStoreTests extends ESTestCase {
         assertThat(
             e.getMessage(),
             containsString(
-                "all nodes must have transport version ["
-                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                "all nodes must have version ["
+                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                     + "] or higher to support remote indices privileges"
             )
         );
@@ -1005,7 +1005,7 @@ public class SecurityServerTransportInterceptorTests extends ESTestCase {
                 "Settings for remote cluster ["
                     + remoteClusterAlias
                     + "] indicate cross cluster access headers should be sent but target cluster version ["
-                    + connection.getTransportVersion()
+                    + connection.getTransportVersion().toReleaseVersion()
                     + "] does not support receiving them"
             )
         );
@@ -716,7 +716,9 @@ class BlobAnalyzeAction extends HandledTransportAction<BlobAnalyzeAction.Request
         if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) {
             out.writeBoolean(abortWrite);
         } else if (abortWrite) {
-            throw new IllegalStateException("cannot send abortWrite request on transport version [" + out.getTransportVersion() + "]");
+            throw new IllegalStateException(
+                "cannot send abortWrite request on transport version [" + out.getTransportVersion().toReleaseVersion() + "]"
+            );
         }
     }

@@ -949,8 +949,8 @@ public class RepositoryAnalyzeAction extends HandledTransportAction<RepositoryAn
             out.writeVInt(registerOperationCount);
         } else if (registerOperationCount != concurrency) {
             throw new IllegalArgumentException(
-                "cannot send request with registerOperationCount != concurrency on transport version ["
-                    + out.getTransportVersion()
+                "cannot send request with registerOperationCount != concurrency to version ["
+                    + out.getTransportVersion().toReleaseVersion()
                     + "]"
             );
         }
@@ -965,7 +965,7 @@ public class RepositoryAnalyzeAction extends HandledTransportAction<RepositoryAn
             out.writeBoolean(abortWritePermitted);
         } else if (abortWritePermitted) {
             throw new IllegalArgumentException(
-                "cannot send abortWritePermitted request on transport version [" + out.getTransportVersion() + "]"
+                "cannot send abortWritePermitted request to version [" + out.getTransportVersion().toReleaseVersion() + "]"
             );
         }
     }
@@ -167,8 +167,8 @@ public class ApiKeyBackwardsCompatibilityIT extends AbstractUpgradeTestCase {
         assertThat(
             e.getMessage(),
             containsString(
-                "all nodes must have transport version ["
-                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                "all nodes must have version ["
+                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                     + "] or higher to support remote indices privileges for API keys"
             )
         );
@@ -179,8 +179,8 @@ public class ApiKeyBackwardsCompatibilityIT extends AbstractUpgradeTestCase {
         assertThat(
             e.getMessage(),
             containsString(
-                "all nodes must have transport version ["
-                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY
+                "all nodes must have version ["
+                    + TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY.toReleaseVersion()
                     + "] or higher to support remote indices privileges for API keys"
            )
         );
Loading…
Add table
Add a link
Reference in a new issue