Dry up collection writing to StreamOutput (#86386)
Small weekend project around automated refactoring: we have endless duplication of these manual collection-writing loops, so this change dries some of it up.
commit 00b3721108 (parent 03388ff09e)
92 changed files with 184 additions and 570 deletions
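Every hunk below makes the same change: a hand-rolled "write the size, then loop over the elements" block collapses into one of the shared StreamOutput helpers visible on the added lines (writeCollection, writeList, writeMap, writeArray, writeStringCollection, writeNamedWriteableList, writeVIntArray). The following is a minimal sketch of that pattern, using toy stand-ins rather than the real StreamOutput and Writeable classes; the Writer interface and method bodies here are illustrative assumptions, not Elasticsearch's actual implementation.

    import java.io.IOException;
    import java.util.List;

    public class DryUpSketch {

        // Toy stand-in for org.elasticsearch.common.io.stream.Writeable.
        interface Writeable {
            void writeTo(StreamOutput out) throws IOException;
        }

        // Toy stand-in for StreamOutput; only the shape of the API matters here.
        static class StreamOutput {

            // Matches the (out, value) shape of the writer lambdas on the added lines.
            interface Writer<T> {
                void write(StreamOutput out, T value) throws IOException;
            }

            void writeVInt(int i) throws IOException {
                // variable-length int encoding elided in this sketch
            }

            // The duplicated pattern, written once: size prefix, then one write per element.
            // Before this commit, every call site spelled this loop out by hand.
            <T> void writeCollection(List<T> items, Writer<T> writer) throws IOException {
                writeVInt(items.size());
                for (T item : items) {
                    writer.write(this, item);
                }
            }

            // Convenience overload for elements that serialize themselves,
            // so call sites shrink to out.writeCollection(list).
            void writeCollection(List<? extends Writeable> items) throws IOException {
                writeCollection(items, (o, v) -> v.writeTo(o));
            }
        }
    }

Centralizing the loop also removes room for copy-paste slips: note the ClusterInfo hunk below, where lambdas that accidentally captured the outer out (out.writeLong(...)) are corrected to use their own o parameter.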
@@ -51,10 +51,7 @@ public class GeometryCollectionBuilder extends ShapeBuilder<Shape, GeometryColle
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(shapes.size());
-        for (ShapeBuilder<?, ?, ?> shape : shapes) {
-            out.writeNamedWriteable(shape);
-        }
+        out.writeNamedWriteableList(shapes);
     }
 
     public GeometryCollectionBuilder shape(ShapeBuilder<?, ?, ?> shape) {
@@ -49,10 +49,7 @@ public class MultiLineStringBuilder extends ShapeBuilder<JtsGeometry, org.elasti
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(lines.size());
-        for (LineStringBuilder line : lines) {
-            line.writeTo(out);
-        }
+        out.writeList(lines);
     }
 
     public MultiLineStringBuilder linestring(LineStringBuilder line) {
@@ -63,10 +63,7 @@ public class MultiPolygonBuilder extends ShapeBuilder<Shape, MultiPolygon, Multi
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         orientation.writeTo(out);
-        out.writeVInt(polygons.size());
-        for (PolygonBuilder polygon : polygons) {
-            polygon.writeTo(out);
-        }
+        out.writeList(polygons);
     }
 
     public Orientation orientation() {
@@ -93,10 +93,7 @@ public class PolygonBuilder extends ShapeBuilder<JtsGeometry, org.elasticsearch.
     public void writeTo(StreamOutput out) throws IOException {
         shell.writeTo(out);
         orientation.writeTo(out);
-        out.writeVInt(holes.size());
-        for (LineStringBuilder hole : holes) {
-            hole.writeTo(out);
-        }
+        out.writeList(holes);
     }
 
     public Orientation orientation() {
@@ -107,10 +107,7 @@ public abstract class ShapeBuilder<T extends Shape, G extends org.elasticsearch.
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(coordinates.size());
-        for (Coordinate point : coordinates) {
-            writeCoordinateTo(point, out);
-        }
+        out.writeCollection(coordinates, (o, p) -> writeCoordinateTo(p, o));
     }
 
     protected static void writeCoordinateTo(Coordinate coordinate, StreamOutput out) throws IOException {
@@ -275,10 +275,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder<PercolateQueryBu
         } else {
             out.writeBoolean(false);
         }
-        out.writeVInt(documents.size());
-        for (BytesReference document : documents) {
-            out.writeBytesReference(document);
-        }
+        out.writeCollection(documents, StreamOutput::writeBytesReference);
         if (documents.isEmpty() == false) {
             XContentHelper.writeTo(out, documentXContentType);
         }
@@ -88,16 +88,8 @@ public class RankEvalResponse extends ActionResponse implements ToXContentObject
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeDouble(metricScore);
-        out.writeVInt(details.size());
-        for (String queryId : details.keySet()) {
-            out.writeString(queryId);
-            details.get(queryId).writeTo(out);
-        }
-        out.writeVInt(failures.size());
-        for (String queryId : failures.keySet()) {
-            out.writeString(queryId);
-            out.writeException(failures.get(queryId));
-        }
+        out.writeMap(details, StreamOutput::writeString, (o, v) -> v.writeTo(o));
+        out.writeMap(failures, StreamOutput::writeString, StreamOutput::writeException);
     }
 
     @Override
@@ -96,16 +96,9 @@ public class RankEvalSpec implements Writeable, ToXContentObject {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(ratedRequests.size());
-        for (RatedRequest spec : ratedRequests) {
-            spec.writeTo(out);
-        }
+        out.writeList(ratedRequests);
         out.writeNamedWriteable(metric);
-        out.writeVInt(templates.size());
-        for (Entry<String, Script> entry : templates.entrySet()) {
-            out.writeString(entry.getKey());
-            entry.getValue().writeTo(out);
-        }
+        out.writeMap(templates, StreamOutput::writeString, (o, v) -> v.writeTo(o));
         out.writeVInt(maxConcurrentSearches);
     }
 
@@ -646,18 +646,12 @@ public class AnalyzeAction extends ActionType<AnalyzeAction.Response> {
         if (customAnalyzer) {
             tokenizer.writeTo(out);
             if (charfilters != null) {
-                out.writeVInt(charfilters.length);
-                for (CharFilteredText charfilter : charfilters) {
-                    charfilter.writeTo(out);
-                }
+                out.writeArray(charfilters);
             } else {
                 out.writeVInt(0);
             }
             if (tokenfilters != null) {
-                out.writeVInt(tokenfilters.length);
-                for (AnalyzeTokenList tokenfilter : tokenfilters) {
-                    tokenfilter.writeTo(out);
-                }
+                out.writeArray(tokenfilters);
             } else {
                 out.writeVInt(0);
             }
@@ -465,10 +465,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
         } else {
             out.writeString(mappings);
         }
-        out.writeVInt(aliases.size());
-        for (Alias alias : aliases) {
-            alias.writeTo(out);
-        }
+        out.writeCollection(aliases);
         waitForActiveShards.writeTo(out);
         if (out.getVersion().onOrAfter(Version.V_7_12_0)) {
             out.writeString(origin);
@@ -120,11 +120,7 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement
         super.writeTo(out);
         out.writeString(oldIndex);
         out.writeString(newIndex);
-        out.writeVInt(conditionStatus.size());
-        for (Map.Entry<String, Boolean> entry : conditionStatus.entrySet()) {
-            out.writeString(entry.getKey());
-            out.writeBoolean(entry.getValue());
-        }
+        out.writeMap(conditionStatus, StreamOutput::writeString, StreamOutput::writeBoolean);
         out.writeBoolean(dryRun);
         out.writeBoolean(rolledOver);
         out.writeBoolean(shardsAcknowledged);
@@ -52,10 +52,7 @@ public class IndicesShardStoresRequest extends MasterNodeReadRequest<IndicesShar
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeStringArrayNullable(indices);
-        out.writeVInt(statuses.size());
-        for (ClusterHealthStatus status : statuses) {
-            out.writeByte(status.value());
-        }
+        out.writeCollection(statuses, (o, v) -> o.writeByte(v.value()));
         indicesOptions.writeIndicesOptions(out);
     }
 
@@ -269,13 +269,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeMap(storeStatuses, StreamOutput::writeString, (o, v) -> {
-            o.writeVInt(v.size());
-            for (Map.Entry<Integer, List<StoreStatus>> shardStatusesEntry : v.entrySet()) {
-                o.writeInt(shardStatusesEntry.getKey());
-                o.writeCollection(shardStatusesEntry.getValue());
-            }
-        });
+        out.writeMap(
+            storeStatuses,
+            StreamOutput::writeString,
+            (o, v) -> o.writeMap(v, StreamOutput::writeInt, StreamOutput::writeCollection)
+        );
         out.writeList(failures);
     }
 
@@ -42,10 +42,7 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(indexTemplates.size());
-        for (IndexTemplateMetadata indexTemplate : indexTemplates) {
-            indexTemplate.writeTo(out);
-        }
+        out.writeCollection(indexTemplates);
     }
 
     @Override
@@ -456,10 +456,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
         } else {
             out.writeOptionalString(mappings);
         }
-        out.writeVInt(aliases.size());
-        for (Alias alias : aliases) {
-            alias.writeTo(out);
-        }
+        out.writeCollection(aliases);
         out.writeOptionalVInt(version);
     }
 }
@@ -64,10 +64,7 @@ public class GetPipelineResponse extends ActionResponse implements StatusToXCont
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(pipelines.size());
-        for (PipelineConfiguration pipeline : pipelines) {
-            pipeline.writeTo(out);
-        }
+        out.writeCollection(pipelines);
         out.writeBoolean(summary);
     }
 
@@ -55,10 +55,7 @@ public final class SimulateDocumentVerboseResult implements SimulateDocumentResu
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(processorResults.size());
-        for (SimulateProcessorResult result : processorResults) {
-            result.writeTo(out);
-        }
+        out.writeCollection(processorResults);
     }
 
     public List<SimulateProcessorResult> getProcessorResults() {
@@ -121,10 +121,7 @@ public class SimulatePipelineResponse extends ActionResponse implements ToXConte
     public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(pipelineId);
         out.writeBoolean(verbose);
-        out.writeVInt(results.size());
-        for (SimulateDocumentResult response : results) {
-            response.writeTo(out);
-        }
+        out.writeCollection(results);
     }
 
     @Override
@@ -147,10 +147,7 @@ public class MultiSearchRequest extends ActionRequest implements CompositeIndice
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeVInt(maxConcurrentSearchRequests);
-        out.writeVInt(requests.size());
-        for (SearchRequest request : requests) {
-            request.writeTo(out);
-        }
+        out.writeCollection(requests);
     }
 
     @Override
@@ -39,19 +39,18 @@ public final class TransportSearchHelper {
             BytesStreamOutput out = new BytesStreamOutput();
             out.writeString(INCLUDE_CONTEXT_UUID);
             out.writeString(searchPhaseResults.length() == 1 ? ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE);
-            out.writeVInt(searchPhaseResults.asList().size());
-            for (SearchPhaseResult searchPhaseResult : searchPhaseResults.asList()) {
-                out.writeString(searchPhaseResult.getContextId().getSessionId());
-                out.writeLong(searchPhaseResult.getContextId().getId());
+            out.writeCollection(searchPhaseResults.asList(), (o, searchPhaseResult) -> {
+                o.writeString(searchPhaseResult.getContextId().getSessionId());
+                o.writeLong(searchPhaseResult.getContextId().getId());
                 SearchShardTarget searchShardTarget = searchPhaseResult.getSearchShardTarget();
                 if (searchShardTarget.getClusterAlias() != null) {
-                    out.writeString(
+                    o.writeString(
                         RemoteClusterAware.buildRemoteIndexName(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId())
                     );
                 } else {
-                    out.writeString(searchShardTarget.getNodeId());
+                    o.writeString(searchShardTarget.getNodeId());
                 }
-            }
+            });
             return Base64.getUrlEncoder().encodeToString(out.copyBytes().array());
         } catch (IOException e) {
             throw new UncheckedIOException(e);
@@ -137,10 +137,7 @@ public class BroadcastResponse extends ActionResponse implements ToXContentObjec
         out.writeVInt(totalShards);
         out.writeVInt(successfulShards);
         out.writeVInt(failedShards);
-        out.writeVInt(shardFailures.length);
-        for (DefaultShardOperationFailedException exp : shardFailures) {
-            exp.writeTo(out);
-        }
+        out.writeArray(shardFailures);
     }
 
     @Override
@@ -671,10 +671,7 @@ public abstract class TransportBroadcastByNodeAction<
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(nodeId);
         out.writeVInt(totalShards);
-        out.writeVInt(results.size());
-        for (ShardOperationResult result : results) {
-            out.writeOptionalWriteable(result);
-        }
+        out.writeCollection(results, StreamOutput::writeOptionalWriteable);
         out.writeBoolean(exceptions != null);
         if (exceptions != null) {
             out.writeList(exceptions);
@@ -135,10 +135,7 @@ public class ReplicationResponse extends ActionResponse {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(total);
         out.writeVInt(successful);
-        out.writeVInt(failures.length);
-        for (Failure failure : failures) {
-            failure.writeTo(out);
-        }
+        out.writeArray(failures);
     }
 
     @Override
@@ -61,14 +61,8 @@ public class BaseTasksResponse extends ActionResponse {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(taskFailures.size());
-        for (TaskOperationFailure exp : taskFailures) {
-            exp.writeTo(out);
-        }
-        out.writeVInt(nodeFailures.size());
-        for (ElasticsearchException exp : nodeFailures) {
-            exp.writeTo(out);
-        }
+        out.writeCollection(taskFailures);
+        out.writeCollection(nodeFailures);
     }
 
     /**
@@ -393,22 +393,10 @@ public abstract class TransportTasksAction<
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(nodeId);
-        out.writeVInt(results.size());
-        for (TaskResponse result : results) {
-            if (result != null) {
-                out.writeBoolean(true);
-                result.writeTo(out);
-            } else {
-                out.writeBoolean(false);
-            }
-        }
+        out.writeCollection(results, StreamOutput::writeOptionalWriteable);
         out.writeBoolean(exceptions != null);
         if (exceptions != null) {
-            int taskFailures = exceptions.size();
-            out.writeVInt(taskFailures);
-            for (TaskOperationFailure exception : exceptions) {
-                exception.writeTo(out);
-            }
+            out.writeCollection(exceptions);
         }
     }
 }
@@ -116,15 +116,11 @@ public class ClusterInfo implements ToXContentFragment, Writeable {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(this.leastAvailableSpaceUsage.size());
-        for (Map.Entry<String, DiskUsage> c : this.leastAvailableSpaceUsage.entrySet()) {
-            out.writeString(c.getKey());
-            c.getValue().writeTo(out);
-        }
+        out.writeMap(this.leastAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o));
         out.writeMap(this.mostAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o));
-        out.writeMap(this.shardSizes, StreamOutput::writeString, (o, v) -> out.writeLong(v == null ? -1 : v));
+        out.writeMap(this.shardSizes, StreamOutput::writeString, (o, v) -> o.writeLong(v == null ? -1 : v));
         if (out.getVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) {
-            out.writeMap(this.shardDataSetSizes, (o, s) -> s.writeTo(o), (o, v) -> out.writeLong(v));
+            out.writeMap(this.shardDataSetSizes, (o, s) -> s.writeTo(o), StreamOutput::writeLong);
         }
         out.writeMap(this.routingToDataPath, (o, k) -> k.writeTo(o), StreamOutput::writeString);
         if (out.getVersion().onOrAfter(StoreStats.RESERVED_BYTES_VERSION)) {
@@ -316,10 +312,7 @@ public class ClusterInfo implements ToXContentFragment, Writeable {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(total);
-        out.writeVInt(shardIds.size());
-        for (ShardId shardIdCursor : shardIds) {
-            shardIdCursor.writeTo(out);
-        }
+        out.writeCollection(shardIds);
     }
 
     public long getTotal() {
@@ -370,14 +370,13 @@ public class RestoreInProgress extends AbstractNamedDiffable<Custom> implements
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(entries.size());
-        for (Entry entry : entries.values()) {
-            out.writeString(entry.uuid);
-            entry.snapshot().writeTo(out);
-            out.writeByte(entry.state().value());
-            out.writeStringCollection(entry.indices);
-            out.writeMap(entry.shards);
-        }
+        out.writeCollection(entries.values(), (o, entry) -> {
+            o.writeString(entry.uuid);
+            entry.snapshot().writeTo(o);
+            o.writeByte(entry.state().value());
+            o.writeStringCollection(entry.indices);
+            o.writeMap(entry.shards);
+        });
     }
 
     @Override
@@ -1247,24 +1247,14 @@ public class IndexMetadata implements Diffable<IndexMetadata>, ToXContentFragmen
                 mapping.writeTo(out);
             }
         }
-        out.writeVInt(aliases.size());
-        for (AliasMetadata aliasMetadata : aliases.values()) {
-            aliasMetadata.writeTo(out);
-        }
-        out.writeVInt(customData.size());
-        for (final Map.Entry<String, DiffableStringMap> cursor : customData.entrySet()) {
-            out.writeString(cursor.getKey());
-            cursor.getValue().writeTo(out);
-        }
-        out.writeVInt(inSyncAllocationIds.size());
-        for (Map.Entry<Integer, Set<String>> cursor : inSyncAllocationIds.entrySet()) {
-            out.writeVInt(cursor.getKey());
-            DiffableUtils.StringSetValueSerializer.getInstance().write(cursor.getValue(), out);
-        }
-        out.writeVInt(rolloverInfos.size());
-        for (RolloverInfo rolloverInfo : rolloverInfos.values()) {
-            rolloverInfo.writeTo(out);
-        }
+        out.writeCollection(aliases.values());
+        out.writeMap(customData, StreamOutput::writeString, (o, v) -> v.writeTo(o));
+        out.writeMap(
+            inSyncAllocationIds,
+            StreamOutput::writeVInt,
+            (o, v) -> DiffableUtils.StringSetValueSerializer.getInstance().write(v, o)
+        );
+        out.writeCollection(rolloverInfos.values());
         if (out.getVersion().onOrAfter(SYSTEM_INDEX_FLAG_ADDED)) {
             out.writeBoolean(isSystem);
         }
@@ -207,15 +207,8 @@ public class IndexTemplateMetadata implements SimpleDiffable<IndexTemplateMetada
         out.writeInt(order);
         out.writeStringCollection(patterns);
         Settings.writeSettingsToStream(settings, out);
-        out.writeVInt(mappings.size());
-        for (Map.Entry<String, CompressedXContent> cursor : mappings.entrySet()) {
-            out.writeString(cursor.getKey());
-            cursor.getValue().writeTo(out);
-        }
-        out.writeVInt(aliases.size());
-        for (AliasMetadata aliasMetadata : aliases.values()) {
-            aliasMetadata.writeTo(out);
-        }
+        out.writeMap(mappings, StreamOutput::writeString, (o, v) -> v.writeTo(o));
+        out.writeCollection(aliases.values());
         out.writeOptionalVInt(version);
     }
 
@@ -1212,10 +1212,7 @@ public class Metadata extends AbstractCollection<IndexMetadata> implements Diffa
         for (IndexMetadata indexMetadata : this) {
             indexMetadata.writeTo(out, writeMappingsHash);
         }
-        out.writeVInt(templates.size());
-        for (IndexTemplateMetadata template : templates.values()) {
-            template.writeTo(out);
-        }
+        out.writeCollection(templates.values());
         VersionedNamedWriteable.writeVersionedWritables(out, customs);
     }
 
@@ -388,12 +388,11 @@ public class DiscoveryNode implements Writeable, ToXContentFragment {
         out.writeString(hostAddress);
         address.writeTo(out);
         out.writeMap(attributes, StreamOutput::writeString, StreamOutput::writeString);
-        out.writeVInt(roles.size());
-        for (final DiscoveryNodeRole role : roles) {
-            out.writeString(role.roleName());
-            out.writeString(role.roleNameAbbreviation());
-            out.writeBoolean(role.canContainData());
-        }
+        out.writeCollection(roles, (o, role) -> {
+            o.writeString(role.roleName());
+            o.writeString(role.roleNameAbbreviation());
+            o.writeBoolean(role.canContainData());
+        });
         Version.writeVersion(version, out);
         if (out.getVersion().onOrAfter(EXTERNAL_ID_VERSION)) {
             out.writeString(externalId);
@@ -574,10 +574,7 @@ public class DiscoveryNodes extends AbstractCollection<DiscoveryNode> implements
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(masterNodeId);
-        out.writeVInt(nodes.size());
-        for (DiscoveryNode node : this) {
-            node.writeTo(out);
-        }
+        out.writeCollection(nodes.values());
     }
 
     public static DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
@@ -313,10 +313,7 @@ public class IndexRoutingTable implements SimpleDiffable<IndexRoutingTable> {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         index.writeTo(out);
-        out.writeVInt(shards.length);
-        for (IndexShardRoutingTable indexShard : this.shards) {
-            IndexShardRoutingTable.Builder.writeToThin(indexShard, out);
-        }
+        out.writeArray((o, s) -> IndexShardRoutingTable.Builder.writeToThin(s, o), shards);
     }
 
     public static Builder builder(Index index) {
@@ -620,11 +620,7 @@ public class IndexShardRoutingTable {
 
     public static void writeToThin(IndexShardRoutingTable indexShard, StreamOutput out) throws IOException {
         out.writeVInt(indexShard.shardId.id());
-
-        out.writeVInt(indexShard.shards.length);
-        for (ShardRouting entry : indexShard.shards) {
-            entry.writeToThin(out);
-        }
+        out.writeArray((o, v) -> v.writeToThin(o), indexShard.shards);
     }
 
 }
@@ -343,10 +343,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(version);
-        out.writeVInt(indicesRouting.size());
-        for (IndexRoutingTable index : indicesRouting.values()) {
-            index.writeTo(out);
-        }
+        out.writeCollection(indicesRouting.values());
     }
 
     private static class RoutingTableDiff implements Diff<RoutingTable> {
@@ -68,10 +68,7 @@ public class RoutingExplanations implements ToXContentFragment {
      * Write the RoutingExplanations object
      */
     public static void writeTo(RoutingExplanations explanations, StreamOutput out) throws IOException {
-        out.writeVInt(explanations.explanations.size());
-        for (RerouteExplanation explanation : explanations.explanations) {
-            RerouteExplanation.writeTo(explanation, out);
-        }
+        out.writeCollection(explanations.explanations, (o, v) -> RerouteExplanation.writeTo(v, o));
     }
 
     @Override
@@ -99,10 +99,7 @@ public class AllocationCommands implements ToXContentFragment {
      * @throws IOException if something happens during write
      */
     public static void writeTo(AllocationCommands commands, StreamOutput out) throws IOException {
-        out.writeVInt(commands.commands.size());
-        for (AllocationCommand command : commands.commands) {
-            out.writeNamedWriteable(command);
-        }
+        out.writeNamedWriteableList(commands.commands);
     }
 
     /**
@@ -339,10 +339,7 @@ public abstract class Decision implements ToXContent, Writeable {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeBoolean(true); // flag indicating it is a multi decision
-            out.writeVInt(getDecisions().size());
-            for (Decision d : getDecisions()) {
-                d.writeTo(out);
-            }
+            out.writeCollection(getDecisions());
         }
     }
 }
@@ -414,20 +414,12 @@ public class Lucene {
             out.writeFloat(topDocs.maxScore);
 
             out.writeArray(Lucene::writeSortField, topFieldDocs.fields);
-
-            out.writeVInt(topDocs.topDocs.scoreDocs.length);
-            for (ScoreDoc doc : topFieldDocs.scoreDocs) {
-                writeFieldDoc(out, (FieldDoc) doc);
-            }
+            out.writeArray((o, doc) -> writeFieldDoc(o, (FieldDoc) doc), topFieldDocs.scoreDocs);
         } else {
             out.writeByte((byte) 0);
             writeTotalHits(out, topDocs.topDocs.totalHits);
             out.writeFloat(topDocs.maxScore);
-
-            out.writeVInt(topDocs.topDocs.scoreDocs.length);
-            for (ScoreDoc doc : topDocs.topDocs.scoreDocs) {
-                writeScoreDoc(out, doc);
-            }
+            out.writeArray(Lucene::writeScoreDoc, topDocs.topDocs.scoreDocs);
         }
     }
 
@@ -495,10 +487,7 @@ public class Lucene {
     }
 
     public static void writeFieldDoc(StreamOutput out, FieldDoc fieldDoc) throws IOException {
-        out.writeVInt(fieldDoc.fields.length);
-        for (Object field : fieldDoc.fields) {
-            writeSortValue(out, field);
-        }
+        out.writeArray(Lucene::writeSortValue, fieldDoc.fields);
         out.writeVInt(fieldDoc.doc);
         out.writeFloat(fieldDoc.score);
     }
@@ -627,10 +616,7 @@ public class Lucene {
         out.writeBoolean(explanation.isMatch());
         out.writeString(explanation.getDescription());
         Explanation[] subExplanations = explanation.getDetails();
-        out.writeVInt(subExplanations.length);
-        for (Explanation subExp : subExplanations) {
-            writeExplanation(out, subExp);
-        }
+        out.writeArray(Lucene::writeExplanation, subExplanations);
         if (explanation.isMatch()) {
             writeExplanationValue(out, explanation.getValue());
         }
@@ -685,12 +685,7 @@ public final class ThreadContext implements Writeable {
             requestHeaders.putAll(this.requestHeaders);
         }
 
-        out.writeVInt(requestHeaders.size());
-        for (Map.Entry<String, String> entry : requestHeaders.entrySet()) {
-            out.writeString(entry.getKey());
-            out.writeString(entry.getValue());
-        }
-
+        out.writeMap(requestHeaders, StreamOutput::writeString, StreamOutput::writeString);
         out.writeMap(responseHeaders, StreamOutput::writeString, StreamOutput::writeStringCollection);
     }
 }
@@ -78,11 +78,7 @@ public final class CommitStats implements Writeable, ToXContentFragment {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(userData.size());
-        for (Map.Entry<String, String> entry : userData.entrySet()) {
-            out.writeString(entry.getKey());
-            out.writeString(entry.getValue());
-        }
+        out.writeMap(userData, StreamOutput::writeString, StreamOutput::writeString);
         out.writeLong(generation);
         out.writeOptionalString(id);
         out.writeInt(numDocs);
@@ -219,29 +219,28 @@ public class Segment implements Writeable {
             out.writeVInt(0);
             return;
         }
-        out.writeVInt(sort.getSort().length);
-        for (SortField field : sort.getSort()) {
-            out.writeString(field.getField());
+        out.writeArray((o, field) -> {
+            o.writeString(field.getField());
             if (field instanceof SortedSetSortField) {
-                out.writeByte((byte) 0);
-                out.writeOptionalBoolean(field.getMissingValue() == null ? null : field.getMissingValue() == SortField.STRING_FIRST);
-                out.writeBoolean(((SortedSetSortField) field).getSelector() == SortedSetSelector.Type.MAX);
-                out.writeBoolean(field.getReverse());
+                o.writeByte((byte) 0);
+                o.writeOptionalBoolean(field.getMissingValue() == null ? null : field.getMissingValue() == SortField.STRING_FIRST);
+                o.writeBoolean(((SortedSetSortField) field).getSelector() == SortedSetSelector.Type.MAX);
+                o.writeBoolean(field.getReverse());
             } else if (field instanceof SortedNumericSortField) {
                 switch (((SortedNumericSortField) field).getNumericType()) {
-                    case INT -> out.writeByte((byte) 1);
-                    case FLOAT -> out.writeByte((byte) 2);
-                    case DOUBLE -> out.writeByte((byte) 3);
-                    case LONG -> out.writeByte((byte) 4);
+                    case INT -> o.writeByte((byte) 1);
+                    case FLOAT -> o.writeByte((byte) 2);
+                    case DOUBLE -> o.writeByte((byte) 3);
+                    case LONG -> o.writeByte((byte) 4);
                     default -> throw new IOException("invalid index sort field:" + field);
                 }
-                out.writeGenericValue(field.getMissingValue());
-                out.writeBoolean(((SortedNumericSortField) field).getSelector() == SortedNumericSelector.Type.MAX);
-                out.writeBoolean(field.getReverse());
+                o.writeGenericValue(field.getMissingValue());
+                o.writeBoolean(((SortedNumericSortField) field).getSelector() == SortedNumericSelector.Type.MAX);
+                o.writeBoolean(field.getReverse());
             } else {
                 throw new IOException("invalid index sort field:" + field);
             }
-        }
+        }, sort.getSort());
     }
 
     private static void readRamTree(StreamInput in) throws IOException {
@@ -224,10 +224,7 @@ public class SegmentsStats implements Writeable, ToXContentFragment {
         out.writeLong(bitsetMemoryInBytes);
         out.writeLong(maxUnsafeAutoIdTimestamp);
 
-        out.writeVInt(files.size());
-        for (FileStats file : files.values()) {
-            file.writeTo(out);
-        }
+        out.writeCollection(files.values());
     }
 
     public void clearFiles() {
@@ -105,10 +105,7 @@ public class GeoPolygonQueryBuilder extends AbstractQueryBuilder<GeoPolygonQuery
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeString(fieldName);
-        out.writeVInt(shell.size());
-        for (GeoPoint point : shell) {
-            out.writeGeoPoint(point);
-        }
+        out.writeCollection(shell, StreamOutput::writeGeoPoint);
         validationMethod.writeTo(out);
         out.writeBoolean(ignoreUnmapped);
     }
@@ -223,10 +223,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject {
         boolean hasSorts = sorts != null;
         out.writeBoolean(hasSorts);
         if (hasSorts) {
-            out.writeVInt(sorts.size());
-            for (SortBuilder<?> sort : sorts) {
-                out.writeNamedWriteable(sort);
-            }
+            out.writeNamedWriteableList(sorts);
         }
         out.writeOptionalWriteable(highlightBuilder);
         out.writeOptionalWriteable(innerCollapseBuilder);
@@ -234,11 +234,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeGenericValue(value);
-        out.writeVInt(fieldsBoosts.size());
-        for (Map.Entry<String, Float> fieldsEntry : fieldsBoosts.entrySet()) {
-            out.writeString(fieldsEntry.getKey());
-            out.writeFloat(fieldsEntry.getValue());
-        }
+        out.writeMap(fieldsBoosts, StreamOutput::writeString, StreamOutput::writeFloat);
         type.writeTo(out);
         operator.writeTo(out);
         out.writeOptionalString(analyzer);
@@ -192,11 +192,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder<QueryStringQue
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeString(this.queryString);
         out.writeOptionalString(this.defaultField);
-        out.writeVInt(this.fieldsAndWeights.size());
-        for (Map.Entry<String, Float> fieldsEntry : this.fieldsAndWeights.entrySet()) {
-            out.writeString(fieldsEntry.getKey());
-            out.writeFloat(fieldsEntry.getValue());
-        }
+        out.writeMap(this.fieldsAndWeights, StreamOutput::writeString, StreamOutput::writeFloat);
         this.defaultOperator.writeTo(out);
         out.writeOptionalString(this.analyzer);
         out.writeOptionalString(this.quoteAnalyzer);
@@ -556,10 +556,7 @@ public class BulkByScrollTask extends CancellableTask {
         out.writeFloat(requestsPerSecond);
         out.writeOptionalString(reasonCancelled);
         out.writeTimeValue(throttledUntil);
-        out.writeVInt(sliceStatuses.size());
-        for (StatusOrException sliceStatus : sliceStatuses) {
-            out.writeOptionalWriteable(sliceStatus);
-        }
+        out.writeCollection(sliceStatuses, StreamOutput::writeOptionalWriteable);
     }
 
     @Override
@@ -169,14 +169,7 @@ public class NodeIndicesStats implements Writeable, ToXContentFragment {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         stats.writeTo(out);
-        out.writeVInt(statsByShard.size());
-        for (Map.Entry<Index, List<IndexShardStats>> entry : statsByShard.entrySet()) {
-            entry.getKey().writeTo(out);
-            out.writeVInt(entry.getValue().size());
-            for (IndexShardStats indexShardStats : entry.getValue()) {
-                indexShardStats.writeTo(out);
-            }
-        }
+        out.writeMap(statsByShard, (o, k) -> k.writeTo(o), StreamOutput::writeList);
     }
 
     @Override
@@ -92,25 +92,12 @@ public class RecoveryFilesInfoRequest extends RecoveryTransportRequest {
         out.writeLong(recoveryId);
         shardId.writeTo(out);
 
-        out.writeVInt(phase1FileNames.size());
-        for (String phase1FileName : phase1FileNames) {
-            out.writeString(phase1FileName);
-        }
-
-        out.writeVInt(phase1FileSizes.size());
-        for (Long phase1FileSize : phase1FileSizes) {
-            out.writeVLong(phase1FileSize);
-        }
-
-        out.writeVInt(phase1ExistingFileNames.size());
-        for (String phase1ExistingFileName : phase1ExistingFileNames) {
-            out.writeString(phase1ExistingFileName);
-        }
-
-        out.writeVInt(phase1ExistingFileSizes.size());
-        for (Long phase1ExistingFileSize : phase1ExistingFileSizes) {
-            out.writeVLong(phase1ExistingFileSize);
-        }
+        out.writeStringCollection(phase1FileNames);
+        out.writeCollection(phase1FileSizes, StreamOutput::writeVLong);
+
+        out.writeStringCollection(phase1ExistingFileNames);
+        out.writeCollection(phase1ExistingFileSizes, StreamOutput::writeVLong);
+
         out.writeVInt(totalTranslogOps);
     }
 }
@@ -77,12 +77,11 @@ public class IngestStats implements Writeable, ToXContentFragment {
         if (processorStatsForPipeline == null) {
             out.writeVInt(0);
         } else {
-            out.writeVInt(processorStatsForPipeline.size());
-            for (ProcessorStat processorStat : processorStatsForPipeline) {
-                out.writeString(processorStat.getName());
-                out.writeString(processorStat.getType());
-                processorStat.getStats().writeTo(out);
-            }
+            out.writeCollection(processorStatsForPipeline, (o, processorStat) -> {
+                o.writeString(processorStat.getName());
+                o.writeString(processorStat.getType());
+                processorStat.getStats().writeTo(o);
+            });
         }
     }
 }
@@ -461,10 +461,7 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContentFragm
     public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(timestamp);
         out.writeOptionalWriteable(ioStats);
-        out.writeVInt(paths.length);
-        for (Path path : paths) {
-            path.writeTo(out);
-        }
+        out.writeArray(paths);
     }
 
     public Path getTotal() {
@@ -313,11 +313,7 @@ public class JvmInfo implements ReportingService.Info {
         }
         out.writeString(bootClassPath);
         out.writeString(classPath);
-        out.writeVInt(this.systemProperties.size());
-        for (Map.Entry<String, String> entry : systemProperties.entrySet()) {
-            out.writeString(entry.getKey());
-            out.writeString(entry.getValue());
-        }
+        out.writeMap(this.systemProperties, StreamOutput::writeString, StreamOutput::writeString);
         mem.writeTo(out);
         out.writeStringArray(gcCollectors);
         out.writeStringArray(memoryPools);
@@ -257,12 +257,7 @@ public final class ScriptMetadata implements Metadata.Custom, Writeable, ToXCont
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(scripts.size());
-
-        for (Map.Entry<String, StoredScriptSource> entry : scripts.entrySet()) {
-            out.writeString(entry.getKey());
-            entry.getValue().writeTo(out);
-        }
+        out.writeMap(scripts, StreamOutput::writeString, (o, v) -> v.writeTo(o));
     }
 
     /**
@@ -233,10 +233,7 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable<Do
         if (fields == null) {
             out.writeVInt(0);
         } else {
-            out.writeVInt(fields.size());
-            for (DocumentField field : fields.values()) {
-                field.writeTo(out);
-            }
+            out.writeCollection(fields.values());
         }
     }
 
@@ -267,30 +264,20 @@ public final class SearchHit implements Writeable, ToXContentObject, Iterable<Do
         if (highlightFields == null) {
            out.writeVInt(0);
         } else {
-            out.writeVInt(highlightFields.size());
-            for (HighlightField highlightField : highlightFields.values()) {
-                highlightField.writeTo(out);
-            }
+            out.writeCollection(highlightFields.values());
         }
         sortValues.writeTo(out);
 
         if (matchedQueries.length == 0) {
             out.writeVInt(0);
         } else {
-            out.writeVInt(matchedQueries.length);
-            for (String matchedFilter : matchedQueries) {
-                out.writeString(matchedFilter);
-            }
+            out.writeStringArray(matchedQueries);
         }
         out.writeOptionalWriteable(shard);
         if (innerHits == null) {
             out.writeVInt(0);
         } else {
-            out.writeVInt(innerHits.size());
-            for (Map.Entry<String, SearchHits> entry : innerHits.entrySet()) {
-                out.writeString(entry.getKey());
-                entry.getValue().writeTo(out);
-            }
+            out.writeMap(innerHits, StreamOutput::writeString, (o, v) -> v.writeTo(o));
         }
     }
 
@@ -97,12 +97,7 @@ public final class SearchHits implements Writeable, ToXContentFragment, Iterable
             Lucene.writeTotalHits(out, totalHits);
         }
         out.writeFloat(maxScore);
-        out.writeVInt(hits.length);
-        if (hits.length > 0) {
-            for (SearchHit hit : hits) {
-                hit.writeTo(out);
-            }
-        }
+        out.writeArray(hits);
         out.writeOptionalArray(Lucene::writeSortField, sortFields);
         out.writeOptionalString(collapseField);
         out.writeOptionalArray(Lucene::writeSortValue, collapseValues);
@@ -303,14 +303,8 @@ public class AggregatorFactories {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(this.aggregationBuilders.size());
-        for (AggregationBuilder factory : aggregationBuilders) {
-            out.writeNamedWriteable(factory);
-        }
-        out.writeVInt(this.pipelineAggregatorBuilders.size());
-        for (PipelineAggregationBuilder factory : pipelineAggregatorBuilders) {
-            out.writeNamedWriteable(factory);
-        }
+        out.writeCollection(this.aggregationBuilders, StreamOutput::writeNamedWriteable);
+        out.writeCollection(this.pipelineAggregatorBuilders, StreamOutput::writeNamedWriteable);
     }
 
     public boolean mustVisitAllDocs() {
@@ -519,10 +519,7 @@ public abstract class InternalOrder extends BucketOrder {
             out.writeBoolean(aggregationOrder.order == SortOrder.ASC);
             out.writeString(aggregationOrder.path().toString());
         } else if (order instanceof CompoundOrder compoundOrder) {
-            out.writeVInt(compoundOrder.orderElements.size());
-            for (BucketOrder innerOrder : compoundOrder.orderElements) {
-                innerOrder.writeTo(out);
-            }
+            out.writeCollection(compoundOrder.orderElements);
         }
     }
 
@@ -141,10 +141,7 @@ public class InternalAdjacencyMatrix extends InternalMultiBucketAggregation<Inte
 
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
-        out.writeVInt(buckets.size());
-        for (InternalBucket bucket : buckets) {
-            bucket.writeTo(out);
-        }
+        out.writeCollection(buckets);
     }
 
     @Override
@@ -112,10 +112,7 @@ public class CompositeAggregationBuilder extends AbstractAggregationBuilder<Comp
 
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
-        out.writeVInt(sources.size());
-        for (CompositeValuesSourceBuilder<?> builder : sources) {
-            CompositeValuesSourceParserHelper.writeTo(builder, out);
-        }
+        out.writeCollection(sources, (o, v) -> CompositeValuesSourceParserHelper.writeTo(v, o));
         out.writeVInt(size);
         out.writeBoolean(after != null);
         if (after != null) {
@@ -128,16 +128,7 @@ public class FiltersAggregationBuilder extends AbstractAggregationBuilder<Filter
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeBoolean(keyed);
-        out.writeVInt(filters.size());
-        if (keyed) {
-            for (KeyedFilter keyedFilter : filters) {
-                keyedFilter.writeTo(out);
-            }
-        } else {
-            for (KeyedFilter keyedFilter : filters) {
-                out.writeNamedWriteable(keyedFilter.filter());
-            }
-        }
+        out.writeCollection(filters, keyed ? (o, v) -> v.writeTo(o) : (o, v) -> o.writeNamedWriteable(v.filter()));
         out.writeBoolean(otherBucket);
         out.writeString(otherBucketKey);
     }
@@ -148,10 +148,7 @@ public class InternalFilters extends InternalMultiBucketAggregation<InternalFilt
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeBoolean(keyed);
-        out.writeVInt(buckets.size());
-        for (InternalBucket bucket : buckets) {
-            bucket.writeTo(out);
-        }
+        out.writeList(buckets);
     }
 
     @Override
@@ -166,10 +166,7 @@ public final class InternalAutoDateHistogram extends InternalMultiBucketAggregat
         }
 
         void writeTo(StreamOutput out) throws IOException {
-            out.writeVInt(roundingInfos.length);
-            for (RoundingInfo roundingInfo : roundingInfos) {
-                roundingInfo.writeTo(out);
-            }
+            out.writeArray(roundingInfos);
             out.writeVInt(roundingIdx);
             emptySubAggregations.writeTo(out);
         }
@@ -106,10 +106,7 @@ public abstract class AbstractRangeBuilder<AB extends AbstractRangeBuilder<AB, R
 
     @Override
     protected void innerWriteTo(StreamOutput out) throws IOException {
-        out.writeVInt(ranges.size());
-        for (Range range : ranges) {
-            range.writeTo(out);
-        }
+        out.writeList(ranges);
         out.writeBoolean(keyed);
     }
 
@@ -311,10 +311,7 @@ public class GeoDistanceAggregationBuilder extends ValuesSourceAggregationBuilde
     protected void innerWriteTo(StreamOutput out) throws IOException {
         out.writeDouble(origin.lat());
         out.writeDouble(origin.lon());
-        out.writeVInt(ranges.size());
-        for (Range range : ranges) {
-            range.writeTo(out);
-        }
+        out.writeList(ranges);
         out.writeBoolean(keyed);
         distanceType.writeTo(out);
         unit.writeTo(out);
@@ -302,10 +302,7 @@ public class InternalRange<B extends InternalRange.Bucket, R extends InternalRan
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeNamedWriteable(format);
         out.writeBoolean(keyed);
-        out.writeVInt(ranges.size());
-        for (B bucket : ranges) {
-            bucket.writeTo(out);
-        }
+        out.writeCollection(ranges);
     }
 
     @Override
@@ -13,6 +13,7 @@ import org.elasticsearch.Version;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.network.InetAddresses;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.script.Script;
@@ -100,7 +101,7 @@ public final class IpRangeAggregationBuilder extends ValuesSourceAggregationBuil
         }
     }
 
-    public static class Range implements ToXContentObject {
+    public static class Range implements ToXContentObject, Writeable {
 
         private final String key;
         private final String from;
@@ -155,7 +156,8 @@ public final class IpRangeAggregationBuilder extends ValuesSourceAggregationBuil
             this.to = in.readOptionalString();
         }
 
-        void writeTo(StreamOutput out) throws IOException {
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
             out.writeOptionalString(key);
             out.writeOptionalString(from);
             out.writeOptionalString(to);
@@ -354,10 +356,7 @@ public final class IpRangeAggregationBuilder extends ValuesSourceAggregationBuil
 
     @Override
     protected void innerWriteTo(StreamOutput out) throws IOException {
-        out.writeVInt(ranges.size());
-        for (Range range : ranges) {
-            range.writeTo(out);
-        }
+        out.writeCollection(ranges);
         out.writeBoolean(keyed);
     }
 
@@ -433,18 +433,12 @@ public class IncludeExclude implements Writeable, ToXContentFragment {
         boolean hasIncludes = includeValues != null;
         out.writeBoolean(hasIncludes);
         if (hasIncludes) {
-            out.writeVInt(includeValues.size());
-            for (BytesRef value : includeValues) {
-                out.writeBytesRef(value);
-            }
+            out.writeCollection(includeValues, StreamOutput::writeBytesRef);
         }
         boolean hasExcludes = excludeValues != null;
         out.writeBoolean(hasExcludes);
         if (hasExcludes) {
-            out.writeVInt(excludeValues.size());
-            for (BytesRef value : excludeValues) {
-                out.writeBytesRef(value);
-            }
+            out.writeCollection(excludeValues, StreamOutput::writeBytesRef);
         }
         out.writeVInt(incNumPartitions);
         out.writeVInt(incZeroBasedPartition);
@@ -25,7 +25,6 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.TreeMap;
 
@@ -96,11 +95,7 @@ public class BucketScriptPipelineAggregationBuilder extends AbstractPipelineAggr
 
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
-        out.writeVInt(bucketsPathsMap.size());
-        for (Entry<String, String> e : bucketsPathsMap.entrySet()) {
-            out.writeString(e.getKey());
-            out.writeString(e.getValue());
-        }
+        out.writeMap(bucketsPathsMap, StreamOutput::writeString, StreamOutput::writeString);
         script.writeTo(out);
         out.writeOptionalString(format);
         gapPolicy.writeTo(out);
@@ -52,14 +52,7 @@ public class AggregationInfo implements ReportingService.Info {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(aggs.size());
-        for (Map.Entry<String, Set<String>> e : aggs.entrySet()) {
-            out.writeString(e.getKey());
-            out.writeVInt(e.getValue().size());
-            for (String type : e.getValue()) {
-                out.writeString(type);
-            }
-        }
+        out.writeMap(aggs, StreamOutput::writeString, StreamOutput::writeStringCollection);
     }
 
     public Map<String, Set<String>> getAggregations() {
@@ -181,10 +181,7 @@ public class InternalTimeSeries extends InternalMultiBucketAggregation<InternalT
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeBoolean(keyed);
-        out.writeVInt(buckets.size());
-        for (InternalTimeSeries.InternalBucket bucket : buckets) {
-            bucket.writeTo(out);
-        }
+        out.writeCollection(buckets);
     }
 
     @Override
@@ -283,10 +283,7 @@ public final class SearchSourceBuilder implements Writeable, ToXContentObject, R
         boolean hasSorts = sorts != null;
         out.writeBoolean(hasSorts);
         if (hasSorts) {
-            out.writeVInt(sorts.size());
-            for (SortBuilder<?> sort : sorts) {
-                out.writeNamedWriteable(sort);
-            }
+            out.writeNamedWriteableList(sorts);
         }
         boolean hasStats = stats != null;
         out.writeBoolean(hasStats);
@@ -57,18 +57,14 @@ public class AggregatedDfs implements Writeable {
 
     @Override
     public void writeTo(final StreamOutput out) throws IOException {
-        out.writeVInt(termStatistics.size());
-
-        for (var entry : termStatistics().entrySet()) {
-            Term term = entry.getKey();
-            out.writeString(term.field());
-            out.writeBytesRef(term.bytes());
-            TermStatistics stats = entry.getValue();
-            out.writeBytesRef(stats.term());
-            out.writeVLong(stats.docFreq());
-            out.writeVLong(DfsSearchResult.addOne(stats.totalTermFreq()));
-        }
-
+        out.writeMap(termStatistics, (o, k) -> {
+            o.writeString(k.field());
+            o.writeBytesRef(k.bytes());
+        }, (o, v) -> {
+            o.writeBytesRef(v.term());
+            o.writeVLong(v.docFreq());
+            o.writeVLong(DfsSearchResult.addOne(v.totalTermFreq()));
+        });
         DfsSearchResult.writeFieldStats(out, fieldStatistics);
         out.writeVLong(maxDoc);
     }
@@ -95,11 +95,10 @@ public class DfsSearchResult extends SearchPhaseResult {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         contextId.writeTo(out);
-        out.writeVInt(terms.length);
-        for (Term term : terms) {
-            out.writeString(term.field());
-            out.writeBytesRef(term.bytes());
-        }
+        out.writeArray((o, term) -> {
+            o.writeString(term.field());
+            o.writeBytesRef(term.bytes());
+        }, terms);
         writeTermStats(out, termStatistics);
         writeFieldStats(out, fieldStatistics);
         out.writeVInt(maxDoc);
@@ -109,25 +108,18 @@ public class DfsSearchResult extends SearchPhaseResult {
     }
 
     public static void writeFieldStats(StreamOutput out, Map<String, CollectionStatistics> fieldStatistics) throws IOException {
-        out.writeVInt(fieldStatistics.size());
-
-        for (var entry : fieldStatistics.entrySet()) {
-            out.writeString(entry.getKey());
-            CollectionStatistics statistics = entry.getValue();
+        out.writeMap(fieldStatistics, StreamOutput::writeString, (o, statistics) -> {
             assert statistics.maxDoc() >= 0;
-            out.writeVLong(statistics.maxDoc());
+            o.writeVLong(statistics.maxDoc());
             // stats are always positive numbers
-            out.writeVLong(statistics.docCount());
-            out.writeVLong(statistics.sumTotalTermFreq());
-            out.writeVLong(statistics.sumDocFreq());
-        }
+            o.writeVLong(statistics.docCount());
+            o.writeVLong(statistics.sumTotalTermFreq());
+            o.writeVLong(statistics.sumDocFreq());
+        });
     }
 
     public static void writeTermStats(StreamOutput out, TermStatistics[] termStatistics) throws IOException {
-        out.writeVInt(termStatistics.length);
-        for (TermStatistics termStatistic : termStatistics) {
-            writeSingleTermStats(out, termStatistic);
-        }
+        out.writeArray(DfsSearchResult::writeSingleTermStats, termStatistics);
     }
 
     public static void writeSingleTermStats(StreamOutput out, TermStatistics termStatistic) throws IOException {
@@ -67,10 +67,7 @@ public class ShardFetchRequest extends TransportRequest {
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         contextId.writeTo(out);
-        out.writeVInt(docIds.length);
-        for (int docId : docIds) {
-            out.writeVInt(docId);
-        }
+        out.writeVIntArray(docIds);
         if (lastEmittedDoc == null) {
             out.writeByte((byte) 0);
         } else if (lastEmittedDoc instanceof FieldDoc) {
@@ -94,10 +94,7 @@ public class HighlightField implements ToXContentFragment, Writeable {
             out.writeBoolean(false);
         } else {
             out.writeBoolean(true);
-            out.writeVInt(fragments.length);
-            for (Text fragment : fragments) {
-                out.writeText(fragment);
-            }
+            out.writeArray(StreamOutput::writeText, fragments);
         }
     }
 
@@ -50,10 +50,7 @@ public final class AggregationProfileShardResult implements Writeable, ToXConten
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(aggProfileResults.size());
-        for (ProfileResult p : aggProfileResults) {
-            p.writeTo(out);
-        }
+        out.writeCollection(aggProfileResults);
     }
 
     public List<ProfileResult> getProfileResults() {
@@ -58,10 +58,7 @@ public class SearchAfterBuilder implements ToXContentObject, Writeable {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(sortValues.length);
-        for (Object fieldValue : sortValues) {
-            out.writeGenericValue(fieldValue);
-        }
+        out.writeArray(StreamOutput::writeGenericValue, sortValues);
     }
 
     public SearchAfterBuilder setSortValues(Object[] values) {
@@ -542,10 +542,7 @@ public class Suggest implements Iterable<Suggest.Suggestion<? extends Entry<? ex
         out.writeText(text);
         out.writeVInt(offset);
         out.writeVInt(length);
-        out.writeVInt(options.size());
-        for (Option option : options) {
-            option.writeTo(out);
-        }
+        out.writeCollection(options);
     }
 
     @Override
@@ -411,10 +411,7 @@ public final class CompletionSuggestion extends Suggest.Suggestion<CompletionSug
             out.writeInt(contexts.size());
             for (Map.Entry<String, Set<String>> entry : contexts.entrySet()) {
                 out.writeString(entry.getKey());
-                out.writeVInt(entry.getValue().size());
-                for (CharSequence ctx : entry.getValue()) {
-                    out.writeString(ctx.toString());
-                }
+                out.writeStringCollection(entry.getValue());
             }
         }
 
@@ -161,15 +161,7 @@ public class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSuggestionB
         }
         out.writeMapWithConsistentOrder(collateParams);
         out.writeOptionalBoolean(collatePrune);
-        out.writeVInt(this.generators.size());
-        for (Entry<String, List<CandidateGenerator>> entry : this.generators.entrySet()) {
-            out.writeString(entry.getKey());
-            List<CandidateGenerator> generatorsList = entry.getValue();
-            out.writeVInt(generatorsList.size());
-            for (CandidateGenerator generator : generatorsList) {
-                generator.writeTo(out);
-            }
-        }
+        out.writeMap(this.generators, StreamOutput::writeString, StreamOutput::writeList);
     }
 
     /**
@@ -84,10 +84,7 @@ public class AutoscalingMetadata implements Metadata.Custom {
 
     @Override
     public void writeTo(final StreamOutput out) throws IOException {
-        out.writeVInt(policies.size());
-        for (final Map.Entry<String, AutoscalingPolicyMetadata> policy : policies.entrySet()) {
-            policy.getValue().writeTo(out);
-        }
+        out.writeCollection(policies.values());
     }
 
     @Override
@@ -61,10 +61,7 @@ public final class BulkShardOperationsRequest extends ReplicatedWriteRequest<Bul
         super.writeTo(out);
         out.writeString(historyUUID);
         out.writeZLong(maxSeqNoOfUpdatesOrDeletes);
-        out.writeVInt(operations.size());
-        for (Translog.Operation operation : operations) {
-            Translog.Operation.writeOperation(out, operation);
-        }
+        out.writeCollection(operations, Translog.Operation::writeOperation);
     }
 
     @Override
@@ -98,14 +98,7 @@ public class PostStartBasicResponse extends AcknowledgedResponse implements Stat
         super.writeTo(out);
         out.writeEnum(status);
         out.writeOptionalString(acknowledgeMessage);
-        out.writeVInt(acknowledgeMessages.size());
-        for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) {
-            out.writeString(entry.getKey());
-            out.writeVInt(entry.getValue().length);
-            for (String message : entry.getValue()) {
-                out.writeString(message);
-            }
-        }
+        out.writeMap(acknowledgeMessages, StreamOutput::writeString, StreamOutput::writeStringArray);
     }
 
     @Override
@@ -88,14 +88,7 @@ public class PostStartTrialResponse extends ActionResponse {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeEnum(status);
         out.writeOptionalString(acknowledgeMessage);
-        out.writeVInt(acknowledgeMessages.size());
-        for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) {
-            out.writeString(entry.getKey());
-            out.writeVInt(entry.getValue().length);
-            for (String message : entry.getValue()) {
-                out.writeString(message);
-            }
-        }
+        out.writeMap(acknowledgeMessages, StreamOutput::writeString, StreamOutput::writeStringArray);
     }
 
     Map<String, String[]> getAcknowledgementMessages() {
@@ -15,6 +15,7 @@ import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder;
@@ -313,7 +314,7 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest
         return hops.get(hopNumber);
     }
 
-    public static class TermBoost {
+    public static class TermBoost implements Writeable {
         String term;
         float boost;
 
@@ -341,7 +342,8 @@ public class GraphExploreRequest extends ActionRequest implements IndicesRequest
             this.boost = in.readFloat();
         }
 
-        void writeTo(StreamOutput out) throws IOException {
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
             out.writeString(term);
             out.writeFloat(boost);
         }
@@ -71,10 +71,7 @@ public class Hop implements ToXContentFragment {
         if (vertices == null) {
             out.writeVInt(0);
         } else {
-            out.writeVInt(vertices.size());
-            for (VertexRequest vr : vertices) {
-                vr.writeTo(out);
-            }
+            out.writeList(vertices);
         }
     }
 
@@ -9,6 +9,7 @@ package org.elasticsearch.protocol.xpack.graph;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.protocol.xpack.graph.GraphExploreRequest.TermBoost;
 import org.elasticsearch.xcontent.ToXContentObject;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -27,7 +28,7 @@ import java.util.TreeSet;
  * inclusion list to filter which terms are considered.
  *
  */
-public class VertexRequest implements ToXContentObject {
+public class VertexRequest implements ToXContentObject, Writeable {
     public static final int DEFAULT_SIZE = 5;
     public static final int DEFAULT_MIN_DOC_COUNT = 3;
     public static final int DEFAULT_SHARD_MIN_DOC_COUNT = 2;
@@ -69,26 +70,21 @@ public class VertexRequest implements ToXContentObject {
 
     }
 
-    void writeTo(StreamOutput out) throws IOException {
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
         out.writeString(fieldName);
         out.writeVInt(size);
         out.writeVInt(minDocCount);
         out.writeVInt(shardMinDocCount);
 
         if (includes != null) {
-            out.writeVInt(includes.size());
-            for (TermBoost tb : includes.values()) {
-                tb.writeTo(out);
-            }
+            out.writeCollection(includes.values());
         } else {
             out.writeVInt(0);
         }
 
         if (excludes != null) {
-            out.writeVInt(excludes.size());
-            for (String term : excludes) {
-                out.writeString(term);
-            }
+            out.writeStringCollection(excludes);
         } else {
             out.writeVInt(0);
         }
@@ -76,14 +76,7 @@ public class PutLicenseResponse extends AcknowledgedResponse {
         super.writeTo(out);
         out.writeVInt(status.id());
         out.writeOptionalString(acknowledgeHeader);
-        out.writeVInt(acknowledgeMessages.size());
-        for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) {
-            out.writeString(entry.getKey());
-            out.writeVInt(entry.getValue().length);
-            for (String message : entry.getValue()) {
-                out.writeString(message);
-            }
-        }
+        out.writeMap(acknowledgeMessages, StreamOutput::writeString, StreamOutput::writeStringArray);
     }
 
     @Override
@@ -96,10 +96,7 @@ public class ExplainLifecycleResponse extends ActionResponse implements ToXConte
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(indexResponses.size());
-        for (IndexLifecycleExplainResponse e : indexResponses.values()) {
-            e.writeTo(out);
-        }
+        out.writeCollection(indexResponses.values());
     }
 
     @Override
@@ -55,10 +55,7 @@ public class ExportException extends ElasticsearchException implements Iterable<
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeVInt(exceptions.size());
-        for (ExportException e : exceptions) {
-            e.writeTo(out);
-        }
+        out.writeCollection(exceptions);
    }
 
     @Override
@@ -455,10 +455,7 @@ public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest impleme
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeString(query);
-        out.writeVInt(params.size());
-        for (SqlTypedParamValue param : params) {
-            writeSqlTypedParamValue(out, param);
-        }
+        out.writeCollection(params, AbstractSqlQueryRequest::writeSqlTypedParamValue);
         out.writeZoneId(zoneId);
         if (out.getVersion().onOrAfter(Version.V_7_16_0)) {
             out.writeOptionalString(catalog);