Limit ByteSizeUnit to 2 decimals (#120142)
* Exhaustive testParseFractionalNumber
* Refactor: encapsulate ByteSizeValue constructor
* Refactor: store size in bytes
* Support up to 2 decimals in parsed ByteSizeValue
* Fix test for rounding up with no warnings
* ByteSizeUnit transport changes
* Update docs/changelog/120142.yaml
* Changelog details and impact
* Fix change log breaking.area
* Address PR comments
parent 1515898e8c
commit 34059c9dbd

119 changed files with 663 additions and 510 deletions
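Most of the 119 changed files are the mechanical call-site migration visible in the hunks below: the public ByteSizeValue(long, ByteSizeUnit) constructor is encapsulated behind a ByteSizeValue.of(long, ByteSizeUnit) factory. A minimal sketch of the pattern (the 5 MB value is illustrative):

    import org.elasticsearch.common.unit.ByteSizeUnit;
    import org.elasticsearch.common.unit.ByteSizeValue;

    // Before this commit: direct construction from (size, unit)
    ByteSizeValue before = new ByteSizeValue(5, ByteSizeUnit.MB);

    // After this commit: the factory method introduced by this PR
    ByteSizeValue after = ByteSizeValue.of(5, ByteSizeUnit.MB);

    long bytes = after.getBytes(); // 5 * 1024 * 1024 = 5242880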
@@ -47,7 +47,7 @@ public class BytesArrayReadLongBenchmark {
     @Setup
     public void initResults() throws IOException {
         final BytesStreamOutput tmp = new BytesStreamOutput();
-        final long bytes = new ByteSizeValue(dataMb, ByteSizeUnit.MB).getBytes();
+        final long bytes = ByteSizeValue.of(dataMb, ByteSizeUnit.MB).getBytes();
         for (int i = 0; i < bytes / 8; i++) {
             tmp.writeLong(i);
         }
@@ -47,7 +47,7 @@ public class PagedBytesReferenceReadLongBenchmark {
     @Setup
     public void initResults() throws IOException {
         final BytesStreamOutput tmp = new BytesStreamOutput();
-        final long bytes = new ByteSizeValue(dataMb, ByteSizeUnit.MB).getBytes();
+        final long bytes = ByteSizeValue.of(dataMb, ByteSizeUnit.MB).getBytes();
         for (int i = 0; i < bytes / 8; i++) {
             tmp.writeLong(i);
         }
docs/changelog/120142.yaml (new file, 13 lines)

@@ -0,0 +1,13 @@
+pr: 120142
+summary: Limit `ByteSizeUnit` to 2 decimals
+area: Infra/Core
+type: breaking
+issues: []
+breaking:
+  title: Limit `ByteSizeUnit` to 2 decimals
+  area: Cluster and node setting
+  details: In the past, byte values like `1.25 mb` were allowed but deprecated. Now, values with up to two decimal places are allowed,
+    unless the unit is bytes, in which case no decimals are allowed. Values with too many decimal places result in an error.
+  impact: Values with more than two decimal places, like `0.123 mb`, will be rejected with an error,
+    where in the past they'd have been accepted with a deprecation warning.
+  notable: false
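A hedged sketch of the parsing rules this changelog describes, assuming the existing ByteSizeValue.parseBytesSizeValue(String, String) entry point; the setting name is invented for illustration:

    import org.elasticsearch.common.unit.ByteSizeValue;

    // Allowed: up to two decimal places on non-byte units
    ByteSizeValue ok = ByteSizeValue.parseBytesSizeValue("1.25mb", "example.setting");

    // Rejected after this change (previously accepted with a deprecation warning):
    // ByteSizeValue.parseBytesSizeValue("0.123mb", "example.setting"); // three decimals -> error
    // ByteSizeValue.parseBytesSizeValue("1.5b", "example.setting");    // decimals on bytes -> error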
@@ -77,9 +77,6 @@ public class BytesProcessorTests extends AbstractStringProcessorTestCase<Long> {
         String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, "1.1kb");
         Processor processor = newProcessor(fieldName, randomBoolean(), fieldName);
         processor.execute(ingestDocument);
-        assertThat(ingestDocument.getFieldValue(fieldName, expectedResultType()), equalTo(1126L));
-        assertWarnings(
-            "Fractional bytes values are deprecated. Use non-fractional bytes values instead: [1.1kb] found for setting " + "[Ingest Field]"
-        );
+        assertThat(ingestDocument.getFieldValue(fieldName, expectedResultType()), equalTo(1127L));
     }
 }
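The 1126L to 1127L change is the "rounding up with no warnings" fix from the commit message: "1.1kb" is 1.1 * 1024 = 1126.4 bytes, the parser appears to round the fraction up instead of truncating it, and the deprecation warning goes away. A worked sketch (RoundingMode.UP is inferred from the new expected value, not shown in this hunk):

    import java.math.BigDecimal;
    import java.math.RoundingMode;

    BigDecimal bytes = new BigDecimal("1.1").multiply(BigDecimal.valueOf(1024)); // 1126.4
    long rounded = bytes.setScale(0, RoundingMode.UP).longValueExact();          // 1127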
@@ -438,7 +438,7 @@ public class RemoteScrollableHitSourceTests extends ESTestCase {
         public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
             HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1];
             FutureCallback callback = (FutureCallback) invocationOnMock.getArguments()[3];
-            assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit());
+            assertEquals(ByteSizeValue.of(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit());
             callback.failed(tooLong);
             return null;
         }
@@ -90,7 +90,7 @@ public class AzureBlobStoreRepositoryTests extends ESMockAPIBasedRepositoryInteg
     protected Settings repositorySettings(String repoName) {
         Settings.Builder settingsBuilder = Settings.builder()
             .put(super.repositorySettings(repoName))
-            .put(AzureRepository.Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.MB))
+            .put(AzureRepository.Repository.MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.MB))
             .put(AzureRepository.Repository.CONTAINER_SETTING.getKey(), "container")
             .put(AzureStorageSettings.ACCOUNT_SETTING.getKey(), "test")
             .put(AzureRepository.Repository.DELETION_BATCH_SIZE_SETTING.getKey(), randomIntBetween(5, 256))
@@ -115,7 +115,7 @@ public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyReposi
             Settings.builder()
                 .put("container", System.getProperty("test.azure.container"))
                 .put("base_path", System.getProperty("test.azure.base") + randomAlphaOfLength(8))
-                .put("max_single_part_upload_size", new ByteSizeValue(1, ByteSizeUnit.MB))
+                .put("max_single_part_upload_size", ByteSizeValue.of(1, ByteSizeUnit.MB))
         )
         .get();
     assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
@@ -107,8 +107,8 @@ public class AzureBlobStore implements BlobStore {
     private static final Logger logger = LogManager.getLogger(AzureBlobStore.class);
     // See https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch#request-body
     public static final int MAX_ELEMENTS_PER_BATCH = 256;
-    private static final long DEFAULT_READ_CHUNK_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB).getBytes();
-    private static final int DEFAULT_UPLOAD_BUFFERS_SIZE = (int) new ByteSizeValue(64, ByteSizeUnit.KB).getBytes();
+    private static final long DEFAULT_READ_CHUNK_SIZE = ByteSizeValue.of(32, ByteSizeUnit.MB).getBytes();
+    private static final int DEFAULT_UPLOAD_BUFFERS_SIZE = (int) ByteSizeValue.of(64, ByteSizeUnit.KB).getBytes();

     private final AzureStorageService service;
     private final BigArrays bigArrays;
@@ -81,7 +81,7 @@ public class AzureRepository extends MeteredBlobStoreRepository {
     );
     public static final Setting<Boolean> READONLY_SETTING = Setting.boolSetting(READONLY_SETTING_KEY, false, Property.NodeScope);
     // see ModelHelper.BLOB_DEFAULT_MAX_SINGLE_UPLOAD_SIZE
-    private static final ByteSizeValue DEFAULT_MAX_SINGLE_UPLOAD_SIZE = new ByteSizeValue(256, ByteSizeUnit.MB);
+    private static final ByteSizeValue DEFAULT_MAX_SINGLE_UPLOAD_SIZE = ByteSizeValue.of(256, ByteSizeUnit.MB);
     public static final Setting<ByteSizeValue> MAX_SINGLE_PART_UPLOAD_SIZE_SETTING = Setting.byteSizeSetting(
         "max_single_part_upload_size",
         DEFAULT_MAX_SINGLE_UPLOAD_SIZE,
@@ -37,7 +37,7 @@ public class AzureStorageService {
      * The maximum size of a BlockBlob block.
      * See https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs
      */
-    public static final ByteSizeValue MAX_BLOCK_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB);
+    public static final ByteSizeValue MAX_BLOCK_SIZE = ByteSizeValue.of(100, ByteSizeUnit.MB);

     /**
      * The maximum number of blocks.
@@ -165,7 +165,7 @@ public abstract class AbstractAzureServerTestCase extends ESTestCase {
             .put(CONTAINER_SETTING.getKey(), CONTAINER)
             .put(ACCOUNT_SETTING.getKey(), clientName)
             .put(LOCATION_MODE_SETTING.getKey(), locationMode)
-            .put(MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.MB))
+            .put(MAX_SINGLE_PART_UPLOAD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.MB))
             .build()
     );

@@ -133,7 +133,7 @@ public class AzureRepositorySettingsTests extends ESTestCase {
         // chunk size in settings
         int size = randomIntBetween(1, 256);
         azureRepository = azureRepository(Settings.builder().put("chunk_size", size + "mb").build());
-        assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), azureRepository.chunkSize());
+        assertEquals(ByteSizeValue.of(size, ByteSizeUnit.MB), azureRepository.chunkSize());

         // zero bytes is not allowed
         IllegalArgumentException e = expectThrows(
@@ -143,7 +143,7 @@ public class GoogleCloudStorageBlobStoreRepositoryTests extends ESMockAPIBasedRe
             Settings.builder().put("chunk_size", size + "mb").build()
         );
         chunkSize = GoogleCloudStorageRepository.getSetting(GoogleCloudStorageRepository.CHUNK_SIZE, repositoryMetadata);
-        assertEquals(new ByteSizeValue(size, ByteSizeUnit.MB), chunkSize);
+        assertEquals(ByteSizeValue.of(size, ByteSizeUnit.MB), chunkSize);

         // zero bytes is not allowed
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
@@ -83,7 +83,7 @@ class GoogleCloudStorageBlobStore implements BlobStore {
         final String key = "es.repository_gcs.large_blob_threshold_byte_size";
         final String largeBlobThresholdByteSizeProperty = System.getProperty(key);
         if (largeBlobThresholdByteSizeProperty == null) {
-            LARGE_BLOB_THRESHOLD_BYTE_SIZE = Math.toIntExact(new ByteSizeValue(5, ByteSizeUnit.MB).getBytes());
+            LARGE_BLOB_THRESHOLD_BYTE_SIZE = Math.toIntExact(ByteSizeValue.of(5, ByteSizeUnit.MB).getBytes());
         } else {
             final int largeBlobThresholdByteSize;
             try {
@@ -40,7 +40,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository {
      * Maximum allowed object size in GCS.
      * @see <a href="https://cloud.google.com/storage/quotas#objects">GCS documentation</a> for details.
      */
-    static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(5, ByteSizeUnit.TB);
+    static final ByteSizeValue MAX_CHUNK_SIZE = ByteSizeValue.of(5, ByteSizeUnit.TB);

     static final String TYPE = "gcs";

@@ -129,7 +129,7 @@ class S3BlobContainer extends AbstractBlobContainer {
     @Override
     public long readBlobPreferredLength() {
         // This container returns streams that must be fully consumed, so we tell consumers to make bounded requests.
-        return new ByteSizeValue(32, ByteSizeUnit.MB).getBytes();
+        return ByteSizeValue.of(32, ByteSizeUnit.MB).getBytes();
     }

     /**
@@ -99,13 +99,13 @@ class S3Repository extends MeteredBlobStoreRepository {
     /**
      * Maximum size of files that can be uploaded using a single upload request.
      */
-    static final ByteSizeValue MAX_FILE_SIZE = new ByteSizeValue(5, ByteSizeUnit.GB);
+    static final ByteSizeValue MAX_FILE_SIZE = ByteSizeValue.of(5, ByteSizeUnit.GB);

     /**
      * Minimum size of parts that can be uploaded using the Multipart Upload API.
      * (see http://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html)
      */
-    static final ByteSizeValue MIN_PART_SIZE_USING_MULTIPART = new ByteSizeValue(5, ByteSizeUnit.MB);
+    static final ByteSizeValue MIN_PART_SIZE_USING_MULTIPART = ByteSizeValue.of(5, ByteSizeUnit.MB);

     /**
      * Maximum size of parts that can be uploaded using the Multipart Upload API.
@@ -116,7 +116,7 @@ class S3Repository extends MeteredBlobStoreRepository {
     /**
      * Maximum size of files that can be uploaded using the Multipart Upload API.
      */
-    static final ByteSizeValue MAX_FILE_SIZE_USING_MULTIPART = new ByteSizeValue(5, ByteSizeUnit.TB);
+    static final ByteSizeValue MAX_FILE_SIZE_USING_MULTIPART = ByteSizeValue.of(5, ByteSizeUnit.TB);

     /**
      * Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
@@ -137,7 +137,7 @@ class S3Repository extends MeteredBlobStoreRepository {
     static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting(
         "chunk_size",
         MAX_FILE_SIZE_USING_MULTIPART,
-        new ByteSizeValue(5, ByteSizeUnit.MB),
+        ByteSizeValue.of(5, ByteSizeUnit.MB),
         MAX_FILE_SIZE_USING_MULTIPART
     );

@@ -335,7 +335,7 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes
     public void testWriteLargeBlob() throws Exception {
         final boolean useTimeout = rarely();
         final TimeValue readTimeout = useTimeout ? TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null;
-        final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB);
+        final ByteSizeValue bufferSize = ByteSizeValue.of(5, ByteSizeUnit.MB);
         final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize);

         final int parts = randomIntBetween(1, 5);
@@ -436,7 +436,7 @@ public class S3BlobContainerRetriesTests extends AbstractBlobContainerRetriesTes
     public void testWriteLargeBlobStreaming() throws Exception {
         final boolean useTimeout = rarely();
         final TimeValue readTimeout = useTimeout ? TimeValue.timeValueMillis(randomIntBetween(100, 500)) : null;
-        final ByteSizeValue bufferSize = new ByteSizeValue(5, ByteSizeUnit.MB);
+        final ByteSizeValue bufferSize = ByteSizeValue.of(5, ByteSizeUnit.MB);
         final BlobContainer blobContainer = createBlobContainer(null, readTimeout, true, bufferSize);

         final int parts = randomIntBetween(1, 5);
@@ -93,8 +93,8 @@ public class S3RepositoryTests extends ESTestCase {
     private Settings bufferAndChunkSettings(long buffer, long chunk) {
         return Settings.builder()
             .put(S3Repository.BUCKET_SETTING.getKey(), "bucket")
-            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(buffer, ByteSizeUnit.MB).getStringRep())
-            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunk, ByteSizeUnit.MB).getStringRep())
+            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), ByteSizeValue.of(buffer, ByteSizeUnit.MB).getStringRep())
+            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), ByteSizeValue.of(chunk, ByteSizeUnit.MB).getStringRep())
             .build();
     }

@@ -33,7 +33,7 @@ public class URLBlobStore implements BlobStore {

     static final Setting<ByteSizeValue> BUFFER_SIZE_SETTING = Setting.byteSizeSetting(
         "repositories.uri.buffer_size",
-        new ByteSizeValue(100, ByteSizeUnit.KB),
+        ByteSizeValue.of(100, ByteSizeUnit.KB),
         Setting.Property.NodeScope
     );

@@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.hasSize;
 @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numClientNodes = 0, numDataNodes = 1)
 public class Netty4HttpRequestSizeLimitIT extends ESNetty4IntegTestCase {

-    private static final ByteSizeValue LIMIT = new ByteSizeValue(2, ByteSizeUnit.KB);
+    private static final ByteSizeValue LIMIT = ByteSizeValue.of(2, ByteSizeUnit.KB);

     @Override
     protected boolean addMockHttpTransport() {
@@ -96,7 +96,7 @@ public class Netty4IncrementalRequestHandlingIT extends ESNetty4IntegTestCase {
     @Override
     protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
         Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
-        builder.put(HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), new ByteSizeValue(50, ByteSizeUnit.MB));
+        builder.put(HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), ByteSizeValue.of(50, ByteSizeUnit.MB));
         return builder.build();
     }

@@ -57,7 +57,7 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin {
     );
     public static final Setting<ByteSizeValue> SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = byteSizeSetting(
         "http.netty.receive_predictor_size",
-        new ByteSizeValue(64, ByteSizeUnit.KB),
+        ByteSizeValue.of(64, ByteSizeUnit.KB),
         Setting.Property.NodeScope
     );
     public static final Setting<Integer> WORKER_COUNT = new Setting<>(
@@ -68,7 +68,7 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin {
     );
     private static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = byteSizeSetting(
         "transport.netty.receive_predictor_size",
-        new ByteSizeValue(64, ByteSizeUnit.KB),
+        ByteSizeValue.of(64, ByteSizeUnit.KB),
         Setting.Property.NodeScope
     );
     public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting(
@@ -316,7 +316,7 @@ public class RolloverIT extends ESIntegTestCase {
         final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
             .setConditions(
                 RolloverConditions.newBuilder()
-                    .addMaxIndexSizeCondition(new ByteSizeValue(10, ByteSizeUnit.MB))
+                    .addMaxIndexSizeCondition(ByteSizeValue.of(10, ByteSizeUnit.MB))
                     .addMaxIndexAgeCondition(TimeValue.timeValueHours(4))
             )
             .get();
@@ -330,7 +330,7 @@ public class RolloverIT extends ESIntegTestCase {
         assertThat(
             conditions,
             containsInAnyOrder(
-                new MaxSizeCondition(new ByteSizeValue(10, ByteSizeUnit.MB)).toString(),
+                new MaxSizeCondition(ByteSizeValue.of(10, ByteSizeUnit.MB)).toString(),
                 new MaxAgeCondition(TimeValue.timeValueHours(4)).toString()
             )
         );
@@ -447,7 +447,7 @@ public class RolloverIT extends ESIntegTestCase {
         final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
             .setConditions(
                 RolloverConditions.newBuilder()
-                    .addMaxIndexSizeCondition(new ByteSizeValue(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB))
+                    .addMaxIndexSizeCondition(ByteSizeValue.of(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB))
             )
             .get();
         assertThat(response.getOldIndex(), equalTo("test-1"));
@@ -459,7 +459,7 @@ public class RolloverIT extends ESIntegTestCase {

         // A small max_size
         {
-            ByteSizeValue maxSizeValue = new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES);
+            ByteSizeValue maxSizeValue = ByteSizeValue.of(randomIntBetween(1, 20), ByteSizeUnit.BYTES);
             long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L;
             final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
                 .setConditions(RolloverConditions.newBuilder().addMaxIndexSizeCondition(maxSizeValue))
@@ -482,7 +482,7 @@ public class RolloverIT extends ESIntegTestCase {
         final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
             .setConditions(
                 RolloverConditions.newBuilder()
-                    .addMaxIndexSizeCondition(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES))
+                    .addMaxIndexSizeCondition(ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES))
                     .addMinIndexDocsCondition(1L)
             )
             .get();
@@ -512,7 +512,7 @@ public class RolloverIT extends ESIntegTestCase {
         final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
             .setConditions(
                 RolloverConditions.newBuilder()
-                    .addMaxPrimaryShardSizeCondition(new ByteSizeValue(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB))
+                    .addMaxPrimaryShardSizeCondition(ByteSizeValue.of(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB))
             )
             .get();
         assertThat(response.getOldIndex(), equalTo("test-1"));
@@ -524,7 +524,7 @@ public class RolloverIT extends ESIntegTestCase {

         // A small max_primary_shard_size
         {
-            ByteSizeValue maxPrimaryShardSizeCondition = new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES);
+            ByteSizeValue maxPrimaryShardSizeCondition = ByteSizeValue.of(randomIntBetween(1, 20), ByteSizeUnit.BYTES);
             long beforeTime = client().threadPool().absoluteTimeInMillis() - 1000L;
             final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
                 .setConditions(RolloverConditions.newBuilder().addMaxPrimaryShardSizeCondition(maxPrimaryShardSizeCondition))
@@ -547,7 +547,7 @@ public class RolloverIT extends ESIntegTestCase {
         final RolloverResponse response = indicesAdmin().prepareRolloverIndex("test_alias")
             .setConditions(
                 RolloverConditions.newBuilder()
-                    .addMaxPrimaryShardSizeCondition(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES))
+                    .addMaxPrimaryShardSizeCondition(ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES))
                     .addMinIndexDocsCondition(1L)
             )
             .get();
@@ -53,7 +53,7 @@ public class BulkProcessor2IT extends ESIntegTestCase {
             // let's make sure that the bulk action limit trips, one single execution will index all the documents
             .setBulkActions(numDocs)
             .setFlushInterval(TimeValue.timeValueHours(24))
-            .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+            .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
             .build();
         try {
@@ -89,7 +89,7 @@ public class BulkProcessor2IT extends ESIntegTestCase {
             .setBulkActions(bulkActions)
             // set interval and size to high values
             .setFlushInterval(TimeValue.timeValueHours(24))
-            .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+            .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
             .build();
         try {
@@ -134,7 +134,7 @@ public class BulkProcessor2IT extends ESIntegTestCase {
             // let's make sure that the bulk action limit trips, one single execution will index all the documents
             .setBulkActions(numDocs)
             .setFlushInterval(TimeValue.timeValueHours(24))
-            .setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
+            .setBulkSize(ByteSizeValue.of(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
             .build();

         MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
@@ -169,7 +169,7 @@ public class BulkProcessor2IT extends ESIntegTestCase {
             .setBulkActions(bulkActions)
             // set interval and size to high values
             .setFlushInterval(TimeValue.timeValueHours(24))
-            .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+            .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
             .build();
         try {
@@ -55,7 +55,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
                 .setConcurrentRequests(randomIntBetween(0, 1))
                 .setBulkActions(numDocs)
                 .setFlushInterval(TimeValue.timeValueHours(24))
-                .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+                .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
                 .build()
         ) {
@@ -83,7 +83,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
                 .setConcurrentRequests(randomIntBetween(0, 10))
                 .setBulkActions(numDocs + randomIntBetween(1, 100))
                 .setFlushInterval(TimeValue.timeValueHours(24))
-                .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+                .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
                 .build()
         ) {
@@ -115,7 +115,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
                 .setConcurrentRequests(randomIntBetween(0, 10))
                 .setBulkActions(numDocs + randomIntBetween(1, 100))
                 .setFlushInterval(TimeValue.timeValueHours(24))
-                .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+                .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
                 .setFlushCondition(flushEnabled::get)
                 .build()
         ) {
@@ -159,7 +159,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
                 .setBulkActions(bulkActions)
                 // set interval and size to high values
                 .setFlushInterval(TimeValue.timeValueHours(24))
-                .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+                .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
                 .build()
         ) {
@@ -202,7 +202,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
             .setConcurrentRequests(randomIntBetween(0, 1))
             .setBulkActions(numDocs)
             .setFlushInterval(TimeValue.timeValueHours(24))
-            .setBulkSize(new ByteSizeValue(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
+            .setBulkSize(ByteSizeValue.of(randomIntBetween(1, 10), RandomPicks.randomFrom(random(), ByteSizeUnit.values())))
             .build();

         MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
@@ -250,7 +250,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
                 .setBulkActions(bulkActions)
                 // set interval and size to high values
                 .setFlushInterval(TimeValue.timeValueHours(24))
-                .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
+                .setBulkSize(ByteSizeValue.of(1, ByteSizeUnit.GB))
                 .build()
         ) {
@@ -43,7 +43,7 @@ import static org.hamcrest.Matchers.equalTo;
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class DiskThresholdMonitorIT extends DiskUsageIntegTestCase {

-    private static final long FLOOD_STAGE_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes();
+    private static final long FLOOD_STAGE_BYTES = ByteSizeValue.of(10, ByteSizeUnit.KB).getBytes();

     @Override
     protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
@@ -60,7 +60,7 @@ import static org.hamcrest.Matchers.is;
 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
 public class DiskThresholdDeciderIT extends DiskUsageIntegTestCase {

-    private static final long WATERMARK_BYTES = new ByteSizeValue(10, ByteSizeUnit.KB).getBytes();
+    private static final long WATERMARK_BYTES = ByteSizeValue.of(10, ByteSizeUnit.KB).getBytes();

     @Override
     protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
@@ -98,7 +98,6 @@ import static org.elasticsearch.cluster.routing.TestShardRouting.shardRoutingBui
 import static org.elasticsearch.index.shard.IndexShardTestCase.closeShardNoCheck;
 import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog;
 import static org.elasticsearch.index.shard.IndexShardTestCase.recoverFromStore;
-import static org.elasticsearch.indices.cluster.AbstractIndicesClusterStateServiceTestCase.awaitIndexShardCloseAsyncTasks;
 import static org.elasticsearch.test.LambdaMatchers.falseWith;
 import static org.elasticsearch.test.LambdaMatchers.trueWith;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -332,7 +331,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
             Settings.builder()
                 .put(
                     IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
-                    new ByteSizeValue(135 /* size of the operation + one generation header&footer*/, ByteSizeUnit.BYTES)
+                    ByteSizeValue.of(135 /* size of the operation + one generation header&footer*/, ByteSizeUnit.BYTES)
                 )
                 .build()
         )
@@ -372,7 +371,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
         indicesAdmin().prepareUpdateSettings("test")
             .setSettings(
                 Settings.builder()
-                    .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES))
+                    .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(size, ByteSizeUnit.BYTES))
                     .build()
             )
             .get();
@@ -604,7 +604,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
     private static void disableTranslogFlush(String index) {
         updateIndexSettings(
             Settings.builder()
-                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)),
+                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)),
             index
         );
     }
@@ -155,7 +155,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
                 // no checkindex - we corrupt shards on purpose
                 .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
                 // no translog based flush - it might change the .liv / segments.N files
-                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
             )
         );
         ensureGreen();
@@ -269,7 +269,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
                 .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on
                                                                                           // purpose
                 // no translog based flush - it might change the .liv / segments.N files
-                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
             )
         );
         ensureGreen();
@@ -544,7 +544,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
                 // no checkindex - we corrupt shards on purpose
                 .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
                 // no translog based flush - it might change the .liv / segments.N files
-                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
             )
         );
         ensureGreen();
@@ -612,7 +612,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
                 // no checkindex - we corrupt shards on purpose
                 .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
                 // no translog based flush - it might change the .liv / segments.N files
-                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
             )
         );
         ensureGreen();
@@ -54,7 +54,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
         prepareCreate("test").setSettings(
             indexSettings(1, 0).put("index.refresh_interval", "-1")
                 .put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog
-                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
+                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB))
             )
         );

@@ -332,7 +332,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
     }

     public void testLimitsRequestSize() {
-        ByteSizeValue inFlightRequestsLimit = new ByteSizeValue(8, ByteSizeUnit.KB);
+        ByteSizeValue inFlightRequestsLimit = ByteSizeValue.of(8, ByteSizeUnit.KB);
         if (noopBreakerUsed()) {
             logger.info("--> noop breakers used, skipping test");
             return;
@@ -256,7 +256,7 @@ public class IndexRecoveryIT extends AbstractIndexRecoveryIntegTestCase {
     public Settings.Builder createRecoverySettingsChunkPerSecond(long chunkSizeBytes) {
         return Settings.builder()
             // Set the chunk size in bytes
-            .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), new ByteSizeValue(chunkSizeBytes, ByteSizeUnit.BYTES))
+            .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), ByteSizeValue.of(chunkSizeBytes, ByteSizeUnit.BYTES))
             // Set one chunk of bytes per second.
             .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSizeBytes, ByteSizeUnit.BYTES);
     }
@@ -270,7 +270,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
         IndexService indexService = service.indexService(resolveIndex("test"));
         if (indexService != null) {
             assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), -1);
-            assertEquals(indexService.getIndexSettings().getFlushThresholdSize(new ByteSizeValue(1, ByteSizeUnit.TB)).getBytes(), 1024);
+            assertEquals(indexService.getIndexSettings().getFlushThresholdSize(ByteSizeValue.of(1, ByteSizeUnit.TB)).getBytes(), 1024);
             assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096);
         }
     }
@@ -281,7 +281,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
         IndexService indexService = service.indexService(resolveIndex("test"));
         if (indexService != null) {
             assertEquals(indexService.getIndexSettings().getRefreshInterval().millis(), 1000);
-            assertEquals(indexService.getIndexSettings().getFlushThresholdSize(new ByteSizeValue(1, ByteSizeUnit.TB)).getBytes(), 1024);
+            assertEquals(indexService.getIndexSettings().getFlushThresholdSize(ByteSizeValue.of(1, ByteSizeUnit.TB)).getBytes(), 1024);
             assertEquals(indexService.getIndexSettings().getGenerationThresholdSize().getBytes(), 4096);
         }
     }
@@ -74,7 +74,7 @@ public class CloseIndexIT extends ESIntegTestCase {
             .put(super.indexSettings())
             .put(
                 IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
-                new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB)
+                ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB)
             )
             .build();
     }
@@ -63,10 +63,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
     public void testCancelRecoveryAndResume() throws Exception {
         updateClusterSettings(
             Settings.builder()
-                .put(
-                    RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(),
-                    new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES)
-                )
+                .put(RecoverySettings.INDICES_RECOVERY_CHUNK_SIZE.getKey(), ByteSizeValue.of(randomIntBetween(50, 300), ByteSizeUnit.BYTES))
         );

         NodesStatsResponse nodeStats = clusterAdmin().prepareNodesStats().get();
@@ -21,7 +21,7 @@ public class FsBlobStoreRepositoryIT extends ESFsBasedRepositoryIntegTestCase {
         final Settings.Builder settings = Settings.builder().put("compress", randomBoolean()).put("location", randomRepoPath());
         if (randomBoolean()) {
             long size = 1 << randomInt(10);
-            settings.put("chunk_size", new ByteSizeValue(size, ByteSizeUnit.KB));
+            settings.put("chunk_size", ByteSizeValue.of(size, ByteSizeUnit.KB));
         }
         return settings.build();
     }
@@ -497,7 +497,7 @@ public class SnapshotShutdownIT extends AbstractSnapshotIntegTestCase {
         final String nodeForRemovalId = internalCluster().getInstance(NodeEnvironment.class, nodeForRemoval).nodeId();
         final var indexName = randomIdentifier();
         createIndexWithContent(indexName, indexSettings(numShards, 0).put(REQUIRE_NODE_NAME_SETTING, nodeForRemoval).build());
-        indexAllShardsToAnEqualOrGreaterMinimumSize(indexName, new ByteSizeValue(2, ByteSizeUnit.KB).getBytes());
+        indexAllShardsToAnEqualOrGreaterMinimumSize(indexName, ByteSizeValue.of(2, ByteSizeUnit.KB).getBytes());

         // Start the snapshot with blocking in place on the data node not to allow shard snapshots to finish yet.
         final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);
@@ -156,6 +156,7 @@ public class TransportVersions {
     public static final TransportVersion ELASTIC_INFERENCE_SERVICE_UNIFIED_CHAT_COMPLETIONS_INTEGRATION = def(8_822_00_0);
     public static final TransportVersion KQL_QUERY_TECH_PREVIEW = def(8_823_00_0);
     public static final TransportVersion ESQL_PROFILE_ROWS_PROCESSED = def(8_824_00_0);
+    public static final TransportVersion BYTE_SIZE_VALUE_ALWAYS_USES_BYTES = def(8_825_00_0);

     /*
      * STOP! READ THIS FIRST! No, really,
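The new constant marks the wire-format change behind the "ByteSizeUnit transport changes" bullet in the commit message. The gated serialization itself is not part of this excerpt; the following is a hypothetical sketch of the usual Elasticsearch pattern of branching on the stream's transport version, with the always-bytes encoding as an assumption:

    // Hypothetical sketch only; the method body and encoding are assumptions.
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.BYTE_SIZE_VALUE_ALWAYS_USES_BYTES)) {
            out.writeZLong(getBytes()); // assumed: newer nodes always exchange a byte count
            unit.writeTo(out);          // assumed: unit kept for display purposes
        } else {
            out.writeZLong(getBytes() / unit.toBytes(1)); // legacy (size, unit) pair
            unit.writeTo(out);
        }
    }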
@@ -84,7 +84,7 @@ public class BulkProcessor implements Closeable {
     private final Runnable onClose;
     private int concurrentRequests = 1;
     private int bulkActions = 1000;
-    private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
+    private ByteSizeValue bulkSize = ByteSizeValue.of(5, ByteSizeUnit.MB);
     private TimeValue flushInterval = null;
     private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff();
     private String globalIndex;
@@ -76,8 +76,8 @@ public class BulkProcessor2 implements Closeable {
     private final Listener listener;
     private final ThreadPool threadPool;
     private int maxRequestsInBulk = 1000;
-    private ByteSizeValue maxBulkSizeInBytes = new ByteSizeValue(5, ByteSizeUnit.MB);
-    private ByteSizeValue maxBytesInFlight = new ByteSizeValue(50, ByteSizeUnit.MB);
+    private ByteSizeValue maxBulkSizeInBytes = ByteSizeValue.of(5, ByteSizeUnit.MB);
+    private ByteSizeValue maxBytesInFlight = ByteSizeValue.of(50, ByteSizeUnit.MB);
     private TimeValue flushInterval = null;
     private int maxNumberOfRetries = 3;

@@ -14,84 +14,44 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.logging.DeprecationCategory;
 import org.elasticsearch.common.logging.DeprecationLogger;
-import org.elasticsearch.common.logging.LogConfigurator;
 import org.elasticsearch.xcontent.ToXContentFragment;
 import org.elasticsearch.xcontent.XContentBuilder;

 import java.io.IOException;
+import java.math.BigDecimal;
+import java.math.RoundingMode;
 import java.util.Locale;
 import java.util.Objects;

+import static org.elasticsearch.TransportVersions.BYTE_SIZE_VALUE_ALWAYS_USES_BYTES;
+import static org.elasticsearch.common.unit.ByteSizeUnit.BYTES;
+import static org.elasticsearch.common.unit.ByteSizeUnit.GB;
+import static org.elasticsearch.common.unit.ByteSizeUnit.KB;
+import static org.elasticsearch.common.unit.ByteSizeUnit.MB;
+import static org.elasticsearch.common.unit.ByteSizeUnit.PB;
+import static org.elasticsearch.common.unit.ByteSizeUnit.TB;
+
 public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment {

     /**
      * We have to lazy initialize the deprecation logger as otherwise a static logger here would be constructed before logging is configured
-     * leading to a runtime failure (see {@link LogConfigurator#checkErrorListener()} ). The premature construction would come from any
+     * leading to a runtime failure (see {@code LogConfigurator.checkErrorListener()} ). The premature construction would come from any
      * {@link ByteSizeValue} object constructed in, for example, settings in {@link org.elasticsearch.common.network.NetworkService}.
      */
     static class DeprecationLoggerHolder {
         static DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(ByteSizeValue.class);
     }

-    public static final ByteSizeValue ZERO = new ByteSizeValue(0, ByteSizeUnit.BYTES);
-    public static final ByteSizeValue ONE = new ByteSizeValue(1, ByteSizeUnit.BYTES);
-    public static final ByteSizeValue MINUS_ONE = new ByteSizeValue(-1, ByteSizeUnit.BYTES);
+    public static final ByteSizeValue ZERO = new ByteSizeValue(0, BYTES);
+    public static final ByteSizeValue ONE = new ByteSizeValue(1, BYTES);
+    public static final ByteSizeValue MINUS_ONE = new ByteSizeValue(-1, BYTES);

-    public static ByteSizeValue ofBytes(long size) {
-        if (size == 0) {
-            return ZERO;
-        }
-        if (size == 1) {
-            return ONE;
-        }
-        if (size == -1) {
-            return MINUS_ONE;
-        }
-        return new ByteSizeValue(size, ByteSizeUnit.BYTES);
-    }
-
-    public static ByteSizeValue ofKb(long size) {
-        return new ByteSizeValue(size, ByteSizeUnit.KB);
-    }
-
-    public static ByteSizeValue ofMb(long size) {
-        return new ByteSizeValue(size, ByteSizeUnit.MB);
-    }
-
-    public static ByteSizeValue ofGb(long size) {
-        return new ByteSizeValue(size, ByteSizeUnit.GB);
-    }
-
-    public static ByteSizeValue ofTb(long size) {
-        return new ByteSizeValue(size, ByteSizeUnit.TB);
-    }
-
-    public static ByteSizeValue ofPb(long size) {
-        return new ByteSizeValue(size, ByteSizeUnit.PB);
-    }
-
-    private final long size;
-    private final ByteSizeUnit unit;
-
-    public static ByteSizeValue readFrom(StreamInput in) throws IOException {
-        long size = in.readZLong();
-        ByteSizeUnit unit = ByteSizeUnit.readFrom(in);
-        if (unit == ByteSizeUnit.BYTES) {
-            return ofBytes(size);
-        }
-        return new ByteSizeValue(size, unit);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeZLong(size);
-        unit.writeTo(out);
-    }
-
-    public ByteSizeValue(long size, ByteSizeUnit unit) {
-        if (size < -1 || (size == -1 && unit != ByteSizeUnit.BYTES)) {
+    /**
+     * @param size the number of {@code unit}s
+     */
+    public static ByteSizeValue of(long size, ByteSizeUnit unit) {
+        if (size < -1 || (size == -1 && unit != BYTES)) {
             throw new IllegalArgumentException("Values less than -1 bytes are not supported: " + size + unit.getSuffix());
         }
         if (size > Long.MAX_VALUE / unit.toBytes(1)) {
@@ -99,18 +59,88 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment {
                 "Values greater than " + Long.MAX_VALUE + " bytes are not supported: " + size + unit.getSuffix()
             );
         }
-        this.size = size;
-        this.unit = unit;
+        return newByteSizeValue(size * unit.toBytes(1), unit);
+    }
+
+    public static ByteSizeValue ofBytes(long size) {
+        return of(size, BYTES);
+    }
+
+    public static ByteSizeValue ofKb(long size) {
+        return of(size, KB);
+    }
+
+    public static ByteSizeValue ofMb(long size) {
+        return of(size, MB);
+    }
+
+    public static ByteSizeValue ofGb(long size) {
+        return of(size, GB);
+    }
+
+    public static ByteSizeValue ofTb(long size) {
+        return of(size, TB);
+    }
+
+    public static ByteSizeValue ofPb(long size) {
+        return of(size, PB);
+    }
+
+    static ByteSizeValue newByteSizeValue(long sizeInBytes, ByteSizeUnit desiredUnit) {
+        // Peel off some common cases to avoid allocations
+        if (desiredUnit == BYTES) {
+            if (sizeInBytes == 0) {
+                return ZERO;
+            }
+            if (sizeInBytes == 1) {
+                return ONE;
+            }
+            if (sizeInBytes == -1) {
+                return MINUS_ONE;
+            }
+        }
+        if (sizeInBytes < 0) {
+            throw new IllegalArgumentException("Values less than -1 bytes are not supported: " + sizeInBytes);
+        }
+        return new ByteSizeValue(sizeInBytes, desiredUnit);
+    }
+
+    private final long sizeInBytes;
+    private final ByteSizeUnit desiredUnit;
+
+    public static ByteSizeValue readFrom(StreamInput in) throws IOException {
+        long size = in.readZLong();
+        ByteSizeUnit unit = ByteSizeUnit.readFrom(in);
+        if (in.getTransportVersion().onOrAfter(BYTE_SIZE_VALUE_ALWAYS_USES_BYTES)) {
+            return newByteSizeValue(size, unit);
+        } else {
+            return of(size, unit);
+        }
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        if (out.getTransportVersion().onOrAfter(BYTE_SIZE_VALUE_ALWAYS_USES_BYTES)) {
+            out.writeZLong(sizeInBytes);
+        } else {
+            out.writeZLong(Math.divideExact(sizeInBytes, desiredUnit.toBytes(1)));
+        }
+        desiredUnit.writeTo(out);
+    }
+
+    ByteSizeValue(long sizeInBytes, ByteSizeUnit desiredUnit) {
+        this.sizeInBytes = sizeInBytes;
+        this.desiredUnit = desiredUnit;
     }
 
     // For testing
-    long getSize() {
-        return size;
+    long getSizeInBytes() {
+        return sizeInBytes;
     }
 
     // For testing
-    ByteSizeUnit getUnit() {
-        return unit;
+    ByteSizeUnit getDesiredUnit() {
+        return desiredUnit;
     }
 
     @Deprecated
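
A quick illustration of the refactored API in the hunk above (illustrative only, not part of the change): a ByteSizeValue now stores one canonical byte count, and keeps the unit the caller named only for presentation and for the legacy wire format.

    // Sketch, assuming only the methods shown in this diff.
    ByteSizeValue v = ByteSizeValue.of(3, ByteSizeUnit.KB);
    long bytes = v.getBytes();   // 3072: normalized to bytes at construction time
    // ByteSizeValue.of(-1, ByteSizeUnit.KB) throws IllegalArgumentException,
    // since -1 is only representable in the BYTES unit.

On the wire, nodes on BYTE_SIZE_VALUE_ALWAYS_USES_BYTES or later exchange the raw byte count, while writeTo falls back to the old whole-units encoding for older peers.
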
@@ -123,27 +153,27 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment {
     }
 
     public long getBytes() {
-        return unit.toBytes(size);
+        return sizeInBytes;
     }
 
     public long getKb() {
-        return unit.toKB(size);
+        return getBytes() / KB.toBytes(1);
     }
 
     public long getMb() {
-        return unit.toMB(size);
+        return getBytes() / MB.toBytes(1);
     }
 
     public long getGb() {
-        return unit.toGB(size);
+        return getBytes() / GB.toBytes(1);
     }
 
     public long getTb() {
-        return unit.toTB(size);
+        return getBytes() / TB.toBytes(1);
     }
 
     public long getPb() {
-        return unit.toPB(size);
+        return getBytes() / PB.toBytes(1);
     }
 
     public double getKbFrac() {
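
Note that the whole-unit getters above now use integer division over the canonical byte count, so they truncate rather than round. A worked sketch (illustrative, using methods shown in this diff):

    ByteSizeValue v = ByteSizeValue.ofBytes(1536);
    long kb = v.getKb();           // 1536 / 1024 == 1 (truncated)
    double kbFrac = v.getKbFrac(); // 1.5, the *Frac accessors keep the remainder
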
@@ -175,32 +205,41 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment {
      * serialising the value to JSON.
      */
    public String getStringRep() {
-        if (size <= 0) {
-            return String.valueOf(size);
+        if (sizeInBytes <= 0) {
+            return String.valueOf(sizeInBytes);
+        }
+        long numUnits = sizeInBytes / desiredUnit.toBytes(1);
+        long residue = sizeInBytes % desiredUnit.toBytes(1);
+        if (residue == 0) {
+            return numUnits + desiredUnit.getSuffix();
+        } else {
+            return sizeInBytes + BYTES.getSuffix();
         }
-        return size + unit.getSuffix();
     }
 
+    /**
+     * @return a string with at most one decimal point whose magnitude is close to {@code this}.
+     */
     @Override
     public String toString() {
         long bytes = getBytes();
         double value = bytes;
-        String suffix = ByteSizeUnit.BYTES.getSuffix();
+        String suffix = BYTES.getSuffix();
         if (bytes >= ByteSizeUnit.C5) {
             value = getPbFrac();
-            suffix = ByteSizeUnit.PB.getSuffix();
+            suffix = PB.getSuffix();
         } else if (bytes >= ByteSizeUnit.C4) {
             value = getTbFrac();
-            suffix = ByteSizeUnit.TB.getSuffix();
+            suffix = TB.getSuffix();
         } else if (bytes >= ByteSizeUnit.C3) {
             value = getGbFrac();
-            suffix = ByteSizeUnit.GB.getSuffix();
+            suffix = GB.getSuffix();
         } else if (bytes >= ByteSizeUnit.C2) {
             value = getMbFrac();
-            suffix = ByteSizeUnit.MB.getSuffix();
+            suffix = MB.getSuffix();
         } else if (bytes >= ByteSizeUnit.C1) {
             value = getKbFrac();
-            suffix = ByteSizeUnit.KB.getSuffix();
+            suffix = KB.getSuffix();
         }
         return Strings.format1Decimals(value, suffix);
     }
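
With the change above, getStringRep prints a whole number of the desired unit when the stored size divides evenly, and otherwise falls back to an exact byte count; toString keeps its one-decimal human-readable form. A short worked example (illustrative; "s" is a placeholder setting name, and the parse entry point appears further down in this diff):

    ByteSizeValue.of(2, ByteSizeUnit.KB).getStringRep();            // "2kb" (residue == 0)
    ByteSizeValue.parseBytesSizeValue("1.5kb", "s").getStringRep(); // "1536b" (1536 % 1024 != 0)
    ByteSizeValue.parseBytesSizeValue("1.5kb", "s").toString();     // "1.5kb"
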
@@ -231,25 +270,25 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment {
         }
         String lowerSValue = sValue.toLowerCase(Locale.ROOT).trim();
         if (lowerSValue.endsWith("k")) {
-            return parse(sValue, lowerSValue, "k", ByteSizeUnit.KB, settingName);
+            return parse(sValue, lowerSValue, "k", KB, settingName);
         } else if (lowerSValue.endsWith("kb")) {
-            return parse(sValue, lowerSValue, "kb", ByteSizeUnit.KB, settingName);
+            return parse(sValue, lowerSValue, "kb", KB, settingName);
         } else if (lowerSValue.endsWith("m")) {
-            return parse(sValue, lowerSValue, "m", ByteSizeUnit.MB, settingName);
+            return parse(sValue, lowerSValue, "m", MB, settingName);
         } else if (lowerSValue.endsWith("mb")) {
-            return parse(sValue, lowerSValue, "mb", ByteSizeUnit.MB, settingName);
+            return parse(sValue, lowerSValue, "mb", MB, settingName);
         } else if (lowerSValue.endsWith("g")) {
-            return parse(sValue, lowerSValue, "g", ByteSizeUnit.GB, settingName);
+            return parse(sValue, lowerSValue, "g", GB, settingName);
         } else if (lowerSValue.endsWith("gb")) {
-            return parse(sValue, lowerSValue, "gb", ByteSizeUnit.GB, settingName);
+            return parse(sValue, lowerSValue, "gb", GB, settingName);
         } else if (lowerSValue.endsWith("t")) {
-            return parse(sValue, lowerSValue, "t", ByteSizeUnit.TB, settingName);
+            return parse(sValue, lowerSValue, "t", TB, settingName);
         } else if (lowerSValue.endsWith("tb")) {
-            return parse(sValue, lowerSValue, "tb", ByteSizeUnit.TB, settingName);
+            return parse(sValue, lowerSValue, "tb", TB, settingName);
         } else if (lowerSValue.endsWith("p")) {
-            return parse(sValue, lowerSValue, "p", ByteSizeUnit.PB, settingName);
+            return parse(sValue, lowerSValue, "p", PB, settingName);
         } else if (lowerSValue.endsWith("pb")) {
-            return parse(sValue, lowerSValue, "pb", ByteSizeUnit.PB, settingName);
+            return parse(sValue, lowerSValue, "pb", PB, settingName);
         } else if (lowerSValue.endsWith("b")) {
             return parseBytes(lowerSValue, settingName, sValue);
         } else {
@@ -285,24 +324,16 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment {
         ByteSizeUnit unit,
         final String settingName
     ) {
+        assert unit != BYTES : "Use parseBytes";
         final String s = normalized.substring(0, normalized.length() - suffix.length()).trim();
         try {
             try {
-                return new ByteSizeValue(Long.parseLong(s), unit);
+                return of(Long.parseLong(s), unit);
             } catch (final NumberFormatException e) {
-                try {
-                    final double doubleValue = Double.parseDouble(s);
-                    DeprecationLoggerHolder.deprecationLogger.warn(
-                        DeprecationCategory.PARSING,
-                        "fractional_byte_values",
-                        "Fractional bytes values are deprecated. Use non-fractional bytes values instead: [{}] found for setting [{}]",
-                        initialInput,
-                        settingName
-                    );
-                    return ByteSizeValue.ofBytes((long) (doubleValue * unit.toBytes(1)));
-                } catch (final NumberFormatException ignored) {
-                    throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}]", e, settingName, initialInput);
-                }
+                // If it's not an integer, it could be a valid number with a decimal
+                BigDecimal decimalValue = parseDecimal(s, settingName, initialInput, e);
+                long sizeInBytes = convertToBytes(decimalValue, unit, settingName, initialInput, e);
+                return new ByteSizeValue(sizeInBytes, unit);
             }
         } catch (IllegalArgumentException e) {
             throw new ElasticsearchParseException(
@@ -314,6 +345,82 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment {
         }
     }
 
+    /**
+     * @param numericPortion the number to parse
+     * @param settingName for error reporting - the name of the setting we're parsing
+     * @param settingValue for error reporting - the whole string value of the setting
+     * @param originalException for error reporting - the exception that occurred when we tried to parse the setting as an integer
+     */
+    private static BigDecimal parseDecimal(
+        String numericPortion,
+        String settingName,
+        String settingValue,
+        NumberFormatException originalException
+    ) {
+        BigDecimal decimalValue;
+        try {
+            decimalValue = new BigDecimal(numericPortion);
+        } catch (NumberFormatException e) {
+            // Here, we choose to use originalException as the cause, because a NumberFormatException here
+            // indicates the string wasn't actually a valid BigDecimal after all, so there's no reason
+            // to confuse matters by reporting BigDecimal in the stack trace.
+            ElasticsearchParseException toThrow = new ElasticsearchParseException(
+                "failed to parse setting [{}] with value [{}]",
+                originalException,
+                settingName,
+                settingValue
+            );
+            toThrow.addSuppressed(e);
+            throw toThrow;
+        }
+        if (decimalValue.signum() < 0) {
+            throw new ElasticsearchParseException("failed to parse setting [{}] with value [{}]", settingName, settingValue);
+        } else if (decimalValue.scale() > 2) {
+            throw new ElasticsearchParseException(
+                "failed to parse setting [{}] with more than two decimals in value [{}]",
+                settingName,
+                settingValue
+            );
+        }
+        return decimalValue;
+    }
+
+    /**
+     * @param decimalValue the number of {@code unit}s
+     * @param unit the specified {@link ByteSizeUnit}
+     * @param settingName for error reporting - the name of the setting we're parsing
+     * @param settingValue for error reporting - the whole string value of the setting
+     * @param originalException for error reporting - the exception that occurred when we tried to parse the setting as an integer
+     */
+    private static long convertToBytes(
+        BigDecimal decimalValue,
+        ByteSizeUnit unit,
+        String settingName,
+        String settingValue,
+        NumberFormatException originalException
+    ) {
+        BigDecimal sizeInBytes = decimalValue.multiply(new BigDecimal(unit.toBytes(1)));
+        try {
+            // Note we always round up here for two reasons:
+            // 1. Practically: toString truncates, so if we ever round down, we'll lose a tenth
+            // 2. In principle: if the user asks for 1.1kb, which is 1126.4 bytes, and we only give them 1126, then
+            //    we have not given them what they asked for.
+            return sizeInBytes.setScale(0, RoundingMode.UP).longValueExact();
+        } catch (ArithmeticException e) {
+            // Here, we choose to use the ArithmeticException as the cause, because we already know the
+            // number is a valid BigDecimal, so it makes sense to supply that context in the stack trace.
+            ElasticsearchParseException toThrow = new ElasticsearchParseException(
+                "failed to parse setting [{}] with value beyond {}: [{}]",
+                e,
+                settingName,
+                Long.MAX_VALUE,
+                settingValue
+            );
+            toThrow.addSuppressed(originalException);
+            throw toThrow;
+        }
+    }
+
     @Override
     public boolean equals(Object o) {
         if (this == o) {
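
Taken together, parseDecimal and convertToBytes pin down the new parsing contract: at most two decimal places, no negative fractions, and rounding up to the next whole byte. A worked example of the arithmetic (illustrative; "my.setting" is a placeholder setting name):

    ByteSizeValue.parseBytesSizeValue("1.1kb", "my.setting").getBytes();
    // 1.1 * 1024 = 1126.4, rounded up with RoundingMode.UP to 1127 bytes
    ByteSizeValue.parseBytesSizeValue("1.25mb", "my.setting");  // accepted: scale == 2
    ByteSizeValue.parseBytesSizeValue("0.123mb", "my.setting");
    // rejected: scale == 3 -> ElasticsearchParseException
    // "failed to parse setting [my.setting] with more than two decimals in value [0.123mb]"
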
@@ -328,7 +435,7 @@ public class ByteSizeValue implements Writeable, Comparable<ByteSizeValue>, ToXContentFragment {
 
     @Override
     public int hashCode() {
-        return Long.hashCode(size * unit.toBytes(1));
+        return Long.hashCode(getBytes());
     }
 
     @Override
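
Because hashCode is now derived from getBytes(), two values naming the same size in different units hash identically; testConversionHashCode further down in this diff asserts exactly that. A minimal sketch:

    ByteSizeValue gb = ByteSizeValue.of(1, ByteSizeUnit.GB);
    ByteSizeValue b = ByteSizeValue.ofBytes(1073741824L);
    assert gb.hashCode() == b.hashCode(); // both reduce to Long.hashCode(1073741824L)
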
@@ -87,19 +87,19 @@ public final class HttpTransportSettings {
     );
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting(
         "http.max_content_length",
-        new ByteSizeValue(100, ByteSizeUnit.MB),
+        ByteSizeValue.of(100, ByteSizeUnit.MB),
         ByteSizeValue.ZERO,
         ByteSizeValue.ofBytes(Integer.MAX_VALUE),
         Property.NodeScope
     );
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting(
         "http.max_chunk_size",
-        new ByteSizeValue(8, ByteSizeUnit.KB),
+        ByteSizeValue.of(8, ByteSizeUnit.KB),
         Property.NodeScope
     );
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting(
         "http.max_header_size",
-        new ByteSizeValue(16, ByteSizeUnit.KB),
+        ByteSizeValue.of(16, ByteSizeUnit.KB),
         Property.NodeScope
     );
     public static final Setting<Integer> SETTING_HTTP_MAX_WARNING_HEADER_COUNT = intSetting(
@@ -115,7 +115,7 @@ public final class HttpTransportSettings {
     );
     public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting(
         "http.max_initial_line_length",
-        new ByteSizeValue(4, ByteSizeUnit.KB),
+        ByteSizeValue.of(4, ByteSizeUnit.KB),
         Property.NodeScope
     );
 
@@ -353,7 +353,7 @@ public final class IndexSettings {
         /*
          * Prevent the translog from growing over 10GB or 20% of the recommended shard size of 50GB. This helps bound the maximum disk usage
          * overhead of translogs.
         */
-        new ByteSizeValue(10, ByteSizeUnit.GB),
+        ByteSizeValue.of(10, ByteSizeUnit.GB),
        /*
         * An empty translog occupies 55 bytes on disk. If the flush threshold is below this, the flush thread
         * can get stuck in an infinite loop as the shouldPeriodicallyFlush can still be true after flushing.
@@ -385,7 +385,7 @@
     */
    public static final Setting<ByteSizeValue> INDEX_FLUSH_AFTER_MERGE_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting(
        "index.flush_after_merge",
-        new ByteSizeValue(512, ByteSizeUnit.MB),
+        ByteSizeValue.of(512, ByteSizeUnit.MB),
        ByteSizeValue.ZERO, // always flush after merge
        ByteSizeValue.ofBytes(Long.MAX_VALUE), // never flush after merge
        Property.Dynamic,
@@ -398,7 +398,7 @@
     */
    public static final Setting<ByteSizeValue> INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING = Setting.byteSizeSetting(
        "index.translog.generation_threshold_size",
-        new ByteSizeValue(64, ByteSizeUnit.MB),
+        ByteSizeValue.of(64, ByteSizeUnit.MB),
        /*
         * An empty translog occupies 55 bytes on disk. If the generation threshold is
         * below this, the flush thread can get stuck in an infinite loop repeatedly
@@ -1431,7 +1431,7 @@
        }
        assert onePercentOfTotalDiskSpace > Translog.DEFAULT_HEADER_SIZE_IN_BYTES;
        if (onePercentOfTotalDiskSpace < flushThresholdSize.getBytes()) {
-            return new ByteSizeValue(onePercentOfTotalDiskSpace, ByteSizeUnit.BYTES);
+            return ByteSizeValue.of(onePercentOfTotalDiskSpace, ByteSizeUnit.BYTES);
        } else {
            return flushThresholdSize;
        }
@@ -116,9 +116,9 @@ public final class MergePolicyConfig {
     private final ByteSizeValue defaultMaxTimeBasedMergedSegment;
 
     public static final double DEFAULT_EXPUNGE_DELETES_ALLOWED = 10d;
-    public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = new ByteSizeValue(2, ByteSizeUnit.MB);
+    public static final ByteSizeValue DEFAULT_FLOOR_SEGMENT = ByteSizeValue.of(2, ByteSizeUnit.MB);
     public static final int DEFAULT_MAX_MERGE_AT_ONCE = 10;
-    public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = new ByteSizeValue(5, ByteSizeUnit.GB);
+    public static final ByteSizeValue DEFAULT_MAX_MERGED_SEGMENT = ByteSizeValue.of(5, ByteSizeUnit.GB);
     public static final Setting<ByteSizeValue> DEFAULT_MAX_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting(
         "indices.merge.policy.max_merged_segment",
         DEFAULT_MAX_MERGED_SEGMENT,
@@ -131,7 +131,7 @@
     * of merging fewer segments together than the merge factor, which in-turn increases write amplification. So we set an arbitrarily high
     * roof that serves as a protection that we expect to never hit.
     */
-    public static final ByteSizeValue DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT = new ByteSizeValue(100, ByteSizeUnit.GB);
+    public static final ByteSizeValue DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT = ByteSizeValue.of(100, ByteSizeUnit.GB);
     public static final Setting<ByteSizeValue> DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT_SETTING = Setting.byteSizeSetting(
         "indices.merge.policy.max_time_based_merged_segment",
         DEFAULT_MAX_TIME_BASED_MERGED_SEGMENT,
@@ -266,7 +266,7 @@ public class InternalEngine extends Engine {
         );
         assert translog.getGeneration() != null;
         this.translog = translog;
-        this.totalDiskSpace = new ByteSizeValue(Environment.getFileStore(translog.location()).getTotalSpace(), ByteSizeUnit.BYTES);
+        this.totalDiskSpace = ByteSizeValue.of(Environment.getFileStore(translog.location()).getTotalSpace(), ByteSizeUnit.BYTES);
         this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo();
         this.softDeletesPolicy = newSoftDeletesPolicy();
         this.combinedDeletionPolicy = new CombinedDeletionPolicy(
@@ -49,7 +49,7 @@ public class PrimaryReplicaSyncer {
     private final TransportService transportService;
     private final SyncAction syncAction;
 
-    public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB);
+    public static final ByteSizeValue DEFAULT_CHUNK_SIZE = ByteSizeValue.of(512, ByteSizeUnit.KB);
 
     private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE;
 
@@ -26,7 +26,7 @@ import java.nio.file.Path;
  */
 public final class TranslogConfig {
 
-    public static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(1, ByteSizeUnit.MB);
+    public static final ByteSizeValue DEFAULT_BUFFER_SIZE = ByteSizeValue.of(1, ByteSizeUnit.MB);
     public static final ByteSizeValue EMPTY_TRANSLOG_BUFFER_SIZE = ByteSizeValue.ofBytes(10);
     public static final OperationListener NOOP_OPERATION_LISTENER = (d, s, l) -> {};
 
@@ -56,7 +56,7 @@ public class IndexingMemoryController implements IndexingOperationListener, Closeable {
     * to set a floor on the actual size in bytes (default: 48 MB). */
    public static final Setting<ByteSizeValue> MIN_INDEX_BUFFER_SIZE_SETTING = Setting.byteSizeSetting(
        "indices.memory.min_index_buffer_size",
-        new ByteSizeValue(48, ByteSizeUnit.MB),
+        ByteSizeValue.of(48, ByteSizeUnit.MB),
        ByteSizeValue.ZERO,
        ByteSizeValue.ofBytes(Long.MAX_VALUE),
        Property.NodeScope
@@ -201,7 +201,7 @@ public class RecoverySettings {
         return s -> Setting.parseDouble(s, 0d, 1d, key, false);
     }
 
-    static final ByteSizeValue DEFAULT_MAX_BYTES_PER_SEC = new ByteSizeValue(40L, ByteSizeUnit.MB);
+    static final ByteSizeValue DEFAULT_MAX_BYTES_PER_SEC = ByteSizeValue.of(40L, ByteSizeUnit.MB);
 
     public static final Setting<ByteSizeValue> INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting(
         "indices.recovery.max_bytes_per_sec",
@@ -227,16 +227,16 @@
             */
            final ByteSizeValue totalPhysicalMemory = TOTAL_PHYSICAL_MEMORY_OVERRIDING_TEST_SETTING.get(s);
            final ByteSizeValue maxBytesPerSec;
-            if (totalPhysicalMemory.compareTo(new ByteSizeValue(4, ByteSizeUnit.GB)) <= 0) {
-                maxBytesPerSec = new ByteSizeValue(40, ByteSizeUnit.MB);
-            } else if (totalPhysicalMemory.compareTo(new ByteSizeValue(8, ByteSizeUnit.GB)) <= 0) {
-                maxBytesPerSec = new ByteSizeValue(60, ByteSizeUnit.MB);
-            } else if (totalPhysicalMemory.compareTo(new ByteSizeValue(16, ByteSizeUnit.GB)) <= 0) {
-                maxBytesPerSec = new ByteSizeValue(90, ByteSizeUnit.MB);
-            } else if (totalPhysicalMemory.compareTo(new ByteSizeValue(32, ByteSizeUnit.GB)) <= 0) {
-                maxBytesPerSec = new ByteSizeValue(125, ByteSizeUnit.MB);
+            if (totalPhysicalMemory.compareTo(ByteSizeValue.of(4, ByteSizeUnit.GB)) <= 0) {
+                maxBytesPerSec = ByteSizeValue.of(40, ByteSizeUnit.MB);
+            } else if (totalPhysicalMemory.compareTo(ByteSizeValue.of(8, ByteSizeUnit.GB)) <= 0) {
+                maxBytesPerSec = ByteSizeValue.of(60, ByteSizeUnit.MB);
+            } else if (totalPhysicalMemory.compareTo(ByteSizeValue.of(16, ByteSizeUnit.GB)) <= 0) {
+                maxBytesPerSec = ByteSizeValue.of(90, ByteSizeUnit.MB);
+            } else if (totalPhysicalMemory.compareTo(ByteSizeValue.of(32, ByteSizeUnit.GB)) <= 0) {
+                maxBytesPerSec = ByteSizeValue.of(125, ByteSizeUnit.MB);
            } else {
-                maxBytesPerSec = new ByteSizeValue(250, ByteSizeUnit.MB);
+                maxBytesPerSec = ByteSizeValue.of(250, ByteSizeUnit.MB);
            }
            return maxBytesPerSec.getStringRep();
        },
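
For reference, the branches above map total physical memory to a default recovery bandwidth of: up to 4gb -> 40mb, up to 8gb -> 60mb, up to 16gb -> 90mb, up to 32gb -> 125mb, and 250mb beyond that. Only the construction of the threshold values changed here, not the tiers themselves.
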
@@ -397,7 +397,7 @@
        Property.NodeScope
    );
 
-    public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB);
+    public static final ByteSizeValue DEFAULT_CHUNK_SIZE = ByteSizeValue.of(512, ByteSizeUnit.KB);
 
    /**
     * The maximum allowable size, in bytes, for buffering source documents during recovery.
@@ -102,14 +102,14 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContentFragment {
     }
 
     public void setEffectiveWatermarks(final DiskThresholdSettings masterThresholdSettings, boolean isDedicatedFrozenNode) {
-        lowWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdLowStage(new ByteSizeValue(total, ByteSizeUnit.BYTES));
-        highWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdHighStage(new ByteSizeValue(total, ByteSizeUnit.BYTES));
+        lowWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdLowStage(ByteSizeValue.of(total, ByteSizeUnit.BYTES));
+        highWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdHighStage(ByteSizeValue.of(total, ByteSizeUnit.BYTES));
         floodStageWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdFloodStage(
-            new ByteSizeValue(total, ByteSizeUnit.BYTES)
+            ByteSizeValue.of(total, ByteSizeUnit.BYTES)
         );
         if (isDedicatedFrozenNode) {
             frozenFloodStageWatermarkFreeSpace = masterThresholdSettings.getFreeBytesThresholdFrozenFloodStage(
-                new ByteSizeValue(total, ByteSizeUnit.BYTES)
+                ByteSizeValue.of(total, ByteSizeUnit.BYTES)
             );
         }
     }
@@ -261,7 +261,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEventListener {
 
     public static final Setting<ByteSizeValue> MAX_ASYNC_SEARCH_RESPONSE_SIZE_SETTING = Setting.byteSizeSetting(
         "search.max_async_search_response_size",
-        new ByteSizeValue(10, ByteSizeUnit.MB),
+        ByteSizeValue.of(10, ByteSizeUnit.MB),
         Property.Dynamic,
         Property.NodeScope
     );
@@ -649,7 +649,7 @@ public class ThreadPool implements ReportingService<ThreadPoolInfo>, Scheduler,
     static int getMaxSnapshotThreadPoolSize(int allocatedProcessors, final ByteSizeValue maxHeapSize) {
         // While on larger data nodes, larger snapshot threadpool size improves snapshotting on high latency blob stores,
         // smaller instances can run into OOM issues and need a smaller snapshot threadpool size.
-        if (maxHeapSize.compareTo(new ByteSizeValue(750, ByteSizeUnit.MB)) < 0) {
+        if (maxHeapSize.compareTo(ByteSizeValue.of(750, ByteSizeUnit.MB)) < 0) {
             return halfAllocatedProcessorsMaxFive(allocatedProcessors);
         }
         return 10;
@@ -40,11 +40,11 @@ public class InboundDecoder implements Releasable {
     private final ChannelType channelType;
 
     public InboundDecoder(Recycler<BytesRef> recycler) {
-        this(recycler, new ByteSizeValue(2, ByteSizeUnit.GB), ChannelType.MIX);
+        this(recycler, ByteSizeValue.of(2, ByteSizeUnit.GB), ChannelType.MIX);
     }
 
     public InboundDecoder(Recycler<BytesRef> recycler, ChannelType channelType) {
-        this(recycler, new ByteSizeValue(2, ByteSizeUnit.GB), channelType);
+        this(recycler, ByteSizeValue.of(2, ByteSizeUnit.GB), channelType);
     }
 
     public InboundDecoder(Recycler<BytesRef> recycler, ByteSizeValue maxHeaderSize, ChannelType channelType) {
@@ -135,9 +135,9 @@ public class RemoteClusterPortSettings {
 
     public static final Setting<ByteSizeValue> MAX_REQUEST_HEADER_SIZE = Setting.byteSizeSetting(
         REMOTE_CLUSTER_PREFIX + "max_request_header_size",
-        new ByteSizeValue(64, ByteSizeUnit.KB), // should cover typical querying user/key authn serialized to the fulfilling cluster
-        new ByteSizeValue(64, ByteSizeUnit.BYTES), // toBytes must be higher than fixed header length
-        new ByteSizeValue(2, ByteSizeUnit.GB), // toBytes must be lower than INT_MAX (>2 GB)
+        ByteSizeValue.of(64, ByteSizeUnit.KB), // should cover typical querying user/key authn serialized to the fulfilling cluster
+        ByteSizeValue.of(64, ByteSizeUnit.BYTES), // toBytes must be higher than fixed header length
+        ByteSizeValue.of(2, ByteSizeUnit.GB), // toBytes must be lower than INT_MAX (>2 GB)
         Setting.Property.NodeScope
     );
 
@@ -84,22 +84,22 @@ public class RolloverConditionsTests extends AbstractXContentSerializingTestCase<RolloverConditions> {
         switch (between(0, 9)) {
             case 0 -> maxSize = randomValueOtherThan(maxSize, () -> {
                 ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values());
-                return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit);
+                return ByteSizeValue.of(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit);
             });
             case 1 -> maxPrimaryShardSize = randomValueOtherThan(maxPrimaryShardSize, () -> {
                 ByteSizeUnit maxPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
-                return new ByteSizeValue(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit);
+                return ByteSizeValue.of(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit);
             });
             case 2 -> maxAge = randomValueOtherThan(maxAge, () -> randomPositiveTimeValue());
             case 3 -> maxDocs = maxDocs == null ? randomNonNegativeLong() : maxDocs + 1;
             case 4 -> maxPrimaryShardDocs = maxPrimaryShardDocs == null ? randomNonNegativeLong() : maxPrimaryShardDocs + 1;
             case 5 -> minSize = randomValueOtherThan(minSize, () -> {
                 ByteSizeUnit minSizeUnit = randomFrom(ByteSizeUnit.values());
-                return new ByteSizeValue(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit);
+                return ByteSizeValue.of(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit);
             });
             case 6 -> minPrimaryShardSize = randomValueOtherThan(minPrimaryShardSize, () -> {
                 ByteSizeUnit minPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
-                return new ByteSizeValue(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit);
+                return ByteSizeValue.of(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit);
             });
             case 7 -> minAge = randomValueOtherThan(minAge, () -> randomPositiveTimeValue());
             case 8 -> minDocs = minDocs == null ? randomNonNegativeLong() : minDocs + 1;
@@ -67,7 +67,7 @@ public class ResizeRequestTests extends AbstractWireSerializingTestCase<ResizeRequest> {
         }
         {
             ResizeRequest request = new ResizeRequest("target", "source");
-            request.setMaxPrimaryShardSize(new ByteSizeValue(100, ByteSizeUnit.MB));
+            request.setMaxPrimaryShardSize(ByteSizeValue.of(100, ByteSizeUnit.MB));
             String actualRequestBody = Strings.toString(request);
             assertEquals("""
                 {"settings":{},"aliases":{},"max_primary_shard_size":"100mb"}""", actualRequestBody);
@@ -175,7 +175,7 @@ public class BulkProcessor2Tests extends ESTestCase {
             countingListener(requestCount, successCount, failureCount, docCount, exceptionRef),
             maxBatchSize,
             ByteSizeValue.ofBytes(Integer.MAX_VALUE),
-            new ByteSizeValue(50, ByteSizeUnit.MB),
+            ByteSizeValue.of(50, ByteSizeUnit.MB),
             null,
             threadPool
         );
@@ -280,7 +280,7 @@ public class BulkProcessor2Tests extends ESTestCase {
             countingListener(requestCount, successCount, failureCount, docCount, exceptionRef),
             maxBatchSize,
             ByteSizeValue.ofBytes(Integer.MAX_VALUE),
-            new ByteSizeValue(50, ByteSizeUnit.MB),
+            ByteSizeValue.of(50, ByteSizeUnit.MB),
             TimeValue.timeValueMillis(simulateWorkTimeInMillis * 2),
             threadPool
         );
@@ -98,7 +98,7 @@ public class BulkProcessorTests extends ESTestCase {
             emptyListener(),
             1,
             bulkSize,
-            new ByteSizeValue(5, ByteSizeUnit.MB),
+            ByteSizeValue.of(5, ByteSizeUnit.MB),
             flushInterval,
             threadPool,
             () -> {},
@@ -62,7 +62,7 @@ public class MemorySizeSettingsTests extends ESTestCase {
     public void testCircuitBreakerSettings() {
         // default is chosen based on actual heap size
         double defaultTotalPercentage;
-        if (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() < new ByteSizeValue(1, ByteSizeUnit.GB).getBytes()) {
+        if (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() < ByteSizeValue.of(1, ByteSizeUnit.GB).getBytes()) {
             defaultTotalPercentage = 0.95d;
         } else {
             defaultTotalPercentage = 0.7d;
@@ -70,8 +70,8 @@ public class SettingTests extends ESTestCase {
     public void testByteSizeSettingMinValue() {
         final Setting<ByteSizeValue> byteSizeValueSetting = Setting.byteSizeSetting(
             "a.byte.size",
-            new ByteSizeValue(100, ByteSizeUnit.MB),
-            new ByteSizeValue(20_000_000, ByteSizeUnit.BYTES),
+            ByteSizeValue.of(100, ByteSizeUnit.MB),
+            ByteSizeValue.of(20_000_000, ByteSizeUnit.BYTES),
             ByteSizeValue.ofBytes(Integer.MAX_VALUE)
         );
         final long value = 20_000_000 - randomIntBetween(1, 1024);
@@ -84,8 +84,8 @@ public class SettingTests extends ESTestCase {
     public void testByteSizeSettingMaxValue() {
         final Setting<ByteSizeValue> byteSizeValueSetting = Setting.byteSizeSetting(
             "a.byte.size",
-            new ByteSizeValue(100, ByteSizeUnit.MB),
-            new ByteSizeValue(16, ByteSizeUnit.MB),
+            ByteSizeValue.of(100, ByteSizeUnit.MB),
+            ByteSizeValue.of(16, ByteSizeUnit.MB),
             ByteSizeValue.ofBytes(Integer.MAX_VALUE)
         );
         final long value = (1L << 31) - 1 + randomIntBetween(1, 1024);
@@ -659,7 +659,7 @@ public class SettingsTests extends ESTestCase {
             "key",
             ByteSizeValue.parseBytesSizeValue(randomIntBetween(1, 16) + "k", "key")
         );
-        final ByteSizeValue expected = new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES);
+        final ByteSizeValue expected = ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES);
         final Settings settings = Settings.builder().put("key", expected).build();
         /*
          * Previously we would internally convert the byte size value to a string using a method that tries to be smart about the units
@@ -10,11 +10,16 @@
 package org.elasticsearch.common.unit;
 
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.Writeable.Reader;
 import org.elasticsearch.test.AbstractWireSerializingTestCase;
+import org.elasticsearch.test.ESTestCase;
 import org.hamcrest.MatcherAssert;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.function.Function;
 
 import static org.hamcrest.Matchers.containsString;
@@ -23,24 +28,24 @@ import static org.hamcrest.Matchers.is;
 
 public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSizeValue> {
     public void testActualPeta() {
-        MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).getBytes(), equalTo(4503599627370496L));
+        MatcherAssert.assertThat(ByteSizeValue.of(4, ByteSizeUnit.PB).getBytes(), equalTo(4503599627370496L));
     }
 
     public void testActualTera() {
-        MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.TB).getBytes(), equalTo(4398046511104L));
+        MatcherAssert.assertThat(ByteSizeValue.of(4, ByteSizeUnit.TB).getBytes(), equalTo(4398046511104L));
     }
 
     public void testActual() {
-        MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).getBytes(), equalTo(4294967296L));
+        MatcherAssert.assertThat(ByteSizeValue.of(4, ByteSizeUnit.GB).getBytes(), equalTo(4294967296L));
     }
 
     public void testSimple() {
-        assertThat(ByteSizeUnit.BYTES.toBytes(10), is(new ByteSizeValue(10, ByteSizeUnit.BYTES).getBytes()));
-        assertThat(ByteSizeUnit.KB.toKB(10), is(new ByteSizeValue(10, ByteSizeUnit.KB).getKb()));
-        assertThat(ByteSizeUnit.MB.toMB(10), is(new ByteSizeValue(10, ByteSizeUnit.MB).getMb()));
-        assertThat(ByteSizeUnit.GB.toGB(10), is(new ByteSizeValue(10, ByteSizeUnit.GB).getGb()));
-        assertThat(ByteSizeUnit.TB.toTB(10), is(new ByteSizeValue(10, ByteSizeUnit.TB).getTb()));
-        assertThat(ByteSizeUnit.PB.toPB(10), is(new ByteSizeValue(10, ByteSizeUnit.PB).getPb()));
+        assertThat(ByteSizeUnit.BYTES.toBytes(10), is(ByteSizeValue.of(10, ByteSizeUnit.BYTES).getBytes()));
+        assertThat(ByteSizeUnit.KB.toKB(10), is(ByteSizeValue.of(10, ByteSizeUnit.KB).getKb()));
+        assertThat(ByteSizeUnit.MB.toMB(10), is(ByteSizeValue.of(10, ByteSizeUnit.MB).getMb()));
+        assertThat(ByteSizeUnit.GB.toGB(10), is(ByteSizeValue.of(10, ByteSizeUnit.GB).getGb()));
+        assertThat(ByteSizeUnit.TB.toTB(10), is(ByteSizeValue.of(10, ByteSizeUnit.TB).getTb()));
+        assertThat(ByteSizeUnit.PB.toPB(10), is(ByteSizeValue.of(10, ByteSizeUnit.PB).getPb()));
     }
 
     public void testToIntBytes() {
@@ -60,13 +65,13 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSizeValue> {
     }
 
     public void testToString() {
-        assertThat("10b", is(new ByteSizeValue(10, ByteSizeUnit.BYTES).toString()));
-        assertThat("1.5kb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.BYTES).toString()));
-        assertThat("1.5mb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.KB).toString()));
-        assertThat("1.5gb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.MB).toString()));
-        assertThat("1.5tb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.GB).toString()));
-        assertThat("1.5pb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.TB).toString()));
-        assertThat("1536pb", is(new ByteSizeValue((long) (1024 * 1.5), ByteSizeUnit.PB).toString()));
+        assertThat("10b", is(ByteSizeValue.of(10, ByteSizeUnit.BYTES).toString()));
+        assertThat("1.5kb", is(ByteSizeValue.of((long) (1024 * 1.5), ByteSizeUnit.BYTES).toString()));
+        assertThat("1.5mb", is(ByteSizeValue.of((long) (1024 * 1.5), ByteSizeUnit.KB).toString()));
+        assertThat("1.5gb", is(ByteSizeValue.of((long) (1024 * 1.5), ByteSizeUnit.MB).toString()));
+        assertThat("1.5tb", is(ByteSizeValue.of((long) (1024 * 1.5), ByteSizeUnit.GB).toString()));
+        assertThat("1.5pb", is(ByteSizeValue.of((long) (1024 * 1.5), ByteSizeUnit.TB).toString()));
+        assertThat("1536pb", is(ByteSizeValue.of((long) (1024 * 1.5), ByteSizeUnit.PB).toString()));
     }
 
     public void testParsing() {
@@ -155,8 +160,8 @@
     public void testCompareEquality() {
         ByteSizeUnit randomUnit = randomFrom(ByteSizeUnit.values());
         long firstRandom = randomNonNegativeLong() / randomUnit.toBytes(1);
-        ByteSizeValue firstByteValue = new ByteSizeValue(firstRandom, randomUnit);
-        ByteSizeValue secondByteValue = new ByteSizeValue(firstRandom, randomUnit);
+        ByteSizeValue firstByteValue = ByteSizeValue.of(firstRandom, randomUnit);
+        ByteSizeValue secondByteValue = ByteSizeValue.of(firstRandom, randomUnit);
         assertEquals(0, firstByteValue.compareTo(secondByteValue));
     }
 
@@ -164,8 +169,8 @@
         ByteSizeUnit unit = randomFrom(ByteSizeUnit.values());
         long firstRandom = randomNonNegativeLong() / unit.toBytes(1);
         long secondRandom = randomValueOtherThan(firstRandom, () -> randomNonNegativeLong() / unit.toBytes(1));
-        ByteSizeValue firstByteValue = new ByteSizeValue(firstRandom, unit);
-        ByteSizeValue secondByteValue = new ByteSizeValue(secondRandom, unit);
+        ByteSizeValue firstByteValue = ByteSizeValue.of(firstRandom, unit);
+        ByteSizeValue secondByteValue = ByteSizeValue.of(secondRandom, unit);
         assertEquals(firstRandom > secondRandom, firstByteValue.compareTo(secondByteValue) > 0);
         assertEquals(secondRandom > firstRandom, secondByteValue.compareTo(firstByteValue) > 0);
     }
@@ -173,17 +178,20 @@
     public void testCompareUnits() {
         long number = randomLongBetween(1, Long.MAX_VALUE / ByteSizeUnit.PB.toBytes(1));
         ByteSizeUnit randomUnit = randomValueOtherThan(ByteSizeUnit.PB, () -> randomFrom(ByteSizeUnit.values()));
-        ByteSizeValue firstByteValue = new ByteSizeValue(number, randomUnit);
-        ByteSizeValue secondByteValue = new ByteSizeValue(number, ByteSizeUnit.PB);
+        ByteSizeValue firstByteValue = ByteSizeValue.of(number, randomUnit);
+        ByteSizeValue secondByteValue = ByteSizeValue.of(number, ByteSizeUnit.PB);
         assertTrue(firstByteValue.compareTo(secondByteValue) < 0);
         assertTrue(secondByteValue.compareTo(firstByteValue) > 0);
     }
 
     public void testOutOfRange() {
         // Make sure a value of > Long.MAX_VALUE bytes throws an exception
-        ByteSizeUnit unit = randomValueOtherThan(ByteSizeUnit.BYTES, () -> randomFrom(ByteSizeUnit.values()));
+        for (ByteSizeUnit unit : ByteSizeUnit.values()) {
+            if (unit == ByteSizeUnit.BYTES) {
+                continue;
+            }
         long size = (long) randomDouble() * unit.toBytes(1) + (Long.MAX_VALUE - unit.toBytes(1));
-        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> new ByteSizeValue(size, unit));
+        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> ByteSizeValue.of(size, unit));
         assertEquals(
             "Values greater than " + Long.MAX_VALUE + " bytes are not supported: " + size + unit.getSuffix(),
             exception.getMessage()
@ -192,19 +200,20 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSize
|
||||||
// Make sure for units other than BYTES a size of -1 throws an exception
|
// Make sure for units other than BYTES a size of -1 throws an exception
|
||||||
ByteSizeUnit unit2 = randomValueOtherThan(ByteSizeUnit.BYTES, () -> randomFrom(ByteSizeUnit.values()));
|
ByteSizeUnit unit2 = randomValueOtherThan(ByteSizeUnit.BYTES, () -> randomFrom(ByteSizeUnit.values()));
|
||||||
long size2 = -1L;
|
long size2 = -1L;
|
||||||
exception = expectThrows(IllegalArgumentException.class, () -> new ByteSizeValue(size2, unit2));
|
exception = expectThrows(IllegalArgumentException.class, () -> ByteSizeValue.of(size2, unit2));
|
||||||
assertEquals("Values less than -1 bytes are not supported: " + size2 + unit2.getSuffix(), exception.getMessage());
|
assertEquals("Values less than -1 bytes are not supported: " + size2 + unit2.getSuffix(), exception.getMessage());
|
||||||
|
|
||||||
// Make sure for any unit a size < -1 throws an exception
|
// Make sure for any unit a size < -1 throws an exception
|
||||||
ByteSizeUnit unit3 = randomFrom(ByteSizeUnit.values());
|
ByteSizeUnit unit3 = randomFrom(ByteSizeUnit.values());
|
||||||
long size3 = -1L * randomNonNegativeLong() - 1L;
|
long size3 = -1L * randomNonNegativeLong() - 1L;
|
||||||
exception = expectThrows(IllegalArgumentException.class, () -> new ByteSizeValue(size3, unit3));
|
exception = expectThrows(IllegalArgumentException.class, () -> ByteSizeValue.of(size3, unit3));
|
||||||
assertEquals("Values less than -1 bytes are not supported: " + size3 + unit3.getSuffix(), exception.getMessage());
|
assertEquals("Values less than -1 bytes are not supported: " + size3 + unit3.getSuffix(), exception.getMessage());
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
public void testConversionHashCode() {
|
public void testConversionHashCode() {
|
||||||
ByteSizeValue firstValue = new ByteSizeValue(randomIntBetween(0, Integer.MAX_VALUE), ByteSizeUnit.GB);
|
ByteSizeValue firstValue = ByteSizeValue.of(randomIntBetween(0, Integer.MAX_VALUE), ByteSizeUnit.GB);
|
||||||
ByteSizeValue secondValue = new ByteSizeValue(firstValue.getBytes(), ByteSizeUnit.BYTES);
|
ByteSizeValue secondValue = ByteSizeValue.of(firstValue.getBytes(), ByteSizeUnit.BYTES);
|
||||||
assertEquals(firstValue.hashCode(), secondValue.hashCode());
|
assertEquals(firstValue.hashCode(), secondValue.hashCode());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -216,7 +225,7 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSize
|
||||||
if (size > Long.MAX_VALUE / unit.toBytes(1)) {
|
if (size > Long.MAX_VALUE / unit.toBytes(1)) {
|
||||||
throw new AssertionError();
|
throw new AssertionError();
|
||||||
}
|
}
|
||||||
return new ByteSizeValue(size, unit);
|
return ByteSizeValue.of(size, unit);
|
||||||
} else {
|
} else {
|
||||||
return ByteSizeValue.ofBytes(randomNonNegativeLong());
|
return ByteSizeValue.ofBytes(randomNonNegativeLong());
|
||||||
}
|
}
|
||||||
|
@ -228,38 +237,11 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSize
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected ByteSizeValue mutateInstance(final ByteSizeValue instance) {
|
protected ByteSizeValue mutateInstance(final ByteSizeValue original) {
|
||||||
final long instanceSize = instance.getSize();
|
return new ByteSizeValue(
|
||||||
final ByteSizeUnit instanceUnit = instance.getUnit();
|
randomValueOtherThan(original.getSizeInBytes(), ESTestCase::randomNonNegativeLong),
|
||||||
final long mutateSize;
|
randomFrom(ByteSizeUnit.values())
|
||||||
final ByteSizeUnit mutateUnit;
|
|
||||||
switch (between(0, 1)) {
|
|
||||||
case 0 -> {
|
|
||||||
final long unitBytes = instanceUnit.toBytes(1);
|
|
||||||
mutateSize = randomValueOtherThan(instanceSize, () -> randomNonNegativeLong() / unitBytes);
|
|
||||||
mutateUnit = instanceUnit;
|
|
||||||
}
|
|
||||||
case 1 -> {
|
|
||||||
mutateUnit = randomValueOtherThan(instanceUnit, () -> randomFrom(ByteSizeUnit.values()));
|
|
||||||
final long newUnitBytes = mutateUnit.toBytes(1);
|
|
||||||
/*
|
|
||||||
* If size is zero we can not reuse zero because zero with any unit will be equal to zero with any other
|
|
||||||
* unit so in this case we need to randomize a new size. Additionally, if the size unit pair is such that
|
|
||||||
* the representation would be such that the number of represented bytes would exceed Long.Max_VALUE, we
|
|
||||||
* have to randomize a new size too.
|
|
||||||
*/
|
|
||||||
if (instanceSize == 0 || instanceSize >= Long.MAX_VALUE / newUnitBytes) {
|
|
||||||
mutateSize = randomValueOtherThanMany(
|
|
||||||
v -> v == instanceSize && v >= Long.MAX_VALUE / newUnitBytes,
|
|
||||||
() -> randomNonNegativeLong() / newUnitBytes
|
|
||||||
);
|
);
|
||||||
} else {
|
|
||||||
mutateSize = instanceSize;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default -> throw new AssertionError("Invalid randomisation branch");
|
|
||||||
}
|
|
||||||
return new ByteSizeValue(mutateSize, mutateUnit);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testParse() {
|
public void testParse() {
|
||||||
|
@ -316,21 +298,24 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSize
|
||||||
assertEquals("failed to parse setting [test] with value [notANumber" + unitSuffix + "]", exception.getMessage());
|
assertEquals("failed to parse setting [test] with value [notANumber" + unitSuffix + "]", exception.getMessage());
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testParseFractionalNumber() throws IOException {
|
public void testParseFractionalNumber() {
|
||||||
ByteSizeUnit unit = randomValueOtherThan(ByteSizeUnit.BYTES, () -> randomFrom(ByteSizeUnit.values()));
|
for (var unit : ByteSizeUnit.values()) {
|
||||||
String fractionalValue = "23.5" + unit.getSuffix();
|
if (unit == ByteSizeUnit.BYTES) {
|
||||||
ByteSizeValue instance = ByteSizeValue.parseBytesSizeValue(fractionalValue, "test");
|
continue;
|
||||||
assertEquals(fractionalValue, instance.toString());
|
}
|
||||||
assertWarnings(
|
for (int tenths = 1; tenths <= 9; tenths++) {
|
||||||
"Fractional bytes values are deprecated. Use non-fractional bytes values instead: ["
|
checkFractionRoundTrip("23." + tenths + unit.getSuffix());
|
||||||
+ fractionalValue
|
}
|
||||||
+ "] found for setting [test]"
|
}
|
||||||
);
|
}
|
||||||
|
|
||||||
|
private void checkFractionRoundTrip(String fractionalValue) {
|
||||||
|
assertEquals(fractionalValue, ByteSizeValue.parseBytesSizeValue(fractionalValue, "test").toString());
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testGetBytesAsInt() {
|
public void testGetBytesAsInt() {
|
||||||
for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) {
|
for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) {
|
||||||
ByteSizeValue instance = new ByteSizeValue(randomIntBetween(1, 1000), randomFrom(ByteSizeUnit.values()));
|
ByteSizeValue instance = ByteSizeValue.of(randomIntBetween(1, 1000), randomFrom(ByteSizeUnit.values()));
|
||||||
long bytesValue = instance.getBytes();
|
long bytesValue = instance.getBytes();
|
||||||
if (bytesValue > Integer.MAX_VALUE) {
|
if (bytesValue > Integer.MAX_VALUE) {
|
||||||
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> instance.bytesAsInt());
|
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> instance.bytesAsInt());
|
||||||
|
@ -368,7 +353,7 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSize
|
||||||
private void testOf(ByteSizeUnit unit, Function<Long, ByteSizeValue> byteSizeValueFunction) {
|
private void testOf(ByteSizeUnit unit, Function<Long, ByteSizeValue> byteSizeValueFunction) {
|
||||||
for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) {
|
for (int i = 0; i < NUMBER_OF_TEST_RUNS; i++) {
|
||||||
long size = randomIntBetween(1, 1000);
|
long size = randomIntBetween(1, 1000);
|
||||||
ByteSizeValue expected = new ByteSizeValue(size, unit);
|
ByteSizeValue expected = ByteSizeValue.of(size, unit);
|
||||||
ByteSizeValue actual = byteSizeValueFunction.apply(size);
|
ByteSizeValue actual = byteSizeValueFunction.apply(size);
|
||||||
assertThat(actual, equalTo(expected));
|
assertThat(actual, equalTo(expected));
|
||||||
}
|
}
|
||||||
|
@ -381,27 +366,27 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSize
|
||||||
assertThat(ByteSizeValue.add(ByteSizeValue.ofBytes(100L), ByteSizeValue.ONE), is(ByteSizeValue.ofBytes(101L)));
|
assertThat(ByteSizeValue.add(ByteSizeValue.ofBytes(100L), ByteSizeValue.ONE), is(ByteSizeValue.ofBytes(101L)));
|
||||||
assertThat(ByteSizeValue.add(ByteSizeValue.ofBytes(100L), ByteSizeValue.ofBytes(2L)), is(ByteSizeValue.ofBytes(102L)));
|
assertThat(ByteSizeValue.add(ByteSizeValue.ofBytes(100L), ByteSizeValue.ofBytes(2L)), is(ByteSizeValue.ofBytes(102L)));
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.add(new ByteSizeValue(8, ByteSizeUnit.KB), new ByteSizeValue(4, ByteSizeUnit.KB)),
|
ByteSizeValue.add(ByteSizeValue.of(8, ByteSizeUnit.KB), ByteSizeValue.of(4, ByteSizeUnit.KB)),
|
||||||
is(ByteSizeValue.ofBytes(12288L))
|
is(ByteSizeValue.ofBytes(12288L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.add(new ByteSizeValue(8, ByteSizeUnit.MB), new ByteSizeValue(4, ByteSizeUnit.MB)),
|
ByteSizeValue.add(ByteSizeValue.of(8, ByteSizeUnit.MB), ByteSizeValue.of(4, ByteSizeUnit.MB)),
|
||||||
is(ByteSizeValue.ofBytes(12582912L))
|
is(ByteSizeValue.ofBytes(12582912L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.add(new ByteSizeValue(8, ByteSizeUnit.GB), new ByteSizeValue(4, ByteSizeUnit.GB)),
|
ByteSizeValue.add(ByteSizeValue.of(8, ByteSizeUnit.GB), ByteSizeValue.of(4, ByteSizeUnit.GB)),
|
||||||
is(ByteSizeValue.ofBytes(12884901888L))
|
is(ByteSizeValue.ofBytes(12884901888L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.add(new ByteSizeValue(8, ByteSizeUnit.TB), new ByteSizeValue(4, ByteSizeUnit.TB)),
|
ByteSizeValue.add(ByteSizeValue.of(8, ByteSizeUnit.TB), ByteSizeValue.of(4, ByteSizeUnit.TB)),
|
||||||
is(ByteSizeValue.ofBytes(13194139533312L))
|
is(ByteSizeValue.ofBytes(13194139533312L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.add(new ByteSizeValue(8, ByteSizeUnit.PB), new ByteSizeValue(4, ByteSizeUnit.PB)),
|
ByteSizeValue.add(ByteSizeValue.of(8, ByteSizeUnit.PB), ByteSizeValue.of(4, ByteSizeUnit.PB)),
|
||||||
is(ByteSizeValue.ofBytes(13510798882111488L))
|
is(ByteSizeValue.ofBytes(13510798882111488L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.add(new ByteSizeValue(8, ByteSizeUnit.PB), new ByteSizeValue(4, ByteSizeUnit.GB)),
|
ByteSizeValue.add(ByteSizeValue.of(8, ByteSizeUnit.PB), ByteSizeValue.of(4, ByteSizeUnit.GB)),
|
||||||
is(ByteSizeValue.ofBytes(9007203549708288L))
|
is(ByteSizeValue.ofBytes(9007203549708288L))
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -429,27 +414,27 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSize
|
||||||
assertThat(ByteSizeValue.subtract(ByteSizeValue.ofBytes(100L), ByteSizeValue.ONE), is(ByteSizeValue.ofBytes(99L)));
|
assertThat(ByteSizeValue.subtract(ByteSizeValue.ofBytes(100L), ByteSizeValue.ONE), is(ByteSizeValue.ofBytes(99L)));
|
||||||
assertThat(ByteSizeValue.subtract(ByteSizeValue.ofBytes(100L), ByteSizeValue.ofBytes(2L)), is(ByteSizeValue.ofBytes(98L)));
|
assertThat(ByteSizeValue.subtract(ByteSizeValue.ofBytes(100L), ByteSizeValue.ofBytes(2L)), is(ByteSizeValue.ofBytes(98L)));
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.subtract(new ByteSizeValue(8, ByteSizeUnit.KB), new ByteSizeValue(4, ByteSizeUnit.KB)),
|
ByteSizeValue.subtract(ByteSizeValue.of(8, ByteSizeUnit.KB), ByteSizeValue.of(4, ByteSizeUnit.KB)),
|
||||||
is(ByteSizeValue.ofBytes(4096L))
|
is(ByteSizeValue.ofBytes(4096L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.subtract(new ByteSizeValue(8, ByteSizeUnit.MB), new ByteSizeValue(4, ByteSizeUnit.MB)),
|
ByteSizeValue.subtract(ByteSizeValue.of(8, ByteSizeUnit.MB), ByteSizeValue.of(4, ByteSizeUnit.MB)),
|
||||||
is(ByteSizeValue.ofBytes(4194304L))
|
is(ByteSizeValue.ofBytes(4194304L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.subtract(new ByteSizeValue(8, ByteSizeUnit.GB), new ByteSizeValue(4, ByteSizeUnit.GB)),
|
ByteSizeValue.subtract(ByteSizeValue.of(8, ByteSizeUnit.GB), ByteSizeValue.of(4, ByteSizeUnit.GB)),
|
||||||
is(ByteSizeValue.ofBytes(4294967296L))
|
is(ByteSizeValue.ofBytes(4294967296L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.subtract(new ByteSizeValue(8, ByteSizeUnit.TB), new ByteSizeValue(4, ByteSizeUnit.TB)),
|
ByteSizeValue.subtract(ByteSizeValue.of(8, ByteSizeUnit.TB), ByteSizeValue.of(4, ByteSizeUnit.TB)),
|
||||||
is(ByteSizeValue.ofBytes(4398046511104L))
|
is(ByteSizeValue.ofBytes(4398046511104L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.subtract(new ByteSizeValue(8, ByteSizeUnit.PB), new ByteSizeValue(4, ByteSizeUnit.PB)),
|
ByteSizeValue.subtract(ByteSizeValue.of(8, ByteSizeUnit.PB), ByteSizeValue.of(4, ByteSizeUnit.PB)),
|
||||||
is(ByteSizeValue.ofBytes(4503599627370496L))
|
is(ByteSizeValue.ofBytes(4503599627370496L))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.subtract(new ByteSizeValue(8, ByteSizeUnit.PB), new ByteSizeValue(4, ByteSizeUnit.GB)),
|
ByteSizeValue.subtract(ByteSizeValue.of(8, ByteSizeUnit.PB), ByteSizeValue.of(4, ByteSizeUnit.GB)),
|
||||||
is(ByteSizeValue.ofBytes(9007194959773696L))
|
is(ByteSizeValue.ofBytes(9007194959773696L))
|
||||||
);
|
);
|
||||||
|
|
||||||
|
@ -484,37 +469,37 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSize
|
||||||
assertThat(ByteSizeValue.min(ByteSizeValue.ofBytes(2L), ByteSizeValue.ofBytes(100L)), is(ByteSizeValue.ofBytes(2L)));
|
assertThat(ByteSizeValue.min(ByteSizeValue.ofBytes(2L), ByteSizeValue.ofBytes(100L)), is(ByteSizeValue.ofBytes(2L)));
|
||||||
|
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.min(new ByteSizeValue(8, ByteSizeUnit.KB), new ByteSizeValue(4, ByteSizeUnit.KB)),
|
ByteSizeValue.min(ByteSizeValue.of(8, ByteSizeUnit.KB), ByteSizeValue.of(4, ByteSizeUnit.KB)),
|
||||||
is(new ByteSizeValue(4, ByteSizeUnit.KB))
|
is(ByteSizeValue.of(4, ByteSizeUnit.KB))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.min(new ByteSizeValue(4, ByteSizeUnit.MB), new ByteSizeValue(8, ByteSizeUnit.MB)),
|
ByteSizeValue.min(ByteSizeValue.of(4, ByteSizeUnit.MB), ByteSizeValue.of(8, ByteSizeUnit.MB)),
|
||||||
is(new ByteSizeValue(4, ByteSizeUnit.MB))
|
is(ByteSizeValue.of(4, ByteSizeUnit.MB))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.min(new ByteSizeValue(16, ByteSizeUnit.GB), new ByteSizeValue(15, ByteSizeUnit.GB)),
|
ByteSizeValue.min(ByteSizeValue.of(16, ByteSizeUnit.GB), ByteSizeValue.of(15, ByteSizeUnit.GB)),
|
||||||
is(new ByteSizeValue(15, ByteSizeUnit.GB))
|
is(ByteSizeValue.of(15, ByteSizeUnit.GB))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.min(new ByteSizeValue(90, ByteSizeUnit.TB), new ByteSizeValue(91, ByteSizeUnit.TB)),
|
ByteSizeValue.min(ByteSizeValue.of(90, ByteSizeUnit.TB), ByteSizeValue.of(91, ByteSizeUnit.TB)),
|
||||||
is(new ByteSizeValue(90, ByteSizeUnit.TB))
|
is(ByteSizeValue.of(90, ByteSizeUnit.TB))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.min(new ByteSizeValue(2, ByteSizeUnit.PB), new ByteSizeValue(1, ByteSizeUnit.PB)),
|
ByteSizeValue.min(ByteSizeValue.of(2, ByteSizeUnit.PB), ByteSizeValue.of(1, ByteSizeUnit.PB)),
|
||||||
is(new ByteSizeValue(1, ByteSizeUnit.PB))
|
is(ByteSizeValue.of(1, ByteSizeUnit.PB))
|
||||||
);
|
);
|
||||||
assertThat(
|
assertThat(
|
||||||
ByteSizeValue.min(new ByteSizeValue(1, ByteSizeUnit.PB), new ByteSizeValue(1, ByteSizeUnit.GB)),
|
ByteSizeValue.min(ByteSizeValue.of(1, ByteSizeUnit.PB), ByteSizeValue.of(1, ByteSizeUnit.GB)),
|
||||||
is(new ByteSizeValue(1, ByteSizeUnit.GB))
|
is(ByteSizeValue.of(1, ByteSizeUnit.GB))
|
||||||
);
|
);
|
||||||
|
|
||||||
ByteSizeValue equalityResult = ByteSizeValue.min(new ByteSizeValue(1024, ByteSizeUnit.MB), new ByteSizeValue(1, ByteSizeUnit.GB));
|
ByteSizeValue equalityResult = ByteSizeValue.min(ByteSizeValue.of(1024, ByteSizeUnit.MB), ByteSizeValue.of(1, ByteSizeUnit.GB));
|
||||||
assertThat(equalityResult, is(new ByteSizeValue(1024, ByteSizeUnit.MB)));
|
assertThat(equalityResult, is(ByteSizeValue.of(1024, ByteSizeUnit.MB)));
|
||||||
assertThat(equalityResult.getUnit(), is(ByteSizeUnit.MB));
|
assertThat(equalityResult.getDesiredUnit(), is(ByteSizeUnit.MB));
|
||||||
|
|
||||||
equalityResult = ByteSizeValue.min(new ByteSizeValue(1, ByteSizeUnit.GB), new ByteSizeValue(1024, ByteSizeUnit.MB));
|
equalityResult = ByteSizeValue.min(ByteSizeValue.of(1, ByteSizeUnit.GB), ByteSizeValue.of(1024, ByteSizeUnit.MB));
|
||||||
assertThat(equalityResult, is(new ByteSizeValue(1, ByteSizeUnit.GB)));
|
assertThat(equalityResult, is(ByteSizeValue.of(1, ByteSizeUnit.GB)));
|
||||||
assertThat(equalityResult.getUnit(), is(ByteSizeUnit.GB));
|
assertThat(equalityResult.getDesiredUnit(), is(ByteSizeUnit.GB));
|
||||||
|
|
||||||
String exceptionMessage = "one of the arguments has -1 bytes";
|
String exceptionMessage = "one of the arguments has -1 bytes";
|
||||||
Exception e = expectThrows(IllegalArgumentException.class, () -> ByteSizeValue.min(ByteSizeValue.MINUS_ONE, ByteSizeValue.ONE));
|
Exception e = expectThrows(IllegalArgumentException.class, () -> ByteSizeValue.min(ByteSizeValue.MINUS_ONE, ByteSizeValue.ONE));
|
||||||
|
@ -532,4 +517,58 @@ public class ByteSizeValueTests extends AbstractWireSerializingTestCase<ByteSize
|
||||||
assertThat(newInstance, equalTo(expectedInstance));
|
assertThat(newInstance, equalTo(expectedInstance));
|
||||||
assertThat(newInstance.hashCode(), equalTo(expectedInstance.hashCode()));
|
assertThat(newInstance.hashCode(), equalTo(expectedInstance.hashCode()));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public void testBWCTransportFormat() throws IOException {
|
||||||
|
var tenMegs = ByteSizeValue.ofMb(10);
|
||||||
|
try (BytesStreamOutput expected = new BytesStreamOutput(); BytesStreamOutput actual = new BytesStreamOutput()) {
|
||||||
|
expected.writeZLong(10);
|
||||||
|
ByteSizeUnit.MB.writeTo(expected);
|
||||||
|
actual.setTransportVersion(TransportVersions.V_8_16_0);
|
||||||
|
tenMegs.writeTo(actual);
|
||||||
|
assertArrayEquals(
|
||||||
|
"Size denominated in the desired unit for backward compatibility",
|
||||||
|
expected.bytes().array(),
|
||||||
|
actual.bytes().array()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testTwoDigitTransportRoundTrips() throws IOException {
|
||||||
|
TransportVersion tv = TransportVersion.current();
|
||||||
|
for (var desiredUnit : ByteSizeUnit.values()) {
|
||||||
|
if (desiredUnit == ByteSizeUnit.BYTES) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
checkTransportRoundTrip(ByteSizeValue.parseBytesSizeValue("23" + desiredUnit.getSuffix(), "test"), tv);
|
||||||
|
for (int tenths = 1; tenths <= 9; tenths++) {
|
||||||
|
checkTransportRoundTrip(ByteSizeValue.parseBytesSizeValue("23." + tenths + desiredUnit.getSuffix(), "test"), tv);
|
||||||
|
for (int hundredths = 1; hundredths <= 9; hundredths++) {
|
||||||
|
checkTransportRoundTrip(
|
||||||
|
ByteSizeValue.parseBytesSizeValue("23." + tenths + hundredths + desiredUnit.getSuffix(), "test"),
|
||||||
|
tv
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testIntegerTransportRoundTrips() throws IOException {
|
||||||
|
for (var tv : List.of(TransportVersion.current(), TransportVersions.V_8_16_0)) {
|
||||||
|
checkTransportRoundTrip(ByteSizeValue.ONE, tv);
|
||||||
|
checkTransportRoundTrip(ByteSizeValue.ZERO, tv);
|
||||||
|
checkTransportRoundTrip(ByteSizeValue.MINUS_ONE, tv);
|
||||||
|
for (var unit : ByteSizeUnit.values()) {
|
||||||
|
// Try increasing values until we exceed Long.MAX_VALUE and it wraps around negative
|
||||||
|
for (long bytes = unit.toBytes(1); bytes > 0; bytes *= 10) {
|
||||||
|
checkTransportRoundTrip(new ByteSizeValue(bytes, unit), tv);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private void checkTransportRoundTrip(ByteSizeValue original, TransportVersion transportVersion) throws IOException {
|
||||||
|
var deserialized = copyWriteable(original, writableRegistry(), ByteSizeValue::readFrom, transportVersion);
|
||||||
|
assertEquals(original.getSizeInBytes(), deserialized.getSizeInBytes());
|
||||||
|
assertEquals(original.getDesiredUnit(), deserialized.getDesiredUnit());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
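The tests above pin down the reworked API: the public constructor gives way to a ByteSizeValue.of(size, unit) factory, and parsed values with one or two decimal places now round-trip exactly. A minimal sketch of the resulting behavior (the variable names, the setting name "my.setting", and the sample values are illustrative, not part of the commit):

    // Factory method instead of the old constructor:
    ByteSizeValue chunk = ByteSizeValue.of(8, ByteSizeUnit.KB);
    long bytes = chunk.getBytes(); // 8192

    // Values with up to two decimals parse and round-trip through toString(),
    // as exercised by testParseFractionalNumber and testTwoDigitTransportRoundTrips:
    ByteSizeValue parsed = ByteSizeValue.parseBytesSizeValue("23.45mb", "my.setting");
    assert "23.45mb".equals(parsed.toString());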

@@ -21,7 +21,7 @@ import static org.hamcrest.Matchers.is;
 public class RelativeByteSizeValueTests extends ESTestCase {
 
     public void testDeserialization() throws IOException {
-        final var origin1 = new RelativeByteSizeValue(new ByteSizeValue(between(0, 2048), randomFrom(ByteSizeUnit.values())));
+        final var origin1 = new RelativeByteSizeValue(ByteSizeValue.of(between(0, 2048), randomFrom(ByteSizeUnit.values())));
         final var origin2 = new RelativeByteSizeValue(new RatioValue(randomDoubleBetween(0.0, 100.0, true)));
         final RelativeByteSizeValue target1, target2;
 
@@ -39,7 +39,7 @@ public class RelativeByteSizeValueTests extends ESTestCase {
         assertNull(origin1.getRatio());
         assertNull(target1.getRatio());
         assertEquals(origin1.getAbsolute(), target1.getAbsolute());
-        assertEquals(origin1.getAbsolute().getUnit(), target1.getAbsolute().getUnit());
+        assertEquals(origin1.getAbsolute().getDesiredUnit(), target1.getAbsolute().getDesiredUnit());
 
         assertFalse(origin2.isAbsolute());
         assertFalse(target2.isAbsolute());
@@ -63,7 +63,7 @@ public class RelativeByteSizeValueTests extends ESTestCase {
     }
 
     public void testAbsolute() {
-        ByteSizeValue value = new ByteSizeValue(between(0, 100), randomFrom(ByteSizeUnit.values()));
+        ByteSizeValue value = ByteSizeValue.of(between(0, 100), randomFrom(ByteSizeUnit.values()));
         RelativeByteSizeValue parsed = RelativeByteSizeValue.parseRelativeByteSizeValue(value.getStringRep(), "test");
         assertThat(parsed.getAbsolute(), equalTo(value));
         assertThat(parsed.isAbsolute(), is(true));
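As the hunks above show, a RelativeByteSizeValue wraps either an absolute ByteSizeValue or a RatioValue. A rough usage sketch; the sample inputs, including the percentage form, are assumptions for illustration:

    // Absolute form: parses like a plain byte-size value.
    RelativeByteSizeValue absolute = RelativeByteSizeValue.parseRelativeByteSizeValue("512mb", "test");
    assert absolute.isAbsolute();
    assert absolute.getRatio() == null; // no ratio on the absolute form, per the test above

    // Ratio form: a percentage instead of an absolute size (assumed input).
    RelativeByteSizeValue ratio = RelativeByteSizeValue.parseRelativeByteSizeValue("50%", "test");
    assert ratio.isAbsolute() == false;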

@@ -106,7 +106,7 @@ public class BitArrayTests extends ESTestCase {
     }
 
     public void testClearingDoesntAllocate() {
-        ByteSizeValue max = new ByteSizeValue(1, ByteSizeUnit.KB);
+        ByteSizeValue max = ByteSizeValue.of(1, ByteSizeUnit.KB);
         MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), max);
         try (BitArray bitArray = new BitArray(1, bigArrays)) {
             bitArray.clear(100000000);

@@ -164,19 +164,19 @@ public class MergePolicyConfigTests extends ESTestCase {
                 Settings.builder()
                     .put(
                         MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(),
-                        new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)
+                        ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)
                     )
                     .build()
             )
         );
         assertEquals(
             ((TieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(),
-            new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(),
+            ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(),
             0.001
         );
         assertEquals(
             ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(),
-            new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(),
+            ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(),
             0.001
         );
 
@@ -303,12 +303,12 @@ public class MergePolicyConfigTests extends ESTestCase {
         );
         assertEquals(
             ((TieredMergePolicy) indexSettings.getMergePolicy(false)).getFloorSegmentMB(),
-            new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(),
+            ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(),
             0.00
         );
         assertEquals(
             ((LogByteSizeMergePolicy) indexSettings.getMergePolicy(true)).getMinMergeMB(),
-            new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(),
+            ByteSizeValue.of(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(),
             0.00
         );
         assertEquals(

@@ -290,8 +290,8 @@ public class TranslogTests extends ESTestCase {
     private TranslogConfig getTranslogConfig(final Path path, final Settings settings, OperationListener listener) {
         final ByteSizeValue bufferSize = randomFrom(
             TranslogConfig.DEFAULT_BUFFER_SIZE,
-            new ByteSizeValue(8, ByteSizeUnit.KB),
-            new ByteSizeValue(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES)
+            ByteSizeValue.of(8, ByteSizeUnit.KB),
+            ByteSizeValue.of(10 + randomInt(128 * 1024), ByteSizeUnit.BYTES)
         );
 
         final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings);
@@ -1395,7 +1395,7 @@ public class TranslogTests extends ESTestCase {
             temp.getTranslogPath(),
             temp.getIndexSettings(),
             temp.getBigArrays(),
-            new ByteSizeValue(1, ByteSizeUnit.KB),
+            ByteSizeValue.of(1, ByteSizeUnit.KB),
             randomBoolean() ? DiskIoBufferPool.INSTANCE : RANDOMIZING_IO_BUFFERS,
             TranslogConfig.NOOP_OPERATION_LISTENER,
             true
@@ -4080,7 +4080,7 @@ public class TranslogTests extends ESTestCase {
             translogDir,
             IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY),
             NON_RECYCLING_INSTANCE,
-            new ByteSizeValue(1, ByteSizeUnit.KB),
+            ByteSizeValue.of(1, ByteSizeUnit.KB),
             randomBoolean() ? DiskIoBufferPool.INSTANCE : RANDOMIZING_IO_BUFFERS,
             TranslogConfig.NOOP_OPERATION_LISTENER,
             false

@@ -237,7 +237,7 @@ public class IndexingMemoryControllerTests extends IndexShardTestCase {
             Settings.builder().put("indices.memory.index_buffer_size", "0.001%").put("indices.memory.min_index_buffer_size", "6mb").build()
         );
 
-        assertThat(controller.indexingBufferSize(), equalTo(new ByteSizeValue(6, ByteSizeUnit.MB).getBytes()));
+        assertThat(controller.indexingBufferSize(), equalTo(ByteSizeValue.of(6, ByteSizeUnit.MB).getBytes()));
     }
 
     public void testNegativeMinIndexBufferSize() {
@@ -289,7 +289,7 @@ public class IndexingMemoryControllerTests extends IndexShardTestCase {
             Settings.builder().put("indices.memory.index_buffer_size", "90%").put("indices.memory.max_index_buffer_size", "6mb").build()
         );
 
-        assertThat(controller.indexingBufferSize(), equalTo(new ByteSizeValue(6, ByteSizeUnit.MB).getBytes()));
+        assertThat(controller.indexingBufferSize(), equalTo(ByteSizeValue.of(6, ByteSizeUnit.MB).getBytes()));
     }
 
     public void testThrottling() throws Exception {

@@ -211,19 +211,19 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase {
         CircuitBreaker requestCircuitBreaker = service.getBreaker(CircuitBreaker.REQUEST);
         CircuitBreaker fieldDataCircuitBreaker = service.getBreaker(CircuitBreaker.FIELDDATA);
 
-        assertEquals(new ByteSizeValue(200, ByteSizeUnit.MB).getBytes(), service.stats().getStats(CircuitBreaker.PARENT).getLimit());
-        assertEquals(new ByteSizeValue(150, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getLimit());
-        assertEquals(new ByteSizeValue(150, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getLimit());
+        assertEquals(ByteSizeValue.of(200, ByteSizeUnit.MB).getBytes(), service.stats().getStats(CircuitBreaker.PARENT).getLimit());
+        assertEquals(ByteSizeValue.of(150, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getLimit());
+        assertEquals(ByteSizeValue.of(150, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getLimit());
 
-        fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should not break");
-        assertEquals(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getUsed(), 0.0);
-        requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should not break");
-        assertEquals(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0);
-        requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should not break");
-        assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0);
+        fieldDataCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should not break");
+        assertEquals(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), fieldDataCircuitBreaker.getUsed(), 0.0);
+        requestCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should not break");
+        assertEquals(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0);
+        requestCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should not break");
+        assertEquals(ByteSizeValue.of(100, ByteSizeUnit.MB).getBytes(), requestCircuitBreaker.getUsed(), 0.0);
         CircuitBreakingException exception = expectThrows(
             CircuitBreakingException.class,
-            () -> requestCircuitBreaker.addEstimateBytesAndMaybeBreak(new ByteSizeValue(50, ByteSizeUnit.MB).getBytes(), "should break")
+            () -> requestCircuitBreaker.addEstimateBytesAndMaybeBreak(ByteSizeValue.of(50, ByteSizeUnit.MB).getBytes(), "should break")
         );
         assertThat(exception.getMessage(), containsString("[parent] Data too large, data for [should break] would be"));
         assertThat(exception.getMessage(), containsString("which is larger than the limit of [209715200/200mb]"));
@@ -733,7 +733,7 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase {
         );
 
         long parentLimitBytes = service.getParentLimit();
-        assertEquals(new ByteSizeValue(100, ByteSizeUnit.BYTES).getBytes(), parentLimitBytes);
+        assertEquals(ByteSizeValue.of(100, ByteSizeUnit.BYTES).getBytes(), parentLimitBytes);
 
         CircuitBreaker breaker = service.getBreaker(CircuitBreaker.REQUEST);
         MultiBucketConsumerService.MultiBucketConsumer multiBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer(
@@ -800,7 +800,7 @@ public class HierarchyCircuitBreakerServiceTests extends ESTestCase {
     }
 
     private static long mb(long size) {
-        return new ByteSizeValue(size, ByteSizeUnit.MB).getBytes();
+        return ByteSizeValue.of(size, ByteSizeUnit.MB).getBytes();
     }
 
     public void testUpdatingUseRealMemory() {

@@ -83,7 +83,7 @@ import static org.hamcrest.Matchers.sameInstance;
 import static org.mockito.Mockito.mock;
 
 public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
-    private static final ByteSizeValue SNAPSHOT_FILE_PART_SIZE = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES);
+    private static final ByteSizeValue SNAPSHOT_FILE_PART_SIZE = ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES);
 
     public void testWriteFileChunksConcurrently() throws Exception {
         IndexShard sourceShard = newStartedShard(true);
@@ -454,7 +454,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
 
             @Override
             public int getReadSnapshotFileBufferSizeForRepo(String repository) {
-                return (int) new ByteSizeValue(128, ByteSizeUnit.KB).getBytes();
+                return (int) ByteSizeValue.of(128, ByteSizeUnit.KB).getBytes();
             }
         };
 
@@ -526,7 +526,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
 
             @Override
             public int getReadSnapshotFileBufferSizeForRepo(String repository) {
-                return (int) new ByteSizeValue(128, ByteSizeUnit.KB).getBytes();
+                return (int) ByteSizeValue.of(128, ByteSizeUnit.KB).getBytes();
             }
         };
 
@@ -636,7 +636,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
 
             @Override
             public int getReadSnapshotFileBufferSizeForRepo(String repository) {
-                return (int) new ByteSizeValue(128, ByteSizeUnit.KB).getBytes();
+                return (int) ByteSizeValue.of(128, ByteSizeUnit.KB).getBytes();
             }
         };
 
@@ -699,7 +699,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
 
             @Override
             public int getReadSnapshotFileBufferSizeForRepo(String repository) {
-                return (int) new ByteSizeValue(128, ByteSizeUnit.KB).getBytes();
+                return (int) ByteSizeValue.of(128, ByteSizeUnit.KB).getBytes();
             }
         };
 
@@ -713,7 +713,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
         BlobStoreIndexShardSnapshot.FileInfo fileInfo = new BlobStoreIndexShardSnapshot.FileInfo(
             "name",
             storeFileMetadata,
-            new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)
+            ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES)
         );
 
         recoveryTarget.incRef();

@@ -423,7 +423,7 @@ public class RecoverySettingsTests extends ESTestCase {
                     .withMemory(ByteSizeValue.ofBytes(randomLongBetween(1L, ByteSizeUnit.GB.toBytes(4L))))
                     .build()
                     .getMaxBytesPerSec(),
-                equalTo(new ByteSizeValue(40, ByteSizeUnit.MB))
+                equalTo(ByteSizeValue.of(40, ByteSizeUnit.MB))
             );
         }
         {
@@ -433,7 +433,7 @@ public class RecoverySettingsTests extends ESTestCase {
                     .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(4L) + 1L, ByteSizeUnit.GB.toBytes(8L))))
                     .build()
                     .getMaxBytesPerSec(),
-                equalTo(new ByteSizeValue(60, ByteSizeUnit.MB))
+                equalTo(ByteSizeValue.of(60, ByteSizeUnit.MB))
             );
         }
         {
@@ -443,7 +443,7 @@ public class RecoverySettingsTests extends ESTestCase {
                    .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(8L) + 1L, ByteSizeUnit.GB.toBytes(16L))))
                     .build()
                     .getMaxBytesPerSec(),
-                equalTo(new ByteSizeValue(90, ByteSizeUnit.MB))
+                equalTo(ByteSizeValue.of(90, ByteSizeUnit.MB))
             );
         }
         {
@@ -453,7 +453,7 @@ public class RecoverySettingsTests extends ESTestCase {
                     .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(16L) + 1L, ByteSizeUnit.GB.toBytes(32L))))
                     .build()
                     .getMaxBytesPerSec(),
-                equalTo(new ByteSizeValue(125, ByteSizeUnit.MB))
+                equalTo(ByteSizeValue.of(125, ByteSizeUnit.MB))
             );
         }
         {
@@ -463,7 +463,7 @@ public class RecoverySettingsTests extends ESTestCase {
                     .withMemory(ByteSizeValue.ofBytes(randomLongBetween(ByteSizeUnit.GB.toBytes(32L) + 1L, ByteSizeUnit.TB.toBytes(4L))))
                     .build()
                     .getMaxBytesPerSec(),
-                equalTo(new ByteSizeValue(250, ByteSizeUnit.MB))
+                equalTo(ByteSizeValue.of(250, ByteSizeUnit.MB))
             );
         }
     }
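The assertions in these hunks fix the default recovery bandwidth for each memory tier. Read together, they imply a mapping along these lines (a sketch inferred from the test expectations, not the production code; the method name is hypothetical):

    static ByteSizeValue defaultMaxBytesPerSec(ByteSizeValue totalMemory) {
        long gb = ByteSizeUnit.GB.toBytes(1);
        long mem = totalMemory.getBytes();
        // tier boundaries taken from the withMemory(...) ranges above
        if (mem <= 4 * gb) return ByteSizeValue.of(40, ByteSizeUnit.MB);
        if (mem <= 8 * gb) return ByteSizeValue.of(60, ByteSizeUnit.MB);
        if (mem <= 16 * gb) return ByteSizeValue.of(90, ByteSizeUnit.MB);
        if (mem <= 32 * gb) return ByteSizeValue.of(125, ByteSizeUnit.MB);
        return ByteSizeValue.of(250, ByteSizeUnit.MB);
    }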

@@ -1686,7 +1686,7 @@ public class RecoverySourceHandlerTests extends MapperServiceTestCase {
             0
         );
 
-        ByteSizeValue partSize = new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES);
+        ByteSizeValue partSize = ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES);
 
         List<StoreFileMetadata> filesToRecoverFromSource = sourceFiles.subList(0, sourceFileCount);
         List<StoreFileMetadata> filesToRecoverFromSnapshot = sourceFiles.subList(sourceFileCount, sourceFiles.size());

@@ -486,11 +486,11 @@ public abstract class ESIntegTestCase extends ESTestCase {
         if (random.nextBoolean()) {
             builder.put(
                 IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
-                new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 300), ByteSizeUnit.MB)
+                ByteSizeValue.of(RandomNumbers.randomIntBetween(random, 1, 300), ByteSizeUnit.MB)
             );
         }
         if (random.nextBoolean()) {
-            builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)); // just
+            builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(1, ByteSizeUnit.PB)); // just
             // don't
             // flush
         }

@@ -574,12 +574,12 @@ public final class InternalTestCluster extends TestCluster {
             if (random.nextInt(10) == 0) { // do something crazy slow here
                 builder.put(
                     RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(),
-                    new ByteSizeValue(RandomNumbers.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)
+                    ByteSizeValue.of(RandomNumbers.randomIntBetween(random, 1, 10), ByteSizeUnit.MB)
                 );
             } else {
                 builder.put(
                     RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(),
-                    new ByteSizeValue(RandomNumbers.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)
+                    ByteSizeValue.of(RandomNumbers.randomIntBetween(random, 10, 200), ByteSizeUnit.MB)
                 );
             }
         }

@@ -359,7 +359,7 @@ public class TopMetricsAggregatorTests extends AggregatorTestCase {
     public void testTonsOfBucketsTriggersBreaker() throws IOException {
         // Build a "simple" circuit breaker that trips at 20k
         CircuitBreakerService breaker = mock(CircuitBreakerService.class);
-        ByteSizeValue max = new ByteSizeValue(20, ByteSizeUnit.KB);
+        ByteSizeValue max = ByteSizeValue.of(20, ByteSizeUnit.KB);
         when(breaker.getBreaker(CircuitBreaker.REQUEST)).thenReturn(new MockBigArrays.LimitedBreaker(CircuitBreaker.REQUEST, max));
 
         // Collect some buckets with it

@@ -73,7 +73,7 @@ public abstract class AbstractFrozenAutoscalingIntegTestCase extends AbstractSna
             .put(super.nodeSettings(nodeOrdinal, otherSettings))
             .put(SELF_GENERATED_LICENSE_TYPE.getKey(), "trial");
         if (DiscoveryNode.hasRole(otherSettings, DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE)) {
-            builder.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), new ByteSizeValue(10, ByteSizeUnit.MB));
+            builder.put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.of(10, ByteSizeUnit.MB));
         }
         return builder.build();
     }

@@ -68,7 +68,7 @@ public class FrozenShardsDeciderServiceTests extends AutoscalingTestCase {
         );
         assertThat(defaultSettingsResult.reason().summary(), equalTo("shard count [" + (shards * (replicas + 1) + "]")));
 
-        ByteSizeValue memoryPerShard = new ByteSizeValue(
+        ByteSizeValue memoryPerShard = ByteSizeValue.of(
             randomLongBetween(0, 1000),
             randomFrom(ByteSizeUnit.BYTES, ByteSizeUnit.KB, ByteSizeUnit.MB)
         );

@@ -813,7 +813,7 @@ public class SharedBlobCacheServiceTests extends ESTestCase {
     }
 
     public void testCacheSizeChanges() throws IOException {
-        ByteSizeValue val1 = new ByteSizeValue(randomIntBetween(1, 5), ByteSizeUnit.MB);
+        ByteSizeValue val1 = ByteSizeValue.of(randomIntBetween(1, 5), ByteSizeUnit.MB);
         Settings settings = Settings.builder()
             .put(NODE_NAME_SETTING.getKey(), "node")
             .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), val1.getStringRep())
@@ -834,7 +834,7 @@ public class SharedBlobCacheServiceTests extends ESTestCase {
             assertEquals(val1.getBytes(), cacheService.getStats().size());
         }
 
-        ByteSizeValue val2 = new ByteSizeValue(randomIntBetween(1, 5), ByteSizeUnit.MB);
+        ByteSizeValue val2 = ByteSizeValue.of(randomIntBetween(1, 5), ByteSizeUnit.MB);
         settings = Settings.builder()
             .put(settings)
             .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), val2.getStringRep())

@@ -120,7 +120,7 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase {
     ) throws IOException {
         final ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT)
             .masterNodeTimeout(TimeValue.MAX_VALUE);
-        final String chunkSize = new ByteSizeValue(randomFrom(4, 128, 1024), ByteSizeUnit.KB).getStringRep();
+        final String chunkSize = ByteSizeValue.of(randomFrom(4, 128, 1024), ByteSizeUnit.KB).getStringRep();
         settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), chunkSize));
         assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet());
 

@@ -92,10 +92,10 @@ public class FollowerFailOverIT extends CcrIntegTestCase {
         availableDocs.release(between(100, 200));
         PutFollowAction.Request follow = putFollow(leaderIndex, followerIndex);
         follow.getParameters().setMaxReadRequestOperationCount(randomIntBetween(32, 2048));
-        follow.getParameters().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB));
+        follow.getParameters().setMaxReadRequestSize(ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB));
         follow.getParameters().setMaxOutstandingReadRequests(randomIntBetween(1, 10));
         follow.getParameters().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048));
-        follow.getParameters().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB));
+        follow.getParameters().setMaxWriteRequestSize(ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB));
         follow.getParameters().setMaxOutstandingWriteRequests(randomIntBetween(1, 10));
         logger.info("--> follow request {}", Strings.toString(follow));
         followerClient().execute(PutFollowAction.INSTANCE, follow).get();
@@ -153,10 +153,10 @@ public class FollowerFailOverIT extends CcrIntegTestCase {
 
         PutFollowAction.Request followRequest = putFollow("index1", "index2");
         followRequest.getParameters().setMaxReadRequestOperationCount(randomIntBetween(32, 2048));
-        followRequest.getParameters().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB));
+        followRequest.getParameters().setMaxReadRequestSize(ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB));
         followRequest.getParameters().setMaxOutstandingReadRequests(randomIntBetween(1, 10));
         followRequest.getParameters().setMaxWriteRequestOperationCount(randomIntBetween(32, 2048));
-        followRequest.getParameters().setMaxWriteRequestSize(new ByteSizeValue(randomIntBetween(1, 4096), ByteSizeUnit.KB));
+        followRequest.getParameters().setMaxWriteRequestSize(ByteSizeValue.of(randomIntBetween(1, 4096), ByteSizeUnit.KB));
         followRequest.getParameters().setMaxOutstandingWriteRequests(randomIntBetween(1, 10));
         followRequest.waitForActiveShards(ActiveShardCount.ALL);
         followerClient().execute(PutFollowAction.INSTANCE, followRequest).get();

@@ -150,7 +150,7 @@ public class IndexFollowingIT extends CcrIntegTestCase {
             .setMasterNodeTimeout(TimeValue.MAX_VALUE)
             .setPersistentSettings(
                 Settings.builder()
-                    .put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), new ByteSizeValue(randomIntBetween(1, 1000), ByteSizeUnit.KB))
+                    .put(CcrSettings.RECOVERY_CHUNK_SIZE.getKey(), ByteSizeValue.of(randomIntBetween(1, 1000), ByteSizeUnit.KB))
             )
             .get();
 
@@ -667,7 +667,7 @@ public class IndexFollowingIT extends CcrIntegTestCase {
         }
 
         PutFollowAction.Request followRequest = putFollow("index1", "index2");
-        followRequest.getParameters().setMaxReadRequestSize(new ByteSizeValue(randomIntBetween(1, 1024), ByteSizeUnit.BYTES));
+        followRequest.getParameters().setMaxReadRequestSize(ByteSizeValue.of(randomIntBetween(1, 1024), ByteSizeUnit.BYTES));
         followerClient().execute(PutFollowAction.INSTANCE, followRequest).get();
 
         final Map<ShardId, Long> firstBatchNumDocsPerShard = new HashMap<>();

@@ -60,7 +60,7 @@ public final class CcrSettings {
      */
     public static final Setting<ByteSizeValue> RECOVERY_MAX_BYTES_PER_SECOND = Setting.byteSizeSetting(
         "ccr.indices.recovery.max_bytes_per_sec",
-        new ByteSizeValue(40, ByteSizeUnit.MB),
+        ByteSizeValue.of(40, ByteSizeUnit.MB),
         Setting.Property.Dynamic,
         Setting.Property.NodeScope
     );
@@ -70,9 +70,9 @@ public final class CcrSettings {
      */
     public static final Setting<ByteSizeValue> RECOVERY_CHUNK_SIZE = Setting.byteSizeSetting(
         "ccr.indices.recovery.chunk_size",
-        new ByteSizeValue(1, ByteSizeUnit.MB),
-        new ByteSizeValue(1, ByteSizeUnit.KB),
-        new ByteSizeValue(1, ByteSizeUnit.GB),
+        ByteSizeValue.of(1, ByteSizeUnit.MB),
+        ByteSizeValue.of(1, ByteSizeUnit.KB),
+        ByteSizeValue.of(1, ByteSizeUnit.GB),
         Setting.Property.Dynamic,
         Setting.Property.NodeScope
     );
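Settings declared with Setting.byteSizeSetting, like the two above, parse their value through ByteSizeValue and carry the declared default and bounds. A small usage sketch; the sample value is illustrative:

    Settings settings = Settings.builder()
        .put("ccr.indices.recovery.chunk_size", "512kb")
        .build();
    ByteSizeValue chunkSize = CcrSettings.RECOVERY_CHUNK_SIZE.get(settings);
    // values outside the declared 1kb..1gb range are rejected when the setting is parsed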

@@ -69,12 +69,12 @@ import static org.elasticsearch.xpack.ccr.Ccr.CCR_THREAD_POOL_NAME;
 
 public class TransportResumeFollowAction extends AcknowledgedTransportMasterNodeAction<ResumeFollowAction.Request> {
 
-    static final ByteSizeValue DEFAULT_MAX_READ_REQUEST_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB);
+    static final ByteSizeValue DEFAULT_MAX_READ_REQUEST_SIZE = ByteSizeValue.of(32, ByteSizeUnit.MB);
     static final ByteSizeValue DEFAULT_MAX_WRITE_REQUEST_SIZE = ByteSizeValue.ofBytes(Long.MAX_VALUE);
     private static final TimeValue DEFAULT_MAX_RETRY_DELAY = new TimeValue(500);
     private static final int DEFAULT_MAX_OUTSTANDING_WRITE_REQUESTS = 9;
     private static final int DEFAULT_MAX_WRITE_BUFFER_COUNT = Integer.MAX_VALUE;
-    private static final ByteSizeValue DEFAULT_MAX_WRITE_BUFFER_SIZE = new ByteSizeValue(512, ByteSizeUnit.MB);
+    private static final ByteSizeValue DEFAULT_MAX_WRITE_BUFFER_SIZE = ByteSizeValue.of(512, ByteSizeUnit.MB);
     private static final int DEFAULT_MAX_READ_REQUEST_OPERATION_COUNT = 5120;
     private static final int DEFAULT_MAX_WRITE_REQUEST_OPERATION_COUNT = 5120;
     private static final int DEFAULT_MAX_OUTSTANDING_READ_REQUESTS = 12;
|
|
|
@@ -60,7 +60,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET;
 public class RestShardChangesAction extends BaseRestHandler {

     private static final long DEFAULT_FROM_SEQ_NO = 0L;
-    private static final ByteSizeValue DEFAULT_MAX_BATCH_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB);
+    private static final ByteSizeValue DEFAULT_MAX_BATCH_SIZE = ByteSizeValue.of(32, ByteSizeUnit.MB);
     private static final TimeValue DEFAULT_POLL_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);
     private static final int DEFAULT_MAX_OPERATIONS_COUNT = 1024;
     private static final int DEFAULT_TIMEOUT_SECONDS = 60;

@@ -69,7 +69,7 @@ public class ResumeFollowActionRequestTests extends AbstractXContentSerializingT
             followParameters.setMaxOutstandingWriteRequests(randomIntBetween(1, Integer.MAX_VALUE));
         }
         if (randomBoolean()) {
-            followParameters.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES));
+            followParameters.setMaxReadRequestSize(ByteSizeValue.of(randomNonNegativeLong(), ByteSizeUnit.BYTES));
         }
         if (randomBoolean()) {
             followParameters.setMaxWriteBufferCount(randomIntBetween(1, Integer.MAX_VALUE));

@@ -67,7 +67,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase {
                 min,
                 size,
                 indexShard.getHistoryUUID(),
-                new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)
+                ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES)
             );
             final List<Long> seenSeqNos = Arrays.stream(operations).map(Translog.Operation::seqNo).collect(Collectors.toList());
             final List<Long> expectedSeqNos = LongStream.rangeClosed(min, max).boxed().collect(Collectors.toList());

@@ -84,7 +84,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase {
                 numWrites,
                 numWrites + 1,
                 indexShard.getHistoryUUID(),
-                new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)
+                ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES)
             )
         );
         final String message = String.format(

@@ -103,7 +103,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase {
             numWrites - 10,
             numWrites + 10,
             indexShard.getHistoryUUID(),
-            new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)
+            ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES)
         );
         assertThat(operations.length, equalTo(10));

@@ -116,7 +116,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase {
                 0,
                 10,
                 "different-history-uuid",
-                new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)
+                ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES)
             )
         );
         assertThat(

@@ -136,7 +136,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase {
                 fromSeqNo,
                 batchSize,
                 indexShard.getHistoryUUID(),
-                new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)
+                ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES)
             )
         );
         assertThat(

@@ -159,7 +159,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase {
                 0,
                 1,
                 indexShard.getHistoryUUID(),
-                new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)
+                ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES)
             )
         );
     }

@@ -179,7 +179,7 @@ public class ShardChangesActionTests extends ESSingleNodeTestCase {
             0,
             randomIntBetween(100, 500),
             indexShard.getHistoryUUID(),
-            new ByteSizeValue(256, ByteSizeUnit.BYTES)
+            ByteSizeValue.of(256, ByteSizeUnit.BYTES)
         );
         assertThat(operations.length, equalTo(8));
         assertThat(operations[0].seqNo(), equalTo(0L));

@@ -105,7 +105,7 @@ public class ShardFollowNodeTaskRandomTests extends ESTestCase {
             TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE,
             TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE,
             10240,
-            new ByteSizeValue(512, ByteSizeUnit.MB),
+            ByteSizeValue.of(512, ByteSizeUnit.MB),
             TimeValue.timeValueMillis(10),
             TimeValue.timeValueMillis(10),
             Collections.emptyMap()

@@ -139,7 +139,7 @@ public class ShardFollowNodeTaskTests extends ESTestCase {
         params.maxReadRequestOperationCount = 64;
         params.maxOutstandingReadRequests = 1;
         params.maxOutstandingWriteRequests = 0; // need to set outstandingWrites to 0, other the write buffer gets flushed immediately
-        params.maxWriteBufferSize = new ByteSizeValue(1, ByteSizeUnit.KB);
+        params.maxWriteBufferSize = ByteSizeValue.of(1, ByteSizeUnit.KB);
         ShardFollowNodeTask task = createShardFollowTask(params);
         startTask(task, 63, -1);

@@ -340,7 +340,7 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest
                 fromSeqNo,
                 numOps,
                 leadingPrimary.getHistoryUUID(),
-                new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES)
+                ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES)
             );

             IndexShard followingPrimary = followerGroup.getPrimary();

@@ -405,7 +405,7 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest
         Future<Void> recoveryFuture = null;
         Settings settings = Settings.builder()
             .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
-            .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 1000), ByteSizeUnit.KB))
+            .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(between(1, 1000), ByteSizeUnit.KB))
             .build();
         IndexMetadata indexMetadata = buildIndexMetadata(between(0, 1), settings, indexMapping);
         try (ReplicationGroup group = new ReplicationGroup(indexMetadata) {

@@ -505,7 +505,7 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest
     private ReplicationGroup createFollowGroup(ReplicationGroup leaderGroup, int replicas) throws IOException {
         final Settings settings = Settings.builder()
             .put(CcrSettings.CCR_FOLLOWING_INDEX_SETTING.getKey(), true)
-            .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(between(1, 1000), ByteSizeUnit.KB))
+            .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), ByteSizeValue.of(between(1, 1000), ByteSizeUnit.KB))
             .build();
         IndexMetadata indexMetadata = buildIndexMetadata(replicas, settings, indexMapping);
         return new ReplicationGroup(indexMetadata) {

@@ -573,10 +573,10 @@ public class ShardFollowTaskReplicationTests extends ESIndexLevelReplicationTest
             between(1, 64),
             between(1, 8),
             between(1, 4),
-            new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES),
-            new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES),
+            ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES),
+            ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES),
             10240,
-            new ByteSizeValue(512, ByteSizeUnit.MB),
+            ByteSizeValue.of(512, ByteSizeUnit.MB),
             TimeValue.timeValueMillis(10),
             TimeValue.timeValueMillis(10),
             Collections.emptyMap()

@@ -120,10 +120,10 @@ public class TransportActivateAutoFollowPatternActionTests extends ESTestCase {
             randomIntBetween(1, 100),
             randomIntBetween(1, 100),
             randomIntBetween(1, 100),
-            new ByteSizeValue(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())),
-            new ByteSizeValue(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())),
+            ByteSizeValue.of(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())),
+            ByteSizeValue.of(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())),
             randomIntBetween(1, 100),
-            new ByteSizeValue(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())),
+            ByteSizeValue.of(randomIntBetween(1, 100), randomFrom(ByteSizeUnit.values())),
             TimeValue.timeValueSeconds(randomIntBetween(30, 600)),
             TimeValue.timeValueSeconds(randomIntBetween(30, 600))
         );

@@ -86,7 +86,7 @@ public class TransportFollowStatsActionTests extends ESTestCase {
             TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE,
             TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE,
             10240,
-            new ByteSizeValue(512, ByteSizeUnit.MB),
+            ByteSizeValue.of(512, ByteSizeUnit.MB),
             TimeValue.timeValueMillis(10),
             TimeValue.timeValueMillis(10),
             Collections.emptyMap()

@@ -88,7 +88,7 @@ public class TransportUnfollowActionTests extends ESTestCase {
             TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE,
             TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE,
             10240,
-            new ByteSizeValue(512, ByteSizeUnit.MB),
+            ByteSizeValue.of(512, ByteSizeUnit.MB),
             TimeValue.timeValueMillis(10),
             TimeValue.timeValueMillis(10),
             Collections.emptyMap()

@@ -165,10 +165,10 @@ public class PauseFollowerIndexStepTests extends AbstractUnfollowIndexStepTestCa
             1024,
             1,
             1,
-            new ByteSizeValue(32, ByteSizeUnit.MB),
-            new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES),
+            ByteSizeValue.of(32, ByteSizeUnit.MB),
+            ByteSizeValue.of(Long.MAX_VALUE, ByteSizeUnit.BYTES),
             10240,
-            new ByteSizeValue(512, ByteSizeUnit.MB),
+            ByteSizeValue.of(512, ByteSizeUnit.MB),
             TimeValue.timeValueMillis(10),
             TimeValue.timeValueMillis(10),
             Map.of()

@@ -60,20 +60,20 @@ public class WaitForRolloverReadyStepTests extends AbstractStepTestCase<WaitForR
         Step.StepKey stepKey = randomStepKey();
         Step.StepKey nextStepKey = randomStepKey();
         ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values());
-        ByteSizeValue maxSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit);
+        ByteSizeValue maxSize = randomBoolean() ? null : ByteSizeValue.of(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit);
         ByteSizeUnit maxPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
         ByteSizeValue maxPrimaryShardSize = randomBoolean()
             ? null
-            : new ByteSizeValue(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit);
+            : ByteSizeValue.of(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit);
         Long maxDocs = randomBoolean() ? null : randomNonNegativeLong();
         TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) ? randomPositiveTimeValue() : null;
         Long maxPrimaryShardDocs = randomBoolean() ? null : randomNonNegativeLong();
         ByteSizeUnit minSizeUnit = randomFrom(ByteSizeUnit.values());
-        ByteSizeValue minSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit);
+        ByteSizeValue minSize = randomBoolean() ? null : ByteSizeValue.of(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit);
         ByteSizeUnit minPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
         ByteSizeValue minPrimaryShardSize = randomBoolean()
             ? null
-            : new ByteSizeValue(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit);
+            : ByteSizeValue.of(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit);
         Long minDocs = randomBoolean() ? null : randomNonNegativeLong();
         TimeValue minAge = (minDocs == null || randomBoolean()) ? randomPositiveTimeValue() : null;
         Long minPrimaryShardDocs = randomBoolean() ? null : randomNonNegativeLong();

@@ -115,22 +115,22 @@ public class WaitForRolloverReadyStepTests extends AbstractStepTestCase<WaitForR
             case 1 -> nextKey = new Step.StepKey(nextKey.phase(), nextKey.action(), nextKey.name() + randomAlphaOfLength(5));
             case 2 -> maxSize = randomValueOtherThan(maxSize, () -> {
                 ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values());
-                return new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit);
+                return ByteSizeValue.of(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit);
             });
             case 3 -> maxPrimaryShardSize = randomValueOtherThan(maxPrimaryShardSize, () -> {
                 ByteSizeUnit maxPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
-                return new ByteSizeValue(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit);
+                return ByteSizeValue.of(randomNonNegativeLong() / maxPrimaryShardSizeUnit.toBytes(1), maxPrimaryShardSizeUnit);
             });
             case 4 -> maxAge = randomValueOtherThan(maxAge, () -> randomPositiveTimeValue());
             case 5 -> maxDocs = randomValueOtherThan(maxDocs, ESTestCase::randomNonNegativeLong);
             case 6 -> maxPrimaryShardDocs = randomValueOtherThan(maxPrimaryShardDocs, ESTestCase::randomNonNegativeLong);
             case 7 -> minSize = randomValueOtherThan(minSize, () -> {
                 ByteSizeUnit minSizeUnit = randomFrom(ByteSizeUnit.values());
-                return new ByteSizeValue(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit);
+                return ByteSizeValue.of(randomNonNegativeLong() / minSizeUnit.toBytes(1), minSizeUnit);
             });
             case 8 -> minPrimaryShardSize = randomValueOtherThan(minPrimaryShardSize, () -> {
                 ByteSizeUnit minPrimaryShardSizeUnit = randomFrom(ByteSizeUnit.values());
-                return new ByteSizeValue(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit);
+                return ByteSizeValue.of(randomNonNegativeLong() / minPrimaryShardSizeUnit.toBytes(1), minPrimaryShardSizeUnit);
             });
             case 9 -> minAge = randomValueOtherThan(minAge, () -> randomPositiveTimeValue());
             case 10 -> minDocs = randomValueOtherThan(minDocs, ESTestCase::randomNonNegativeLong);

@@ -34,7 +34,7 @@ public class ForecastJobActionRequestTests extends AbstractXContentSerializingTe
         }
         if (randomBoolean()) {
             request.setMaxModelMemory(
-                randomLongBetween(new ByteSizeValue(1, ByteSizeUnit.MB).getBytes(), new ByteSizeValue(499, ByteSizeUnit.MB).getBytes())
+                randomLongBetween(ByteSizeValue.of(1, ByteSizeUnit.MB).getBytes(), ByteSizeValue.of(499, ByteSizeUnit.MB).getBytes())
             );
         }
         return request;

@@ -165,7 +165,7 @@ public class DataFrameAnalyticsConfigTests extends AbstractBWCSerializationTestC
             );
         }
         if (randomBoolean()) {
-            builder.setModelMemoryLimit(new ByteSizeValue(randomIntBetween(1, 16), randomFrom(ByteSizeUnit.MB, ByteSizeUnit.GB)));
+            builder.setModelMemoryLimit(ByteSizeValue.of(randomIntBetween(1, 16), randomFrom(ByteSizeUnit.MB, ByteSizeUnit.GB)));
         }
         if (randomBoolean()) {
             builder.setDescription(randomAlphaOfLength(20));

@@ -285,31 +285,31 @@ public class DataFrameAnalyticsConfigTests extends AbstractBWCSerializationTestC
         assertTooSmall(
             expectThrows(
                 ElasticsearchStatusException.class,
-                () -> builder.setModelMemoryLimit(new ByteSizeValue(-1, ByteSizeUnit.BYTES)).build()
+                () -> builder.setModelMemoryLimit(ByteSizeValue.of(-1, ByteSizeUnit.BYTES)).build()
             )
         );
         assertTooSmall(
             expectThrows(
                 ElasticsearchStatusException.class,
-                () -> builder.setModelMemoryLimit(new ByteSizeValue(0, ByteSizeUnit.BYTES)).build()
+                () -> builder.setModelMemoryLimit(ByteSizeValue.of(0, ByteSizeUnit.BYTES)).build()
             )
         );
         assertTooSmall(
             expectThrows(
                 ElasticsearchStatusException.class,
-                () -> builder.setModelMemoryLimit(new ByteSizeValue(0, ByteSizeUnit.KB)).build()
+                () -> builder.setModelMemoryLimit(ByteSizeValue.of(0, ByteSizeUnit.KB)).build()
             )
         );
         assertTooSmall(
             expectThrows(
                 ElasticsearchStatusException.class,
-                () -> builder.setModelMemoryLimit(new ByteSizeValue(0, ByteSizeUnit.MB)).build()
+                () -> builder.setModelMemoryLimit(ByteSizeValue.of(0, ByteSizeUnit.MB)).build()
             )
         );
         assertTooSmall(
             expectThrows(
                 ElasticsearchStatusException.class,
-                () -> builder.setModelMemoryLimit(new ByteSizeValue(1023, ByteSizeUnit.BYTES)).build()
+                () -> builder.setModelMemoryLimit(ByteSizeValue.of(1023, ByteSizeUnit.BYTES)).build()
             )
         );
     }

@@ -329,7 +329,7 @@ public class DataFrameAnalyticsConfigTests extends AbstractBWCSerializationTestC

         DataFrameAnalyticsConfig defaultLimitConfig = createRandomBuilder("foo").setModelMemoryLimit(null).build();

-        ByteSizeValue maxLimit = new ByteSizeValue(randomIntBetween(500, 1000), ByteSizeUnit.MB);
+        ByteSizeValue maxLimit = ByteSizeValue.of(randomIntBetween(500, 1000), ByteSizeUnit.MB);
         if (maxLimit.compareTo(defaultLimitConfig.getModelMemoryLimit()) < 0) {
             assertThat(maxLimit, equalTo(new DataFrameAnalyticsConfig.Builder(defaultLimitConfig, maxLimit).build().getModelMemoryLimit()));
         } else {

@@ -342,10 +342,10 @@ public class DataFrameAnalyticsConfigTests extends AbstractBWCSerializationTestC

     public void testExplicitModelMemoryLimitTooHigh() {

-        ByteSizeValue configuredLimit = new ByteSizeValue(randomIntBetween(5, 10), ByteSizeUnit.GB);
+        ByteSizeValue configuredLimit = ByteSizeValue.of(randomIntBetween(5, 10), ByteSizeUnit.GB);
         DataFrameAnalyticsConfig explicitLimitConfig = createRandomBuilder("foo").setModelMemoryLimit(configuredLimit).build();

-        ByteSizeValue maxLimit = new ByteSizeValue(randomIntBetween(500, 1000), ByteSizeUnit.MB);
+        ByteSizeValue maxLimit = ByteSizeValue.of(randomIntBetween(500, 1000), ByteSizeUnit.MB);
         ElasticsearchStatusException e = expectThrows(
             ElasticsearchStatusException.class,
             () -> new DataFrameAnalyticsConfig.Builder(explicitLimitConfig, maxLimit).build()

@@ -54,19 +54,19 @@ public class MemoryEstimationTests extends AbstractXContentSerializingTestCase<M

     public void testConstructor_SmallValues() {
         MemoryEstimation memoryEstimation = new MemoryEstimation(
-            new ByteSizeValue(120, ByteSizeUnit.KB),
-            new ByteSizeValue(30, ByteSizeUnit.KB)
+            ByteSizeValue.of(120, ByteSizeUnit.KB),
+            ByteSizeValue.of(30, ByteSizeUnit.KB)
         );
-        assertThat(memoryEstimation.getExpectedMemoryWithoutDisk(), equalTo(new ByteSizeValue(120, ByteSizeUnit.KB)));
-        assertThat(memoryEstimation.getExpectedMemoryWithDisk(), equalTo(new ByteSizeValue(30, ByteSizeUnit.KB)));
+        assertThat(memoryEstimation.getExpectedMemoryWithoutDisk(), equalTo(ByteSizeValue.of(120, ByteSizeUnit.KB)));
+        assertThat(memoryEstimation.getExpectedMemoryWithDisk(), equalTo(ByteSizeValue.of(30, ByteSizeUnit.KB)));
     }

     public void testConstructor() {
         MemoryEstimation memoryEstimation = new MemoryEstimation(
-            new ByteSizeValue(20, ByteSizeUnit.MB),
-            new ByteSizeValue(10, ByteSizeUnit.MB)
+            ByteSizeValue.of(20, ByteSizeUnit.MB),
+            ByteSizeValue.of(10, ByteSizeUnit.MB)
         );
-        assertThat(memoryEstimation.getExpectedMemoryWithoutDisk(), equalTo(new ByteSizeValue(20, ByteSizeUnit.MB)));
-        assertThat(memoryEstimation.getExpectedMemoryWithDisk(), equalTo(new ByteSizeValue(10, ByteSizeUnit.MB)));
+        assertThat(memoryEstimation.getExpectedMemoryWithoutDisk(), equalTo(ByteSizeValue.of(20, ByteSizeUnit.MB)));
+        assertThat(memoryEstimation.getExpectedMemoryWithDisk(), equalTo(ByteSizeValue.of(10, ByteSizeUnit.MB)));
     }
 }

@@ -176,7 +176,7 @@ public class JobTests extends AbstractXContentSerializingTestCase<Job> {

     public void testValidateAnalysisLimitsAndSetDefaults_whenMaxIsLessThanTheDefault() {
         Job.Builder builder = buildJobBuilder("foo");
-        builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(512L, ByteSizeUnit.MB));
+        builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.of(512L, ByteSizeUnit.MB));

         Job job = builder.build();
         assertNotNull(job.getAnalysisLimits());

@@ -189,7 +189,7 @@ public class JobTests extends AbstractXContentSerializingTestCase<Job> {
         builder.setAnalysisLimits(new AnalysisLimits(4096L, null));
         ElasticsearchStatusException e = expectThrows(
             ElasticsearchStatusException.class,
-            () -> builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(1000L, ByteSizeUnit.MB))
+            () -> builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.of(1000L, ByteSizeUnit.MB))
         );
         assertEquals(
             "model_memory_limit [4gb] must be less than the value of the "

@@ -198,7 +198,7 @@ public class JobTests extends AbstractXContentSerializingTestCase<Job> {
             e.getMessage()
         );

-        builder.validateAnalysisLimitsAndSetDefaults(new ByteSizeValue(8192L, ByteSizeUnit.MB));
+        builder.validateAnalysisLimitsAndSetDefaults(ByteSizeValue.of(8192L, ByteSizeUnit.MB));
     }

     public void testEquals_GivenDifferentClass() {

@@ -360,7 +360,7 @@ public class JobUpdateTests extends AbstractXContentSerializingTestCase<JobUpdat

         ElasticsearchStatusException e = expectThrows(
             ElasticsearchStatusException.class,
-            () -> update.mergeWithJob(jobBuilder.build(), new ByteSizeValue(512L, ByteSizeUnit.MB))
+            () -> update.mergeWithJob(jobBuilder.build(), ByteSizeValue.of(512L, ByteSizeUnit.MB))
         );
         assertEquals(
             "model_memory_limit [1gb] must be less than the value of the xpack.ml.max_model_memory_limit setting [512mb]",

@@ -386,14 +386,14 @@ public class JobUpdateTests extends AbstractXContentSerializingTestCase<JobUpdat

         Exception e = expectThrows(
             ElasticsearchStatusException.class,
-            () -> updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), new ByteSizeValue(5000L, ByteSizeUnit.MB))
+            () -> updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), ByteSizeValue.of(5000L, ByteSizeUnit.MB))
         );
         assertEquals(
             "model_memory_limit [7.8gb] must be less than the value of the xpack.ml.max_model_memory_limit setting [4.8gb]",
             e.getMessage()
         );

-        updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), new ByteSizeValue(10000L, ByteSizeUnit.MB));
+        updateAboveMaxLimit.mergeWithJob(jobBuilder.build(), ByteSizeValue.of(10000L, ByteSizeUnit.MB));
     }

     public void testUpdate_givenEmptySnapshot() {

Some files were not shown because too many files have changed in this diff