Mirror of https://github.com/elastic/elasticsearch.git (synced 2025-04-25 07:37:19 -04:00)

Merge remote-tracking branch 'es/master' into enrich

Commit f97cc7f355: 642 changed files with 8326 additions and 6297 deletions
@@ -202,11 +202,12 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
             previousTest = snippet
             return
         }
-        if (snippet.testResponse) {
+        if (snippet.testResponse || snippet.language == 'console-result') {
             response(snippet)
             return
         }
-        if (snippet.test || snippet.console) {
+        if (snippet.test || snippet.console ||
+                snippet.language == 'console') {
             test(snippet)
             previousTest = snippet
             return
@@ -464,13 +464,6 @@ public class ElasticsearchNode implements TestClusterConfiguration {
     public void restart() {
         LOGGER.info("Restarting {}", this);
         stop(false);
-        try {
-            Files.delete(httpPortsFile);
-            Files.delete(transportPortFile);
-        } catch (IOException e) {
-            throw new UncheckedIOException(e);
-        }
-
         start();
     }

@@ -479,11 +472,11 @@ public class ElasticsearchNode implements TestClusterConfiguration {
         if (currentDistro + 1 >= distributions.size()) {
             throw new TestClustersException("Ran out of versions to go to for " + this);
         }
-        LOGGER.info("Switch version from {} to {} for {}",
-            getVersion(), distributions.get(currentDistro + 1).getVersion(), this
-        );
+        logToProcessStdout("Switch version from " + getVersion() + " to " + distributions.get(currentDistro + 1).getVersion());
+        stop(false);
         currentDistro += 1;
-        restart();
+        setting("node.attr.upgraded", "true");
+        start();
     }

     private boolean isSettingMissingOrTrue(String name) {

@@ -716,6 +709,17 @@ public class ElasticsearchNode implements TestClusterConfiguration {

     @Override
     public synchronized void stop(boolean tailLogs) {
+        logToProcessStdout("Stopping node");
+        try {
+            if (Files.exists(httpPortsFile)) {
+                Files.delete(httpPortsFile);
+            }
+            if (Files.exists(transportPortFile)) {
+                Files.delete(transportPortFile);
+            }
+        } catch (IOException e) {
+            throw new UncheckedIOException(e);
+        }
         if (esProcess == null && tailLogs) {
             // This is a special case. If start() throws an exception the plugin will still call stop
             // Another exception here would eat the orriginal.

@@ -867,6 +871,8 @@ public class ElasticsearchNode implements TestClusterConfiguration {

     private void createWorkingDir(Path distroExtractDir) throws IOException {
         syncWithLinks(distroExtractDir, distroDir);
+        // Start configuration from scratch in case of a restart
+        project.delete(configFile.getParent());
         Files.createDirectories(configFile.getParent());
         Files.createDirectories(confPathRepo);
         Files.createDirectories(confPathData);

@@ -958,6 +964,17 @@ public class ElasticsearchNode implements TestClusterConfiguration {
         // Don't wait for state, just start up quickly. This will also allow new and old nodes in the BWC case to become the master
         defaultConfig.put("discovery.initial_state_timeout", "0s");

+        // TODO: Remove these once https://github.com/elastic/elasticsearch/issues/46091 is fixed
+        defaultConfig.put("logger.org.elasticsearch.action.support.master.TransportMasterNodeAction", "TRACE");
+        defaultConfig.put("logger.org.elasticsearch.cluster.metadata.MetaDataCreateIndexService", "TRACE");
+        defaultConfig.put("logger.org.elasticsearch.cluster.service", "DEBUG");
+        defaultConfig.put("logger.org.elasticsearch.cluster.coordination", "DEBUG");
+        defaultConfig.put("logger.org.elasticsearch.gateway.MetaStateService", "TRACE");
+        if (getVersion().getMajor() >= 8) {
+            defaultConfig.put("cluster.service.slow_task_logging_threshold", "5s");
+            defaultConfig.put("cluster.service.slow_master_task_logging_threshold", "5s");
+        }
+
         HashSet<String> overriden = new HashSet<>(defaultConfig.keySet());
         overriden.retainAll(settings.keySet());
         overriden.removeAll(OVERRIDABLE_SETTINGS);
@@ -15,6 +15,10 @@ public class VersionProperties {
         return elasticsearch;
     }

+    public static Version getElasticsearchVersion() {
+        return Version.fromString(elasticsearch);
+    }
+
     public static String getLucene() {
         return lucene;
     }
@@ -3,10 +3,14 @@ Elasticsearch documentation build process.

 See: https://github.com/elastic/docs

-Snippets marked with `// CONSOLE` are automatically annotated with "VIEW IN
-CONSOLE" and "COPY AS CURL" in the documentation and are automatically tested
-by the command `gradle :docs:check`. To test just the docs from a single page,
-use e.g. `./gradlew :docs:integTestRunner --tests "*rollover*"`.
+Snippets marked with `[source,console]` are automatically annotated with
+"VIEW IN CONSOLE" and "COPY AS CURL" in the documentation and are automatically
+tested by the command `./gradlew -pdocs check`. To test just the docs from a
+single page, use e.g. `./gradlew -pdocs integTestRunner --tests "*rollover*"`.
+
+NOTE: Previously we used `// CONSOLE` instead of `[source,console]`. This worked
+well for a long time so you'll see it all over early branches but we're phasing
+it out because it requires some unpleasant hackery on the docs build side.

 NOTE: If you have an elasticsearch-extra folder alongside your elasticsearch
 folder, you must temporarily rename it when you are testing 6.3 or later branches.
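To illustrate the change above: the old style carried a trailing `// CONSOLE` comment after the block, while the new style folds the same signal into the source block's language. A minimal sketch of the two forms (the `GET /_cluster/health` request is a placeholder, not taken from this commit):

Old style:

[source,js]
--------------------------------------------------
GET /_cluster/health
--------------------------------------------------
// CONSOLE

New style:

[source,console]
--------------------------------------------------
GET /_cluster/health
--------------------------------------------------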
@@ -45,10 +49,21 @@ for its modifiers:
   header. If the response doesn't include a `Warning` header with the exact
   text then the test fails. If the response includes `Warning` headers that
   aren't expected then the test fails.
-* `// TESTRESPONSE`: Matches this snippet against the body of the response of
-  the last test. If the response is JSON then order is ignored. If you add
-  `// TEST[continued]` to the snippet after `// TESTRESPONSE` it will continue
-  in the same test, allowing you to interleave requests with responses to check.
+* `[source,console-result]`: Matches this snippet against the body of the
+  response of the last test. If the response is JSON then order is ignored. If
+  you add `// TEST[continued]` to the snippet after `[source,console-result]`
+  it will continue in the same test, allowing you to interleave requests with
+  responses to check.
+* `// TESTRESPONSE`: Explicitly marks a snippet as a test response even without
+  `[source,console-result]`. Similarly to `// TEST` this is mostly used for
+  its modifiers.
+* You can't use `[source,console-result]` immediately after `// TESTSETUP`.
+  Instead, consider using `// TEST[continued]` or rearrange your snippets.
+
+NOTE: Previously we only used `// TESTRESPONSE` instead of
+`[source,console-result]` so you'll see that a lot in older branches but we
+prefer `[source,console-result]` now.
+
 * `// TESTRESPONSE[s/foo/bar/]`: Substitutions. See `// TEST[s/foo/bar]` for
   how it works. These are much more common than `// TEST[s/foo/bar]` because
   they are useful for eliding portions of the response that are not pertinent
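As a worked example of the convention this hunk documents, a tested request and its expected response pair up as two adjacent blocks. A sketch adapted from the painless changes later in this commit (the request body is reconstructed and may differ in detail):

[source,console]
----------------------------------------------------------------
POST /_scripts/painless/_execute
{
  "script": {
    "source": "params.count / params.total",
    "params": {
      "count": 100.0,
      "total": 1000.0
    }
  }
}
----------------------------------------------------------------

[source,console-result]
--------------------------------------------------
{
  "result": "0.1"
}
--------------------------------------------------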
@@ -62,8 +77,6 @@ for its modifiers:
   "figures out" the path. This is especially useful for making sweeping
   assertions like "I made up all the numbers in this example, don't compare
   them" which looks like `// TESTRESPONSE[s/\d+/$body.$_path/]`.
-* You can't use `// TESTRESPONSE` immediately after `// TESTSETUP`. Instead,
-  consider using `// TEST[continued]` or rearrange your snippets.
 * `// TESTRESPONSE[non_json]`: Add substitutions for testing responses in a
   format other than JSON. Use this after all other substitutions so it doesn't
   make other substitutions difficult.
@@ -98,7 +111,7 @@ endyaml
 ```

 This allows slightly more expressive testing of the snippets. Since that syntax
-is not supported by CONSOLE the usual way to incorporate it is with a
+is not supported by `[source,console]` the usual way to incorporate it is with a
 `// TEST[s//]` marker like this:

 ```
@@ -5,12 +5,12 @@
 --

 [id="{upid}-{api}"]
-=== Ack Watch API
+=== Ack watch API

 [id="{upid}-{api}-request"]
 ==== Execution

-{xpack-ref}/actions.html#actions-ack-throttle[Acknowledging a watch] enables you
+{stack-ov}/actions.html#actions-ack-throttle[Acknowledging a watch] enables you
 to manually throttle execution of a watch's actions. A watch can be acknowledged
 through the following request:

@@ -5,7 +5,7 @@
 --

 [id="{upid}-{api}"]
-=== Activate Watch API
+=== Activate watch API

 [id="{upid}-{api}-request"]
 ==== Execution

@@ -29,7 +29,7 @@ include-tagged::{doc-tests-file}[{api}-response]
 <1> `watchStatus` contains status of the watch

 [id="{upid}-{api}-request-async"]
-==== Asynchronous Execution
+==== Asynchronous execution

 This request can be executed asynchronously:

@@ -5,6 +5,6 @@
 :doc-tests-file: {doc-tests}/WatcherDocumentationIT.java
 --
 [[java-rest-high-watcher-deactivate-watch]]
-=== Deactivate Watch API
+=== Deactivate watch API

 include::../execution.asciidoc[]

@@ -1,5 +1,5 @@
 [[java-rest-high-x-pack-watcher-delete-watch]]
-=== Delete Watch API
+=== Delete watch API

 [[java-rest-high-x-pack-watcher-delete-watch-execution]]
 ==== Execution

@@ -26,7 +26,7 @@ include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-put-watch-respons
 <3> `_version` returns the version of the deleted watch

 [[java-rest-high-x-pack-watcher-delete-watch-async]]
-==== Asynchronous Execution
+==== Asynchronous execution

 This request can be executed asynchronously:

@@ -4,7 +4,7 @@
 :response: ExecuteWatchResponse
 --
 [id="{upid}-{api}"]
-=== Execute Watch API
+=== Execute watch API

 The execute watch API allows clients to immediately execute a watch, either
 one that has been previously added via the

@@ -27,7 +27,7 @@ include-tagged::{doc-tests-file}[x-pack-execute-watch-by-id]
 <6> Enable debug mode

 [id="{upid}-{api}-response-by-id"]
-==== Execute by id Response
+==== Execute by id response

 The returned `Response` contains details of the execution:

@@ -5,7 +5,7 @@
 --

 [id="{upid}-{api}"]
-=== Get Watch API
+=== Get watch API

 [id="{upid}-{api}-request"]
 ==== Execution

@@ -1,5 +1,5 @@
 [[java-rest-high-x-pack-watcher-put-watch]]
-=== Put Watch API
+=== Put watch API

 [[java-rest-high-x-pack-watcher-put-watch-execution]]
 ==== Execution

@@ -28,7 +28,7 @@ include-tagged::{doc-tests}/WatcherDocumentationIT.java[x-pack-put-watch-respons
 <3> `_version` returns the newly created version

 [[java-rest-high-x-pack-watcher-put-watch-async]]
-==== Asynchronous Execution
+==== Asynchronous execution

 This request can be executed asynchronously:

@@ -4,7 +4,7 @@
 :response: StartWatchServiceResponse
 --
 [id="{upid}-{api}"]
-=== Start Watch Service API
+=== Start watch service API

 [id="{upid}-{api}-request"]
 ==== Execution

@@ -4,7 +4,7 @@
 :response: StopWatchServiceResponse
 --
 [id="{upid}-{api}"]
-=== Stop Watch Service API
+=== Stop watch service API

 [[java-rest-high-watcher-stop-watch-service-execution]]
 ==== Execution

@@ -4,7 +4,7 @@
 :response: WatcherStatsResponse
 --
 [id="{upid}-{api}"]
-=== Watcher Stats API
+=== Get Watcher stats API

 [id="{upid}-{api}-request"]
 ==== Execution
@@ -38,7 +38,7 @@ POST /hockey/_explain/1
 Which shows that the class of `doc.first` is
 `org.elasticsearch.index.fielddata.ScriptDocValues.Longs` by responding with:

-[source,js]
+[source,console-result]
 ---------------------------------------------------------
 {
   "error": {

@@ -68,7 +68,7 @@ POST /hockey/_update/1

 The response looks like:

-[source,js]
+[source,console-result]
 ---------------------------------------------------------
 {
   "error" : {
@@ -30,7 +30,7 @@ If no context is specified then this context is used by default.

 Request:

-[source,js]
+[source,console]
 ----------------------------------------------------------------
 POST /_scripts/painless/_execute
 {

@@ -43,17 +43,15 @@ POST /_scripts/painless/_execute
   }
 }
 ----------------------------------------------------------------
-// CONSOLE

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "result": "0.1"
 }
 --------------------------------------------------
-// TESTRESPONSE

 ===== Filter context

@@ -69,7 +67,7 @@ index:: The name of an index containing a mapping that is compatible with the do

 *Example*

-[source,js]
+[source,console]
 ----------------------------------------------------------------
 PUT /my-index
 {

@@ -99,17 +97,15 @@ POST /_scripts/painless/_execute
   }
 }
 ----------------------------------------------------------------
-// CONSOLE

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "result": true
 }
 --------------------------------------------------
-// TESTRESPONSE


 ===== Score context

@@ -125,7 +121,7 @@ query:: If `_score` is used in the script then a query can specified that will b

 *Example*

-[source,js]
+[source,console]
 ----------------------------------------------------------------
 PUT /my-index
 {

@@ -159,14 +155,12 @@ POST /_scripts/painless/_execute
   }
 }
 ----------------------------------------------------------------
-// CONSOLE

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "result": 0.8
 }
 --------------------------------------------------
-// TESTRESPONSE
@@ -185,7 +185,7 @@ GET icu_sample/_analyze

 The above `analyze` request returns the following:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens": [

@@ -199,7 +199,7 @@ The above `analyze` request returns the following:
   ]
 }
 --------------------------------------------------
-// TESTRESPONSE

 [[analysis-icu-normalization]]
 ==== ICU Normalization Token Filter
@@ -191,7 +191,7 @@ GET kuromoji_sample/_analyze

 The above `analyze` request returns the following:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens" : [ {

@@ -209,7 +209,7 @@ The above `analyze` request returns the following:
   } ]
 }
 --------------------------------------------------
-// TESTRESPONSE

 [[analysis-kuromoji-baseform]]
 ==== `kuromoji_baseform` token filter

@@ -247,7 +247,7 @@ GET kuromoji_sample/_analyze

 which responds with:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens" : [ {

@@ -259,7 +259,7 @@ which responds with:
   } ]
 }
 --------------------------------------------------
-// TESTRESPONSE

 [[analysis-kuromoji-speech]]
 ==== `kuromoji_part_of_speech` token filter

@@ -313,7 +313,7 @@ GET kuromoji_sample/_analyze

 Which responds with:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens" : [ {

@@ -331,7 +331,7 @@ Which responds with:
   } ]
 }
 --------------------------------------------------
-// TESTRESPONSE

 [[analysis-kuromoji-readingform]]
 ==== `kuromoji_readingform` token filter

@@ -504,7 +504,7 @@ GET kuromoji_sample/_analyze

 The above request returns:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens" : [ {

@@ -516,7 +516,7 @@ The above request returns:
   } ]
 }
 --------------------------------------------------
-// TESTRESPONSE

 [[analysis-kuromoji-number]]
 ==== `kuromoji_number` token filter

@@ -554,7 +554,7 @@ GET kuromoji_sample/_analyze

 Which results in:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens" : [ {

@@ -566,4 +566,3 @@ Which results in:
   } ]
 }
 --------------------------------------------------
-// TESTRESPONSE
@@ -125,7 +125,7 @@ GET nori_sample/_analyze

 The above `analyze` request returns the following:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens" : [ {

@@ -150,7 +150,6 @@ The above `analyze` request returns the following:
   }]
 }
 --------------------------------------------------
-// TESTRESPONSE

 <1> This is a compound token that spans two positions (`mixed` mode).
 --

@@ -210,7 +209,7 @@ GET _analyze

 Which responds with:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "detail": {

@@ -297,7 +296,7 @@ Which responds with:
   }
 }
 --------------------------------------------------
-// TESTRESPONSE

 [[analysis-nori-speech]]
 ==== `nori_part_of_speech` token filter

@@ -371,7 +370,7 @@ GET nori_sample/_analyze

 Which responds with:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens" : [ {

@@ -389,7 +388,7 @@ Which responds with:
   } ]
 }
 --------------------------------------------------
-// TESTRESPONSE

 [[analysis-nori-readingform]]
 ==== `nori_readingform` token filter

@@ -426,7 +425,7 @@ GET nori_sample/_analyze

 Which responds with:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens" : [ {

@@ -438,6 +437,5 @@ Which responds with:
   }]
 }
 --------------------------------------------------
-// TESTRESPONSE

 <1> The Hanja form is replaced by the Hangul translation.
@@ -99,7 +99,7 @@ GET smartcn_example/_analyze

 The above request returns:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens": [

@@ -428,4 +428,3 @@ The above request returns:
   ]
 }
 --------------------------------------------------
-// TESTRESPONSE
@@ -94,7 +94,7 @@ GET polish_stop_example/_analyze

 The above request returns:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "tokens" : [

@@ -115,4 +115,3 @@ The above request returns:
   ]
 }
 --------------------------------------------------
-// TESTRESPONSE
@@ -56,24 +56,22 @@ bin/elasticsearch-keystore add discovery.ec2.secret_key

 The available settings for the EC2 discovery plugin are as follows.

-`discovery.ec2.access_key`::
+`discovery.ec2.access_key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

     An EC2 access key. If set, you must also set `discovery.ec2.secret_key`.
     If unset, `discovery-ec2` will instead use the instance role. This setting
-    is sensitive and must be stored in the {ref}/secure-settings.html[{es}
-    keystore].
+    is sensitive and must be stored in the {es} keystore.

-`discovery.ec2.secret_key`::
+`discovery.ec2.secret_key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

     An EC2 secret key. If set, you must also set `discovery.ec2.access_key`.
-    This setting is sensitive and must be stored in the
-    {ref}/secure-settings.html[{es} keystore].
+    This setting is sensitive and must be stored in the {es} keystore.

-`discovery.ec2.session_token`::
+`discovery.ec2.session_token` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

     An EC2 session token. If set, you must also set `discovery.ec2.access_key`
     and `discovery.ec2.secret_key`. This setting is sensitive and must be
-    stored in the {ref}/secure-settings.html[{es} keystore].
+    stored in the {es} keystore.

 `discovery.ec2.endpoint`::

@@ -99,19 +97,19 @@ The available settings for the EC2 discovery plugin are as follows.
     this setting determines the port to use to connect to the proxy. Defaults to
     `80`.

-`discovery.ec2.proxy.username`::
+`discovery.ec2.proxy.username` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

     When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`,
     this setting determines the username to use to connect to the proxy. When
     not set, no username is used. This setting is sensitive and must be stored
-    in the {ref}/secure-settings.html[{es} keystore].
+    in the {es} keystore.

-`discovery.ec2.proxy.password`::
+`discovery.ec2.proxy.password` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

     When the address of an HTTP proxy is given in `discovery.ec2.proxy.host`,
     this setting determines the password to use to connect to the proxy. When
     not set, no password is used. This setting is sensitive and must be stored
-    in the {ref}/secure-settings.html[{es} keystore].
+    in the {es} keystore.

 `discovery.ec2.read_timeout`::

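For reference: settings marked as reloadable secure settings, like the EC2 credentials above, can be updated in the keystore and applied without a node restart via the standard reload API (shown here for illustration; it is not part of this diff):

[source,console]
--------------------------------------------------
POST _nodes/reload_secure_settings
--------------------------------------------------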
@@ -55,7 +55,7 @@ GET my_index/_doc/my_id

 Returns this:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "found": true,

@@ -140,7 +140,7 @@ GET my_index/_doc/my_id

 Returns this:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "found": true,

@@ -190,7 +190,7 @@ GET my_index/_doc/my_id_2

 Returns this:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "found": true,

@@ -284,7 +284,8 @@ GET my_index/_doc/my_id
 // CONSOLE

 Returns this:
-[source,js]
+
+[source,console-result]
 --------------------------------------------------
 {
   "_index" : "my_index",
@@ -19,13 +19,6 @@ bin/elasticsearch-keystore add azure.client.default.account
 bin/elasticsearch-keystore add azure.client.default.key
 ----------------------------------------------------------------

-Where `account` is the azure account name and `key` the azure secret key. Instead of an azure secret key under `key`, you can alternatively
-define a shared access signatures (SAS) token under `sas_token` to use for authentication instead. When using an SAS token instead of an
-account key, the SAS token must have read (r), write (w), list (l), and delete (d) permissions for the repository base path and
-all its contents. These permissions need to be granted for the blob service (b) and apply to resource types service (s), container (c), and
-object (o).
-These settings are used by the repository's internal azure client.
-
 Note that you can also define more than one account:

 [source,sh]

@@ -36,42 +29,8 @@ bin/elasticsearch-keystore add azure.client.secondary.account
 bin/elasticsearch-keystore add azure.client.secondary.sas_token
 ----------------------------------------------------------------

-`default` is the default account name which will be used by a repository,
-unless you set an explicit one in the
-<<repository-azure-repository-settings, repository settings>>.
-
-The `account`, `key`, and `sas_token` storage settings are
-{ref}/secure-settings.html#reloadable-secure-settings[reloadable]. After you
-reload the settings, the internal azure clients, which are used to transfer the
-snapshot, will utilize the latest settings from the keystore.
-
-NOTE: In progress snapshot/restore jobs will not be preempted by a *reload*
-of the storage secure settings. They will complete using the client as it was built
-when the operation started.
-
-You can set the client side timeout to use when making any single request. It can be defined globally, per account or both.
-It's not set by default which means that Elasticsearch is using the
-http://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.html#setTimeoutIntervalInMs(java.lang.Integer)[default value]
-set by the azure client (known as 5 minutes).
-
-`max_retries` can help to control the exponential backoff policy. It will fix the number of retries
-in case of failures before considering the snapshot is failing. Defaults to `3` retries.
-The initial backoff period is defined by Azure SDK as `30s`. Which means `30s` of wait time
-before retrying after a first timeout or failure. The maximum backoff period is defined by Azure SDK as
-`90s`.
-
-`endpoint_suffix` can be used to specify Azure endpoint suffix explicitly. Defaults to `core.windows.net`.
-
-[source,yaml]
-----
-azure.client.default.timeout: 10s
-azure.client.default.max_retries: 7
-azure.client.default.endpoint_suffix: core.chinacloudapi.cn
-azure.client.secondary.timeout: 30s
-----
-
-In this example, timeout will be `10s` per try for `default` with `7` retries before failing
-and endpoint suffix will be `core.chinacloudapi.cn` and `30s` per try for `secondary` with `3` retries.
+For more information about these settings, see
+<<repository-azure-client-settings>>.

 [IMPORTANT]
 .Supported Azure Storage Account types

@@ -86,18 +45,103 @@ The Azure Repository plugin works with all Standard storage accounts
 https://azure.microsoft.com/en-gb/documentation/articles/storage-premium-storage[Premium Locally Redundant Storage] (`Premium_LRS`) is **not supported** as it is only usable as VM disk storage, not as general storage.
 ===============================================

-You can register a proxy per client using the following settings:
+[[repository-azure-client-settings]]
+==== Client settings
+
+The client that you use to connect to Azure has a number of settings available.
+The settings have the form `azure.client.CLIENT_NAME.SETTING_NAME`. By default,
+`azure` repositories use a client named `default`, but this can be modified using
+the <<repository-azure-repository-settings,repository setting>> `client`.
+For example:
+
+[source,console]
+----
+PUT _snapshot/my_backup
+{
+  "type": "azure",
+  "settings": {
+    "client": "secondary"
+  }
+}
+----
+// TEST[skip:we don't have azure setup while testing this]
+
+Most client settings can be added to the `elasticsearch.yml` configuration file.
+For example:

 [source,yaml]
 ----
-azure.client.default.proxy.host: proxy.host
-azure.client.default.proxy.port: 8888
-azure.client.default.proxy.type: http
+azure.client.default.timeout: 10s
+azure.client.default.max_retries: 7
+azure.client.default.endpoint_suffix: core.chinacloudapi.cn
+azure.client.secondary.timeout: 30s
 ----

-Supported values for `proxy.type` are `direct` (default), `http` or `socks`.
-When `proxy.type` is set to `http` or `socks`, `proxy.host` and `proxy.port` must be provided.
+In this example, the client side timeout is `10s` per try for the `default`
+account with `7` retries before failing. The endpoint suffix is
+`core.chinacloudapi.cn` and `30s` per try for the `secondary` account with `3`
+retries.
+
+The `account`, `key`, and `sas_token` storage settings are reloadable secure
+settings, which you add to the {es} keystore. For more information about
+creating and updating the {es} keystore, see
+{ref}/secure-settings.html[Secure settings]. After you reload the settings, the
+internal Azure clients, which are used to transfer the snapshot, utilize the
+latest settings from the keystore.
+
+NOTE: In progress snapshot or restore jobs will not be preempted by a *reload*
+of the storage secure settings. They will complete using the client as it was
+built when the operation started.
+
+The following list contains the available client settings. Those that must be
+stored in the keystore are marked as "secure"; the other settings belong in the
+`elasticsearch.yml` file.
+
+`account` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::
+  The Azure account name, which is used by the repository's internal Azure client.
+
+`endpoint_suffix`::
+  The Azure endpoint suffix to connect to. The default value is
+  `core.windows.net`.
+
+`key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::
+  The Azure secret key, which is used by the repository's internal Azure client. Alternatively, use `sas_token`.
+
+`max_retries`::
+  The number of retries to use when an Azure request fails. This setting helps
+  control the exponential backoff policy. It specifies the number of retries
+  that must occur before the snapshot fails. The default value is `3`. The
+  initial backoff period is defined by Azure SDK as `30s`. Thus there is `30s`
+  of wait time before retrying after a first timeout or failure. The maximum
+  backoff period is defined by Azure SDK as `90s`.
+
+`proxy.host`::
+  The host name of a proxy to connect to Azure through. For example: `azure.client.default.proxy.host: proxy.host`.
+
+`proxy.port`::
+  The port of a proxy to connect to Azure through. For example, `azure.client.default.proxy.port: 8888`.
+
+`proxy.type`::
+  Register a proxy type for the client. Supported values are `direct`, `http`,
+  and `socks`. For example: `azure.client.default.proxy.type: http`. When
+  `proxy.type` is set to `http` or `socks`, `proxy.host` and `proxy.port` must
+  also be provided. The default value is `direct`.
+
+`sas_token` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::
+  A shared access signatures (SAS) token, which the repository's internal Azure
+  client uses for authentication. The SAS token must have read (r), write (w),
+  list (l), and delete (d) permissions for the repository base path and all its
+  contents. These permissions must be granted for the blob service (b) and apply
+  to resource types service (s), container (c), and object (o). Alternatively,
+  use `key`.
+
+`timeout`::
+  The client side timeout for any single request to Azure. The value should
+  specify the time unit. For example, a value of `5s` specifies a 5 second
+  timeout. There is no default value, which means that {es} uses the
+  http://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.html#setTimeoutIntervalInMs(java.lang.Integer)[default value]
+  set by the Azure client (known as 5 minutes). This setting can be defined
+  globally, per account, or both.

 [[repository-azure-repository-settings]]
 ==== Repository settings
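As a quick sanity check after defining client settings, the repository definition can be retrieved to confirm it resolves as expected; an illustrative request, not part of this diff:

[source,console]
--------------------------------------------------
GET _snapshot/my_backup
--------------------------------------------------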
@@ -158,9 +158,9 @@ bin/elasticsearch-keystore add-file gcs.client.default.credentials_file /path/se
 The following are the available client settings. Those that must be stored in the keystore
 are marked as `Secure`.

-`credentials_file`::
+`credentials_file` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

-  The service account file that is used to authenticate to the Google Cloud Storage service. (Secure)
+  The service account file that is used to authenticate to the Google Cloud Storage service.

 `endpoint`::

@@ -101,16 +101,16 @@ The following list contains the available client settings. Those that must be
 stored in the keystore are marked as "secure" and are *reloadable*; the other
 settings belong in the `elasticsearch.yml` file.

-`access_key` ({ref}/secure-settings.html[Secure])::
+`access_key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

   An S3 access key. If set, the `secret_key` setting must also be specified.
   If unset, the client will use the instance or container role instead.

-`secret_key` ({ref}/secure-settings.html[Secure])::
+`secret_key` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

   An S3 secret key. If set, the `access_key` setting must also be specified.

-`session_token` ({ref}/secure-settings.html[Secure])::
+`session_token` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

   An S3 session token. If set, the `access_key` and `secret_key` settings
   must also be specified.

@@ -137,11 +137,11 @@ settings belong in the `elasticsearch.yml` file.

   The port of a proxy to connect to S3 through.

-`proxy.username` ({ref}/secure-settings.html[Secure])::
+`proxy.username` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

   The username to connect to the `proxy.host` with.

-`proxy.password` ({ref}/secure-settings.html[Secure])::
+`proxy.password` ({ref}/secure-settings.html[Secure], {ref}/secure-settings.html#reloadable-secure-settings[reloadable])::

   The password to connect to the `proxy.host` with.

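The secure S3 client settings above are added with the keystore tool, mirroring the pattern the EC2 and Azure pages use; an illustrative sketch assuming the `default` client:

[source,sh]
--------------------------------------------------
bin/elasticsearch-keystore add s3.client.default.access_key
bin/elasticsearch-keystore add s3.client.default.secret_key
--------------------------------------------------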
@@ -75,7 +75,7 @@ It is preferable to have a <<backup-security-repos, dedicated repository>> for
 this special index. If you wish, you can also snapshot the system indices for other {stack} components to this repository.
 +
 --
-[source,js]
+[source,console]
 -----------------------------------
 PUT /_snapshot/my_backup
 {

@@ -85,7 +85,6 @@ PUT /_snapshot/my_backup
   }
 }
 -----------------------------------
-// CONSOLE

 The user calling this API must have the elevated `manage` cluster privilege to
 prevent non-administrators exfiltrating data.

@@ -99,7 +98,7 @@ The following example creates a new user `snapshot_user` in the
 {stack-ov}/native-realm.html[native realm], but it is not important which
 realm the user is a member of:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_security/user/snapshot_user
 {

@@ -107,7 +106,6 @@ POST /_security/user/snapshot_user
   "roles" : [ "snapshot_user" ]
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[skip:security is not enabled in this fixture]

 --

@@ -118,7 +116,7 @@ POST /_security/user/snapshot_user
 The following example shows how to use the create snapshot API to backup
 the `.security` index to the `my_backup` repository:

-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /_snapshot/my_backup/snapshot_1
 {

@@ -126,7 +124,6 @@ PUT /_snapshot/my_backup/snapshot_1
   "include_global_state": true <1>
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]

 <1> This parameter value captures all the persistent settings stored in the

@@ -189,18 +186,16 @@ the {security-features}.
 To restore your security configuration from a backup, first make sure that the
 repository holding `.security` snapshots is installed:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_snapshot/my_backup
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_snapshot/my_backup/snapshot_1
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]

 Then log into one of the node hosts, navigate to {es} installation directory,
@@ -37,12 +37,10 @@ parameter for the snapshot API. Alternatively, you can extract these
 configuration values in text format by using the
 <<cluster-get-settings, get settings API>>:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET _cluster/settings?pretty&flat_settings&filter_path=persistent
 --------------------------------------------------
-//CONSOLE
-//TEST

 You can store the output of this as a file together with the rest of
 configuration files.
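To store the output as a file, as the paragraph above suggests, a request like the following against a local node would work (the host, port, and output file name are assumptions, not from the diff):

[source,sh]
--------------------------------------------------
curl -s "localhost:9200/_cluster/settings?pretty&flat_settings&filter_path=persistent" > persistent-settings.json
--------------------------------------------------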
@@ -28,7 +28,7 @@ other than the default of the ampersand.

 Example:

-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /emails/_bulk?refresh
 { "index" : { "_id" : 1 } }

@@ -54,7 +54,6 @@ GET emails/_search
   }
 }
 --------------------------------------------------
-// CONSOLE

 In the above example, we analyse email messages to see which groups of individuals
 have exchanged messages.

@@ -63,7 +62,7 @@ of groups that have recorded interactions.

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
   "took": 9,

@@ -10,7 +10,7 @@ The buckets field is optional, and will default to 10 buckets if not specified.
 
 Requesting a target of 10 buckets.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -24,7 +24,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 ==== Keys
@@ -37,7 +36,7 @@ date string using the format specified with the `format` parameter:
 TIP: If no `format` is specified, then it will use the first date
 <<mapping-date-format,format>> specified in the field mapping.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -52,14 +51,13 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 <1> Supports expressive date <<date-format-pattern,format pattern>>
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -119,7 +117,7 @@ Time zones may either be specified as an ISO 8601 UTC offset (e.g. `+01:00` or
 
 Consider the following example:
 
-[source,js]
+[source,console]
 ---------------------------------
 PUT my_index/log/1?refresh
 {
@@ -148,12 +146,11 @@ GET my_index/_search?size=0
 }
 }
 ---------------------------------
-// CONSOLE
 
 UTC is used if no time zone is specified, three 1-hour buckets are returned
 starting at midnight UTC on 1 October 2015:
 
-[source,js]
+[source,console-result]
 ---------------------------------
 {
 ...
@@ -186,7 +183,7 @@ starting at midnight UTC on 1 October 2015:
 If a `time_zone` of `-01:00` is specified, then midnight starts at one hour before
 midnight UTC:
 
-[source,js]
+[source,console]
 ---------------------------------
 GET my_index/_search?size=0
 {
@@ -201,7 +198,6 @@ GET my_index/_search?size=0
 }
 }
 ---------------------------------
-// CONSOLE
 // TEST[continued]
 
 
@@ -209,7 +205,7 @@ Now three 1-hour buckets are still returned but the first bucket starts at
 11:00pm on 30 September 2015 since that is the local time for the bucket in
 the specified time zone.
 
-[source,js]
+[source,console-result]
 ---------------------------------
 {
 ...
@@ -273,7 +269,7 @@ The accepted units for `minimum_interval` are:
 * minute
 * second
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -288,7 +284,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 ==== Missing value
@@ -297,7 +292,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they
 had a value.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -312,7 +307,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 <1> Documents without a value in the `publish_date` field will fall into the same bucket as documents that have the value `2000-01-01`.

@@ -9,7 +9,7 @@ This aggregation has a single option:
 
 For example, let's say we have an index of questions and answers. The answer type has the following `join` field in the mapping:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT child_example
 {
@@ -25,7 +25,6 @@ PUT child_example
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 The `question` document contain a tag field and the `answer` documents contain an owner field. With the `children`
 aggregation the tag buckets can be mapped to the owner buckets in a single request even though the two fields exist in
@@ -33,7 +32,7 @@ two different kinds of documents.
 
 An example of a question document:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT child_example/_doc/1
 {
@@ -49,12 +48,11 @@ PUT child_example/_doc/1
 ]
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 Examples of `answer` documents:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT child_example/_doc/2?routing=1
 {
@@ -86,12 +84,11 @@ PUT child_example/_doc/3?routing=1&refresh
 "creation_date": "2009-05-05T13:45:37.030"
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 The following request can be built that connects the two together:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST child_example/_search?size=0
 {
@@ -120,7 +117,6 @@ POST child_example/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 <1> The `type` points to type / mapping with the name `answer`.
@@ -129,7 +125,7 @@ The above example returns the top question tags and per tag the top answer owner
 
 Possible response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 25,

@@ -112,7 +112,7 @@ The values are extracted from a field or a script exactly like the `terms` aggre
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -127,11 +127,10 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 Like the `terms` aggregation it is also possible to use a script to create the values for the composite buckets:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -155,7 +154,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 ===== Histogram
 
@@ -166,7 +164,7 @@ a value of `101` would be translated to `100` which is the key for the interval
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -181,11 +179,10 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 The values are built from a numeric field or a script that return numerical values:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -210,7 +207,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 
 ===== Date Histogram
@@ -218,7 +214,7 @@ GET /_search
 The `date_histogram` is similar to the `histogram` value source except that the interval
 is specified by date/time expression:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -233,7 +229,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 The example above creates an interval per day and translates all `timestamp` values to the start of its closest intervals.
 Available expressions for interval: `year`, `quarter`, `month`, `week`, `day`, `hour`, `minute`, `second`
@@ -248,7 +243,7 @@ Internally, a date is represented as a 64 bit number representing a timestamp in
 These timestamps are returned as the bucket keys. It is possible to return a formatted date string instead using
 the format specified with the format parameter:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -271,7 +266,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 <1> Supports expressive date <<date-format-pattern,format pattern>>
 
@@ -291,7 +285,7 @@ The `sources` parameter accepts an array of values source.
 It is possible to mix different values source to create composite buckets.
 For example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -307,14 +301,13 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 This will create composite buckets from the values created by two values source, a `date_histogram` and a `terms`.
 Each bucket is composed of two values, one for each value source defined in the aggregation.
 Any type of combinations is allowed and the order in the array is preserved
 in the composite buckets.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -331,7 +324,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 ==== Order
 
@@ -344,7 +336,7 @@ It is possible to define the direction of the sort for each value source by sett
 or `desc` (descending order) directly in the value source definition.
 For example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -360,7 +352,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 \... will sort the composite bucket in descending order when comparing values from the `date_histogram` source
 and in ascending order when comparing values from the `terms` source.
@@ -371,7 +362,7 @@ By default documents without a value for a given source are ignored.
 It is possible to include them in the response by setting `missing_bucket` to
 `true` (defaults to `false`):
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -386,7 +377,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 In the example above the source `product_name` will emit an explicit `null` value
 for documents without a value for the field `product`.
@@ -411,7 +401,7 @@ If all composite buckets should be retrieved it is preferable to use a small siz
 and then use the `after` parameter to retrieve the next results.
 For example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -428,12 +418,11 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/_search/_search\?filter_path=aggregations/]
 
 \... returns:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -477,7 +466,7 @@ the last composite buckets returned in a previous round.
 For the example below the last bucket can be found in `after_key` and the next
 round of result can be retrieved with:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -495,7 +484,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 <1> Should restrict the aggregation to buckets that sort **after** the provided values.
 
@@ -507,7 +495,7 @@ parent aggregation.
 For instance the following example computes the average value of a field
 per composite bucket:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -528,12 +516,11 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/_search/_search\?filter_path=aggregations/]
 
 \... returns:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@@ -103,7 +103,7 @@ specified timezone, so that the date and time are the same at the start and end.
 ===== Calendar Interval Examples
 As an example, here is an aggregation requesting bucket intervals of a month in calendar time:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -117,13 +117,12 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 If you attempt to use multiples of calendar units, the aggregation will fail because only
 singular calendar units are supported:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -137,7 +136,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 // TEST[catch:bad_request]
 
@@ -199,7 +197,7 @@ Defined as 24 hours (86,400,000 milliseconds)
 If we try to recreate the "month" `calendar_interval` from earlier, we can approximate that with
 30 fixed days:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -213,12 +211,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 But if we try to use a calendar unit that is not supported, such as weeks, we'll get an exception:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -232,7 +229,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 // TEST[catch:bad_request]
 
@@ -290,7 +286,7 @@ date string using the `format` parameter specification:
 TIP: If you don't specify `format`, the first date
 <<mapping-date-format,format>> specified in the field mapping is used.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -305,14 +301,13 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 <1> Supports expressive date <<date-format-pattern,format pattern>>
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -353,7 +348,7 @@ such as`America/Los_Angeles`.
 
 Consider the following example:
 
-[source,js]
+[source,console]
 ---------------------------------
 PUT my_index/_doc/1?refresh
 {
@@ -377,13 +372,12 @@ GET my_index/_search?size=0
 }
 }
 ---------------------------------
-// CONSOLE
 
 If you don't specify a timezone, UTC is used. This would result in both of these
 documents being placed into the same day bucket, which starts at midnight UTC
 on 1 October 2015:
 
-[source,js]
+[source,console-result]
 ---------------------------------
 {
 ...
@@ -405,7 +399,7 @@ on 1 October 2015:
 If you specify a `time_zone` of `-01:00`, midnight in that timezone is one hour
 before midnight UTC:
 
-[source,js]
+[source,console]
 ---------------------------------
 GET my_index/_search?size=0
 {
@@ -420,13 +414,12 @@ GET my_index/_search?size=0
 }
 }
 ---------------------------------
-// CONSOLE
 // TEST[continued]
 
 Now the first document falls into the bucket for 30 September 2015, while the
 second document falls into the bucket for 1 October 2015:
 
-[source,js]
+[source,console-result]
 ---------------------------------
 {
 ...
@@ -474,7 +467,7 @@ For example, when using an interval of `day`, each bucket runs from midnight
 to midnight. Setting the `offset` parameter to `+6h` changes each bucket
 to run from 6am to 6am:
 
-[source,js]
+[source,console]
 -----------------------------
 PUT my_index/_doc/1?refresh
 {
@@ -499,12 +492,11 @@ GET my_index/_search?size=0
 }
 }
 -----------------------------
-// CONSOLE
 
 Instead of a single bucket starting at midnight, the above request groups the
 documents into buckets starting at 6am:
 
-[source,js]
+[source,console-result]
 -----------------------------
 {
 ...
@@ -536,7 +528,7 @@ adjustments have been made.
 Setting the `keyed` flag to `true` associates a unique string key with each
 bucket and returns the ranges as a hash rather than an array:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -552,12 +544,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -606,7 +597,7 @@ The `missing` parameter defines how to treat documents that are missing a value.
 By default, they are ignored, but it is also possible to treat them as if they
 have a value.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -621,7 +612,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 <1> Documents without a value in the `publish_date` field will fall into the
@@ -640,7 +630,7 @@ When you need to aggregate the results by day of the week, use a script that
 returns the day of the week:
 
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -656,12 +646,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@@ -12,7 +12,7 @@ for each range.
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -30,7 +30,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales s/now-10M\/M/10-2015/]
 
 <1> < now minus 10 months, rounded down to the start of the month.
@@ -42,7 +41,7 @@ documents dated since 10 months ago
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -75,7 +74,7 @@ be treated. By default they will be ignored but it is also possible to treat
 them as if they had a value. This is done by adding a set of fieldname :
 value mappings to specify default values per field.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -100,7 +99,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 <1> Documents without a value in the `date` field will be added to the "Older"
@@ -267,7 +265,7 @@ The `time_zone` parameter is also applied to rounding in date math expressions.
 As an example, to round to the beginning of the day in the CET time zone, you
 can do the following:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -286,7 +284,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 <1> This date will be converted to `2016-02-01T00:00:00.000+01:00`.
@@ -297,7 +294,7 @@ POST /sales/_search?size=0
 Setting the `keyed` flag to `true` will associate a unique string key with each
 bucket and return the ranges as a hash rather than an array:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -316,12 +313,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales s/now-10M\/M/10-2015/]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -347,7 +343,7 @@ Response:
 
 It is also possible to customize the key for each range:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -366,12 +362,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@@ -26,7 +26,7 @@ Example:
 We might want to see which tags are strongly associated with `#elasticsearch` on StackOverflow
 forum posts but ignoring the effects of some prolific users with a tendency to misspell #Kibana as #Cabana.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /stackoverflow/_search?size=0
 {
@@ -53,12 +53,11 @@ POST /stackoverflow/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:stackoverflow]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -92,7 +91,7 @@ Response:
 In this scenario we might want to diversify on a combination of field values. We can use a `script` to produce a hash of the
 multiple values in a tags field to ensure we don't have a sample that consists of the same repeated combinations of tags.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /stackoverflow/_search?size=0
 {
@@ -123,12 +122,11 @@ POST /stackoverflow/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:stackoverflow]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@@ -5,7 +5,7 @@ Defines a single bucket of all the documents in the current document set context
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -19,14 +19,13 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 In the above example, we calculate the average price of all the products that are of type t-shirt.
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@@ -7,7 +7,7 @@ filter.
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /logs/_bulk?refresh
 { "index" : { "_id" : 1 } }
@@ -32,7 +32,6 @@ GET logs/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 In the above example, we analyze log messages. The aggregation will build two
 collection (buckets) of log messages - one for all those containing an error,
@@ -40,7 +39,7 @@ and another for all those containing a warning.
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 9,
@@ -70,7 +69,7 @@ Response:
 The filters field can also be provided as an array of filters, as in the
 following request:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET logs/_search
 {
@@ -87,13 +86,12 @@ GET logs/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 The filtered buckets are returned in the same order as provided in the
 request. The response for this example would be:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 4,
@@ -133,7 +131,7 @@ this parameter will implicitly set the `other_bucket` parameter to `true`.
 
 The following snippet shows a response where the `other` bucket is requested to be named `other_messages`.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT logs/_doc/4?refresh
 {
@@ -156,12 +154,11 @@ GET logs/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 The response would be something like the following:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 3,

@@ -3,7 +3,7 @@
 
 A multi-bucket aggregation that works on `geo_point` fields and conceptually works very similar to the <<search-aggregations-bucket-range-aggregation,range>> aggregation. The user can define a point of origin and a set of distance range buckets. The aggregation evaluate the distance of each document value from the origin point and determines the buckets it belongs to based on the ranges (a document belongs to a bucket if the distance between the document and the origin falls within the distance range of the bucket).
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /museums
 {
@@ -47,11 +47,10 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -90,7 +89,7 @@ The specified field must be of type `geo_point` (which can only be set explicitl
 
 By default, the distance unit is `m` (meters) but it can also accept: `mi` (miles), `in` (inches), `yd` (yards), `km` (kilometers), `cm` (centimeters), `mm` (millimeters).
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /museums/_search?size=0
 {
@@ -110,14 +109,13 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 <1> The distances will be computed in kilometers
 
 There are two distance calculation modes: `arc` (the default), and `plane`. The `arc` calculation is the most accurate. The `plane` is the fastest but least accurate. Consider using `plane` when your search context is "narrow", and spans smaller geographical areas (~5km). `plane` will return higher error margins for searches across very large areas (e.g. cross continent search). The distance calculation type can be set using the `distance_type` parameter:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /museums/_search?size=0
 {
@@ -138,14 +136,13 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 ==== Keyed Response
 
 Setting the `keyed` flag to `true` will associate a unique string key with each bucket and return the ranges as a hash rather than an array:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /museums/_search?size=0
 {
@@ -165,12 +162,11 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -200,7 +196,7 @@ Response:
 
 It is also possible to customize the key for each range:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /museums/_search?size=0
 {
@@ -220,12 +216,11 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@@ -17,7 +17,7 @@ The specified field must be of type `geo_point` (which can only be set explicitl
 
 ==== Simple low-precision request
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /museums
 {
@@ -56,11 +56,10 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -90,7 +89,7 @@ Response:
 
 When requesting detailed buckets (typically for displaying a "zoomed in" map) a filter like <<query-dsl-geo-bounding-box-query,geo_bounding_box>> should be applied to narrow the subject area otherwise potentially millions of buckets will be created and returned.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /museums/_search?size=0
 {
@@ -116,13 +115,12 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 The geohashes returned by the `geohash_grid` aggregation can be also used for zooming in. To zoom into the
 first geohash `u17` returned in the previous example, it should be specified as both `top_left` and `bottom_right` corner:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /museums/_search?size=0
 {
@@ -148,10 +146,9 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@@ -30,7 +30,7 @@ fields, in which case all points will be taken into account during aggregation.
 
 ==== Simple low-precision request
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /museums
 {
@@ -69,11 +69,10 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -106,7 +105,7 @@ a filter like <<query-dsl-geo-bounding-box-query,geo_bounding_box>> should be
 applied to narrow the subject area otherwise potentially millions of buckets
 will be created and returned.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /museums/_search?size=0
 {
@@ -132,10 +131,9 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -11,7 +11,7 @@ NOTE: Global aggregators can only be placed as top level aggregators because
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -29,7 +29,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 <1> The `global` aggregation has an empty body
@@ -42,7 +41,7 @@ all products in our catalog, not just on the "shirts").
 
 The response for the above aggregation:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

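The body of the `global` example is elided by the hunk above; a sketch of what such a request typically looks like (the `t-shirt` query and the aggregation names are assumptions for illustration, not taken from the file):

[source,console]
--------------------------------------------------
POST /sales/_search?size=0
{
    "query": {
        "match": { "type": "t-shirt" }
    },
    "aggs": {
        "all_products": {
            "global": {},
            "aggs": {
                "avg_price": { "avg": { "field": "price" } }
            }
        }
    }
}
--------------------------------------------------
// Sketch under assumptions; a `global` bucket ignores the query scope and sees all documents.
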
@@ -19,7 +19,7 @@ The `interval` must be a positive decimal, while the `offset` must be a decimal
 
 The following snippet "buckets" the products based on their `price` by interval of `50`:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -33,12 +33,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 And the following may be the response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -78,7 +77,7 @@ The response above show that no documents has a price that falls within the rang
 response will fill gaps in the histogram with empty buckets. It is possible change that and request buckets with
 a higher minimum count thanks to the `min_doc_count` setting:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -93,12 +92,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -154,7 +152,7 @@ under a range `filter` aggregation with the appropriate `from`/`to` settings.
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -175,7 +173,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 ==== Order
@@ -199,7 +196,7 @@ documents.
 By default, the buckets are returned as an ordered array. It is also possible to request the response as a hash
 instead keyed by the buckets keys:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -214,12 +211,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -259,7 +255,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they
 had a value.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -274,7 +270,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 <1> Documents without a value in the `quantity` field will fall into the same bucket as documents that have the value `0`.

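The histogram request bodies are elided by the hunks above; combining the surrounding prose, a minimal sketch of a `histogram` request with an `interval` of `50` and a `min_doc_count` (the value `1` is assumed for illustration) would be:

[source,console]
--------------------------------------------------
POST /sales/_search?size=0
{
    "aggs": {
        "prices": {
            "histogram": {
                "field": "price",
                "interval": 50,
                "min_doc_count": 1
            }
        }
    }
}
--------------------------------------------------
// Illustrative sketch only; the elided bodies in the hunks above may differ.
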
@@ -5,7 +5,7 @@ Just like the dedicated <<search-aggregations-bucket-daterange-aggregation,date>
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /ip_addresses/_search
 {
@@ -23,12 +23,11 @@ GET /ip_addresses/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:iprange]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -55,7 +54,7 @@ Response:
 
 IP ranges can also be defined as CIDR masks:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /ip_addresses/_search
 {
@@ -73,12 +72,11 @@ GET /ip_addresses/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:iprange]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -109,7 +107,7 @@ Response:
 
 Setting the `keyed` flag to `true` will associate a unique string key with each bucket and return the ranges as a hash rather than an array:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /ip_addresses/_search
 {
@@ -128,12 +126,11 @@ GET /ip_addresses/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:iprange]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -158,7 +155,7 @@ Response:
 
 It is also possible to customize the key for each range:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /ip_addresses/_search
 {
@@ -177,12 +174,11 @@ GET /ip_addresses/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:iprange]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

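As a hedged sketch of the CIDR variant whose body is elided above (the `ip` field name and the `/25` masks are assumptions for illustration):

[source,console]
--------------------------------------------------
GET /ip_addresses/_search
{
    "size": 0,
    "aggs": {
        "ip_ranges": {
            "ip_range": {
                "field": "ip",
                "ranges": [
                    { "mask": "10.0.0.0/25" },
                    { "mask": "10.0.0.127/25" }
                ]
            }
        }
    }
}
--------------------------------------------------
// Sketch only; each "mask" entry defines one bucket covering that CIDR block.
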
@@ -5,7 +5,7 @@ A field data based single bucket aggregation, that creates a bucket of all docum
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -16,14 +16,13 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 In the above example, we get the total number of products that do not have a price.
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

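The `missing` request body is elided in the hunk above; a minimal sketch consistent with the surrounding prose (the aggregation name is an assumption):

[source,console]
--------------------------------------------------
POST /sales/_search?size=0
{
    "aggs": {
        "products_without_a_price": {
            "missing": { "field": "price" }
        }
    }
}
--------------------------------------------------
// Sketch only; buckets every document in `sales` that has no `price` value.
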
@@ -6,7 +6,7 @@ A special single bucket aggregation that enables aggregating nested documents.
 For example, lets say we have an index of products, and each product holds the list of resellers - each having its own
 price for the product. The mapping could look like:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /index
 {
@@ -23,13 +23,12 @@ PUT /index
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TESTSETUP
 <1> The `resellers` is an array that holds nested documents under the `product` object.
 
 The following aggregations will return the minimum price products can be purchased in:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -48,7 +47,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/]
 // TEST[s/^/PUT index\/_doc\/0\?refresh\n{"name":"led", "resellers": [{"name": "foo", "price": 350.00}, {"name": "bar", "price": 500.00}]}\n/]
 
@@ -57,7 +55,7 @@ Then one can define any type of aggregation over these nested documents.
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

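A hedged sketch of the elided `nested` request, following the `resellers` mapping and the `led` test document visible in the test directives above (the `match` query text is an assumption):

[source,console]
--------------------------------------------------
GET /_search
{
    "query": {
        "match": { "name": "led tv" }
    },
    "aggs": {
        "resellers": {
            "nested": { "path": "resellers" },
            "aggs": {
                "min_price": { "min": { "field": "resellers.price" } }
            }
        }
    }
}
--------------------------------------------------
// Sketch only; the nested path and price field follow the mapping shown in the hunks above.
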
@@ -9,7 +9,7 @@ This aggregation has a single option:
 
 For example, let's say we have an index of questions and answers. The answer type has the following `join` field in the mapping:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT parent_example
 {
@@ -25,7 +25,6 @@ PUT parent_example
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 The `question` document contain a tag field and the `answer` documents contain an owner field. With the `parent`
 aggregation the owner buckets can be mapped to the tag buckets in a single request even though the two fields exist in
@@ -33,7 +32,7 @@ two different kinds of documents.
 
 An example of a question document:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT parent_example/_doc/1
 {
@@ -49,12 +48,11 @@ PUT parent_example/_doc/1
 ]
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 Examples of `answer` documents:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT parent_example/_doc/2?routing=1
 {
@@ -86,12 +84,11 @@ PUT parent_example/_doc/3?routing=1&refresh
 "creation_date": "2009-05-05T13:45:37.030"
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 The following request can be built that connects the two together:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST parent_example/_search?size=0
 {
@@ -120,7 +117,6 @@ POST parent_example/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 <1> The `type` points to type / mapping with the name `answer`.
@@ -129,7 +125,7 @@ The above example returns the top answer owners and per owner the top question t
 
 Possible response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 9,

@@ -6,7 +6,7 @@ Note that this aggregation includes the `from` value and excludes the `to` value
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -24,13 +24,12 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 // TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -64,7 +63,7 @@ Response:
 
 Setting the `keyed` flag to `true` will associate a unique string key with each bucket and return the ranges as a hash rather than an array:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -83,13 +82,12 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 // TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -118,7 +116,7 @@ Response:
 
 It is also possible to customize the key for each range:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -137,13 +135,12 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 // TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -177,7 +174,7 @@ will be executed during aggregation execution.
 
 The following example shows how to use an `inline` script with the `painless` script language and no script parameters:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -198,11 +195,10 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 It is also possible to use stored scripts. Here is a simple stored script:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_scripts/convert_currency
 {
@@ -212,12 +208,11 @@ POST /_scripts/convert_currency
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 And this new stored script can be used in the range aggregation like this:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -240,7 +235,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/]
 // TEST[continued]
 <1> Id of the stored script
@@ -248,7 +242,7 @@ GET /_search
 
 //////////////////////////
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "aggregations": {
@@ -270,7 +264,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// TESTRESPONSE
 
 //////////////////////////
 
@@ -278,7 +271,7 @@ GET /_search
 
 Lets say the product prices are in USD but we would like to get the price ranges in EURO. We can use value script to convert the prices prior the aggregation (assuming conversion rate of 0.8)
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /sales/_search
 {
@@ -302,14 +295,13 @@ GET /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 
 ==== Sub Aggregations
 
 The following example, not only "bucket" the documents to the different buckets but also computes statistics over the prices in each price range
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -332,13 +324,12 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 // TEST[s/GET \/_search/GET \/_search\?filter_path=aggregations/]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -391,7 +382,7 @@ Response:
 
 If a sub aggregation is also based on the same value source as the range aggregation (like the `stats` aggregation in the example above) it is possible to leave out the value source definition for it. The following will return the same response as above:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -414,5 +405,4 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 <1> We don't need to specify the `price` as we "inherit" it by default from the parent `range` aggregation

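The sub-aggregation example whose body is elided above relies on value-source inheritance; a hedged sketch of that shape (the range boundaries are assumptions for illustration):

[source,console]
--------------------------------------------------
GET /_search
{
    "aggs": {
        "price_ranges": {
            "range": {
                "field": "price",
                "ranges": [
                    { "to": 100 },
                    { "from": 100, "to": 200 },
                    { "from": 200 }
                ]
            },
            "aggs": {
                "price_stats": { "stats": {} }
            }
        }
    }
}
--------------------------------------------------
// Sketch only; `stats` omits `field` and inherits `price` from the parent `range` aggregation.
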
@@ -85,7 +85,7 @@ better approximation, but higher memory usage. Cannot be smaller than `0.00001`
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -98,12 +98,11 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/_search/_search\?filter_path=aggregations/]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -124,7 +123,7 @@ Response:
 In this example, the only bucket that we see is the "swing" bucket, because it is the only term that appears in
 one document. If we increase the `max_doc_count` to `2`, we'll see some more buckets:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -138,12 +137,11 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/_search/_search\?filter_path=aggregations/]
 
 This now shows the "jazz" term which has a `doc_count` of 2":
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -275,7 +273,7 @@ It is possible to filter the values for which buckets will be created. This can
 
 ===== Filtering Values with regular expressions
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -290,7 +288,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 In the above example, buckets will be created for all the tags that starts with `swi`, except those starting
 with `electro` (so the tag `swing` will be aggregated but not `electro_swing`). The `include` regular expression will determine what
@@ -304,7 +301,7 @@ The syntax is the same as <<regexp-syntax,regexp queries>>.
 For matching based on exact values the `include` and `exclude` parameters can simply take an array of
 strings that represent the terms as they are found in the index:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -319,7 +316,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 
 ==== Missing value
@@ -328,7 +324,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they
 had a value.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -342,7 +338,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 <1> Documents without a value in the `tags` field will fall into the same bucket as documents that have the value `N/A`.
 

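Given the "swing"/"jazz" discussion above, a hedged sketch of the elided `rare_terms` request (the `genre` field name is an assumption drawn from that discussion):

[source,console]
--------------------------------------------------
GET /_search
{
    "aggs": {
        "genres": {
            "rare_terms": {
                "field": "genre",
                "max_doc_count": 2
            }
        }
    }
}
--------------------------------------------------
// Sketch only; buckets terms that appear in at most 2 documents.
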
@@ -15,7 +15,7 @@ a nested object field that falls outside the `nested` aggregation's nested struc
 For example, lets say we have an index for a ticket system with issues and comments. The comments are inlined into
 the issue documents as nested documents. The mapping could look like:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /issues
 {
@@ -33,7 +33,7 @@ PUT /issues
 }
 }
 --------------------------------------------------
-// CONSOLE
 <1> The `comments` is an array that holds nested documents under the `issue` object.
 
 The following aggregations will return the top commenters' username that have commented and per top commenter the top
@@ -41,17 +41,16 @@ tags of the issues the user has commented on:
 
 //////////////////////////
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /issues/_doc/0?refresh
 {"tags": ["tag_1"], "comments": [{"username": "username_1"}]}
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 
 //////////////////////////
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /issues/_search
 {
@@ -86,7 +85,6 @@ GET /issues/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 // TEST[s/_search/_search\?filter_path=aggregations/]
 
@@ -100,7 +98,7 @@ object types have been defined in the mapping
 
 Possible response snippet:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "aggregations": {

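The long elided request above combines `nested`, `terms` and `reverse_nested`; a hedged sketch of that shape, reusing the `comments` and `tags` fields from the mapping (the aggregation names are assumptions):

[source,console]
--------------------------------------------------
GET /issues/_search
{
    "aggs": {
        "comments": {
            "nested": { "path": "comments" },
            "aggs": {
                "top_usernames": {
                    "terms": { "field": "comments.username" },
                    "aggs": {
                        "comment_to_issue": {
                            "reverse_nested": {},
                            "aggs": {
                                "top_tags_per_comment": {
                                    "terms": { "field": "tags" }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
--------------------------------------------------
// Sketch only; `reverse_nested` with an empty body joins back to the root issue document.
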
@@ -15,7 +15,7 @@ A query on StackOverflow data for the popular term `javascript` OR the rarer ter
 the `significant_terms` aggregation on top-scoring documents that are more likely to match
 the most interesting parts of our query we use a sample.
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /stackoverflow/_search?size=0
 {
@@ -41,12 +41,11 @@ POST /stackoverflow/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:stackoverflow]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -85,7 +84,7 @@ Without the `sampler` aggregation the request query considers the full "long tai
 less significant terms such as `jquery` and `angular` rather than focusing on the more insightful Kibana-related terms.
 
 
-[source,js]
+[source,console]
 --------------------------------------------------
 POST /stackoverflow/_search?size=0
 {
@@ -105,12 +104,11 @@ POST /stackoverflow/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:stackoverflow]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@@ -64,7 +64,7 @@ set used for statistical comparisons is the index or indices from which the resu
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -78,12 +78,11 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/_search/_search\?filter_path=aggregations/]
 
 Response:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 {
 ...
@@ -125,7 +124,7 @@ A simpler way to perform analysis across multiple categories is to use a parent-
 
 Example using a parent aggregation for segmentation:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -141,12 +140,11 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/_search/_search\?filter_path=aggregations/]
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -203,7 +201,7 @@ Now we have anomaly detection for each of the police forces using a single reque
 We can use other forms of top-level aggregations to segment our data, for example segmenting by geographic
 area to identify unusual hot-spots of a particular crime type:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -222,7 +220,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 This example uses the `geohash_grid` aggregation to create result buckets that represent geographic areas, and inside each
 bucket we can identify anomalous levels of a crime type in these tightly-focused areas e.g.
@@ -464,7 +461,7 @@ NOTE: `shard_size` cannot be smaller than `size` (as it doesn't make much sens
 
 It is possible to only return terms that match more than a configured number of hits using the `min_doc_count` option:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -478,7 +475,7 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 The above aggregation would only return tags which have been found in 10 hits or more. Default value is `3`.
 
 
@@ -507,7 +504,7 @@ The default source of statistical information for background term frequencies is
 scope can be narrowed through the use of a `background_filter` to focus in on significant terms within a narrower
 context:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -528,7 +525,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 The above filter would help focus in on terms that were peculiar to the city of Madrid rather than revealing
 terms like "Spanish" that are unusual in the full index's worldwide context but commonplace in the subset of documents containing the
@@ -566,7 +562,7 @@ is significantly faster. By default, `map` is only used when running an aggregat
 ordinals.
 
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@@ -580,7 +576,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 
 <1> the possible values are `map`, `global_ordinals`
 

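A hedged sketch of the elided `background_filter` request discussed above (the `madrid` and `spain` terms and the field names are assumptions taken from the prose):

[source,console]
--------------------------------------------------
GET /_search
{
    "query": {
        "match": { "city": "madrid" }
    },
    "aggs": {
        "tags": {
            "significant_terms": {
                "field": "tag",
                "background_filter": {
                    "term": { "text": "spain" }
                }
            }
        }
    }
}
--------------------------------------------------
// Sketch only; narrows the background set to documents matching the filter.
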
@@ -32,7 +32,7 @@ and the _background_set used for statistical comparisons is the index or indices
 
 Example:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET news/_search
 {
@@ -53,13 +53,12 @@ GET news/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:news]
 
 
 Response:
 
-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 9,
@@ -147,7 +146,7 @@ The uncleansed documents have thrown up some odd-looking terms that are, on the
 correlated with appearances of our search term "elasticsearch" e.g. "pozmantier".
 We can drill down into examples of these documents to see why pozmantier is connected using this query:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET news/_search
 {
@@ -167,8 +166,8 @@ GET news/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:news]
 
 The results show a series of very similar news articles about a judging panel for a number of tech projects:
 
 [source,js]
@@ -215,7 +214,7 @@ Fortunately similar documents tend to rank similarly so as part of examining the
 aggregation can apply a filter to remove sequences of any 6 or more tokens that have already been seen. Let's try this same query now but
 with the `filter_duplicate_text` setting turned on:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET news/_search
 {
@@ -241,7 +240,6 @@ GET news/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:news]
 
 The results from analysing our deduplicated text are obviously of higher quality to anyone familiar with the elastic stack:
@@ -418,7 +416,7 @@ The default source of statistical information for background term frequencies is
 scope can be narrowed through the use of a `background_filter` to focus in on significant terms within a narrower
 context:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET news/_search
 {
@@ -439,7 +437,6 @@ GET news/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:news]
 
 The above filter would help focus in on terms that were peculiar to the city of Madrid rather than revealing
@@ -457,7 +454,7 @@ JSON field(s) and the indexed field being aggregated can differ.
 In these cases it is possible to list the JSON _source fields from which text
 will be analyzed using the `source_fields` parameter:
 
-[source,js]
+[source,console]
 --------------------------------------------------
 GET news/_search
 {
@@ -476,7 +473,6 @@ GET news/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:news]
 
 

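A hedged sketch of the elided `source_fields` request (the `custom_all`, `content` and `title` field names are assumptions consistent with the prose above):

[source,console]
--------------------------------------------------
GET news/_search
{
    "query": { "match": { "content": "elasticsearch" } },
    "aggs": {
        "sample": {
            "sampler": { "shard_size": 100 },
            "aggs": {
                "keywords": {
                    "significant_text": {
                        "field": "custom_all",
                        "source_fields": ["content", "title"]
                    }
                }
            }
        }
    }
}
--------------------------------------------------
// Sketch only; text is re-analyzed from the listed _source fields rather than the indexed field.
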
@ -53,7 +53,7 @@ POST /products/_bulk?refresh
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
[source,js]
|
[source,console]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET /_search
|
GET /_search
|
||||||
{
|
{
|
||||||
|
@ -64,14 +64,14 @@ GET /_search
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
|
||||||
// TEST[s/_search/_search\?filter_path=aggregations/]
|
// TEST[s/_search/_search\?filter_path=aggregations/]
|
||||||
|
|
||||||
<1> `terms` aggregation should be a field of type `keyword` or any other data type suitable for bucket aggregations. In order to use it with `text` you will need to enable
|
<1> `terms` aggregation should be a field of type `keyword` or any other data type suitable for bucket aggregations. In order to use it with `text` you will need to enable
|
||||||
<<fielddata, fielddata>>.
|
<<fielddata, fielddata>>.
|
||||||
|
|
||||||
Response:
|
Response:
|
||||||
|
|
||||||
[source,js]
|
[source,console-result]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
{
|
{
|
||||||
...
|
...
|
||||||
|
@ -98,6 +98,7 @@ Response:
|
||||||
}
|
}
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// TESTRESPONSE[s/\.\.\.//]
|
// TESTRESPONSE[s/\.\.\.//]
|
||||||
|
|
||||||
<1> an upper bound of the error on the document counts for each term, see <<search-aggregations-bucket-terms-aggregation-approximate-counts,below>>
|
<1> an upper bound of the error on the document counts for each term, see <<search-aggregations-bucket-terms-aggregation-approximate-counts,below>>
|
||||||
<2> when there are lots of unique terms, Elasticsearch only returns the top terms; this number is the sum of the document counts for all buckets that are not part of the response
|
 <2> when there are lots of unique terms, Elasticsearch only returns the top terms; this number is the sum of the document counts for all buckets that are not part of the response
 <3> the list of the top buckets, the meaning of `top` being defined by the <<search-aggregations-bucket-terms-aggregation-order,order>>

@ -130,7 +131,7 @@ combined to give a final view. Consider the following scenario:
 A request is made to obtain the top 5 terms in the field product, ordered by descending document count from an index with
 3 shards. In this case each shard is asked to give its top 5 terms.

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -144,7 +145,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/_search/_search\?filter_path=aggregations/]

 The terms for each of the three shards are shown below with their
@ -230,7 +230,7 @@ terms. This is calculated as the sum of the document count from the last term re
 given above the value would be 46 (2 + 15 + 29). This means that in the worst case scenario a term which was not returned
 could have the 4th highest document count.

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -260,7 +260,7 @@ could have the 4th highest document count.

 The second error value can be enabled by setting the `show_term_doc_count_error` parameter to true:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -275,7 +275,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/_search/_search\?filter_path=aggregations/]


@ -287,7 +286,7 @@ The actual document count of Product C was 54 so the document count was only act
 it would be off by 15. Product A, however, has an error of 0 for its document count; since every shard returned it, we can be confident
 that the count returned is accurate.

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -338,7 +337,7 @@ but at least the top buckets will be correctly picked.
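Raising `shard_size` above `size` is the usual way to shrink these error bounds. A minimal sketch of such a request (the index and field names are assumptions, not part of the snippets in this change):

[source,console]
--------------------------------------------------
GET /_search
{
    "aggs": {
        "products": {
            "terms": {
                "field": "product",
                "size": 5,
                "shard_size": 25 <1>
            }
        }
    }
}
--------------------------------------------------
<1> each shard returns its top 25 candidate terms even though only 5 buckets are returned to the client, which tightens the worst-case error reported in `doc_count_error_upper_bound`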

 Ordering the buckets by their doc `_count` in an ascending manner:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -352,11 +351,10 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 Ordering the buckets alphabetically by their terms in an ascending manner:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -370,13 +368,12 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 deprecated[6.0.0, Use `_key` instead of `_term` to order buckets by their term]

 Ordering the buckets by single value metrics sub-aggregation (identified by the aggregation name):

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -393,11 +390,10 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

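As a concrete sketch of this pattern (the aggregation and field names here are illustrative assumptions), the `order` key refers to the sub-aggregation by name:

[source,console]
--------------------------------------------------
GET /_search
{
    "aggs": {
        "genres": {
            "terms": {
                "field": "genre",
                "order": { "max_play_count": "desc" } <1>
            },
            "aggs": {
                "max_play_count": { "max": { "field": "play_count" } }
            }
        }
    }
}
--------------------------------------------------
<1> buckets are sorted by the value of the `max_play_count` sub-aggregation rather than by `doc_count`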
 Ordering the buckets by multi value metrics sub-aggregation (identified by the aggregation name):

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -414,7 +410,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 [NOTE]
 .Pipeline aggs cannot be used for sorting
@ -444,7 +439,7 @@ METRIC = <the name of the metric (in case of multi-value metrics a
 PATH = <AGG_NAME> [ <AGG_SEPARATOR>, <AGG_NAME> ]* [ <METRIC_SEPARATOR>, <METRIC> ] ;
 --------------------------------------------------

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -466,13 +461,12 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 The above will sort the artist's countries buckets based on the average play count among the rock songs.

 Multiple criteria can be used to order the buckets by providing an array of order criteria such as the following:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -494,7 +488,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 The above will sort the artist's countries buckets based on the average play count among the rock songs and then by
 their `doc_count` in descending order.
@ -506,7 +499,7 @@ tie-breaker in ascending alphabetical order to prevent non-deterministic orderin

 It is possible to only return terms that match more than a configured number of hits using the `min_doc_count` option:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -520,7 +513,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 The above aggregation would only return tags which have been found in 10 hits or more. The default value is `1`.

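For reference, the elided request above has roughly this shape (a sketch assuming a `tags` field):

[source,console]
--------------------------------------------------
GET /_search
{
    "aggs": {
        "tags": {
            "terms": {
                "field": "tags",
                "min_doc_count": 10 <1>
            }
        }
    }
}
--------------------------------------------------
<1> buckets whose document count is below 10 are dropped from the response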

@ -548,7 +540,7 @@ WARNING: When NOT sorting on `doc_count` descending, high values of `min_doc_cou

 Generating the terms using a script:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -564,13 +556,12 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 This will interpret the `script` parameter as an `inline` script with the default script language and no script parameters. To use a stored script use the following syntax:

 //////////////////////////

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_scripts/my_script
 {
@ -580,11 +571,10 @@ POST /_scripts/my_script
 }
 }
 --------------------------------------------------
-// CONSOLE

 //////////////////////////

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -602,12 +592,11 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]

 ==== Value Script

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -624,7 +613,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 ==== Filtering Values

@ -634,7 +622,7 @@ It is possible to filter the values for which buckets will be created. This can

 ===== Filtering Values with regular expressions

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -649,7 +637,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 In the above example, buckets will be created for all the tags that have the word `sport` in them, except those starting
 with `water_` (so the tag `water_sports` will not be aggregated). The `include` regular expression will determine what
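A sketch of the request described above (the field name is an assumption), using regular expressions for both parameters:

[source,console]
--------------------------------------------------
GET /_search
{
    "aggs": {
        "tags": {
            "terms": {
                "field": "tags",
                "include": ".*sport.*",
                "exclude": "water_.*"
            }
        }
    }
}
--------------------------------------------------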

@ -663,7 +650,7 @@ The syntax is the same as <<regexp-syntax,regexp queries>>.

 For matching based on exact values the `include` and `exclude` parameters can simply take an array of
 strings that represent the terms as they are found in the index:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -683,7 +670,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 ===== Filtering Values with partitions

@ -693,7 +679,7 @@ This can be achieved by grouping the field's values into a number of partitions
 only one partition in each request.
 Consider this request, which is looking for accounts that have not logged any access recently:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -722,7 +708,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 This request is finding the last logged access date for a subset of customer accounts because we
 might want to expire some customer accounts who haven't been seen for a long while (see the sketch below).
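A minimal sketch of the partitioning pattern under stated assumptions (field names, partition count, and the sub-aggregation are illustrative): the client walks `partition` from `0` to `num_partitions - 1` across successive requests, and each request only considers the values that hash into that partition.

[source,console]
--------------------------------------------------
GET /_search
{
    "size": 0,
    "aggs": {
        "expired_sessions": {
            "terms": {
                "field": "account_id",
                "include": {
                    "partition": 0, <1>
                    "num_partitions": 20
                },
                "size": 10000
            },
            "aggs": {
                "last_access": { "max": { "field": "access_date" } }
            }
        }
    }
}
--------------------------------------------------
<1> increment this value in each follow-up request until all 20 partitions have been visited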

@ -786,7 +771,7 @@ are expanded in one depth-first pass and only then any pruning occurs.
 In some scenarios this can be very wasteful and can hit memory constraints.
 An example problem scenario is querying a movie database for the 10 most popular actors and their 5 most common co-stars:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -808,7 +793,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 Even though the number of actors may be comparatively small and we want only 50 result buckets, there is a combinatorial explosion of buckets
 during calculation - a single actor can produce n² buckets where n is the number of actors. The sane option would be to first determine
@ -818,7 +802,7 @@ mode as opposed to the `depth_first` mode.
 NOTE: The `breadth_first` is the default mode for fields with a cardinality bigger than the requested size or when the cardinality is unknown (numeric fields or scripts for instance).
 It is possible to override the default heuristic and to provide a collect mode directly in the request:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -841,7 +825,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 <1> the possible values are `breadth_first` and `depth_first`

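The elided request above follows roughly this shape — a sketch, with the field name assumed:

[source,console]
--------------------------------------------------
GET /_search
{
    "aggs": {
        "actors": {
            "terms": {
                "field": "actors",
                "size": 10,
                "collect_mode": "breadth_first" <1>
            },
            "aggs": {
                "costars": {
                    "terms": { "field": "actors", "size": 5 }
                }
            }
        }
    }
}
--------------------------------------------------
<1> the `costars` sub-aggregation is only computed for the 10 surviving `actors` buckets, avoiding the n² intermediate buckets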

@ -870,7 +853,7 @@ so memory usage is linear to the number of values of the documents that are part
 is significantly faster. By default, `map` is only used when running an aggregation on scripts, since they don't have
 ordinals.

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -884,7 +867,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 <1> The possible values are `map`, `global_ordinals`

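For illustration, a sketch of how the hint is supplied (the field name is an assumption):

[source,console]
--------------------------------------------------
GET /_search
{
    "aggs": {
        "tags": {
            "terms": {
                "field": "tags",
                "execution_hint": "map" <1>
            }
        }
    }
}
--------------------------------------------------
<1> `map` hashes values per bucket instead of using global ordinals; it tends to pay off only when few documents match the query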

@ -896,7 +878,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they
 had a value.

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -910,7 +892,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 <1> Documents without a value in the `tags` field will fall into the same bucket as documents that have the value `N/A`.

@ -35,7 +35,7 @@ POST /_refresh

 The following example demonstrates the use of matrix stats to describe the relationship between income and poverty.

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -48,13 +48,12 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[s/_search/_search\?filter_path=aggregations/]

 The aggregation type is `matrix_stats` and the `fields` setting defines the set of fields (as an array) for computing
 the statistics. The above request returns the following response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -119,7 +118,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they had a value.
 This is done by adding a set of fieldname : value mappings to specify default values per field.

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /_search
 {
@ -133,7 +132,6 @@ GET /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 <1> Documents without a value in the `income` field will have the default value `50000`.
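A sketch of the per-field defaults this callout describes (the second field name is an assumption based on the example above):

[source,console]
--------------------------------------------------
GET /_search
{
    "aggs": {
        "matrixstats": {
            "matrix_stats": {
                "fields": ["poverty", "income"],
                "missing": { "income": 50000 } <1>
            }
        }
    }
}
--------------------------------------------------
<1> only `income` gets a default here; documents missing `poverty` are still ignored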

@ -6,7 +6,7 @@ A `single-value` metrics aggregation that computes the average of numeric values
 Assuming the data consists of documents representing exams grades (between 0
 and 100) of students, we can average their scores with:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@ -15,12 +15,11 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 The above aggregation computes the average grade over all documents. The aggregation type is `avg` and the `field` setting defines the numeric field of the documents the average will be computed on. The above will return the following:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -39,7 +38,7 @@ The name of the aggregation (`avg_grade` above) also serves as the key by which
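For reference, the request body elided by the hunk above has this minimal shape (a sketch; the field name `grade` comes from the surrounding callouts):

[source,console]
--------------------------------------------------
POST /exams/_search?size=0
{
    "aggs": {
        "avg_grade": { "avg": { "field": "grade" } }
    }
}
--------------------------------------------------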

 Computing the average grade based on a script:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@ -54,12 +53,11 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@ -77,14 +75,13 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams,stored_example_script]

 ===== Value Script

 It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use a value script to get the new average:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@ -104,7 +101,6 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 ==== Missing value

@ -113,7 +109,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they
 had a value.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@ -127,7 +123,6 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 <1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `10`.

@ -7,7 +7,7 @@ document or generated by a script.

 Assume you are indexing store sales and would like to count the unique number of sold products that match a query:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@ -20,12 +20,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -42,7 +41,7 @@ Response:

 This aggregation also supports the `precision_threshold` option:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@ -56,7 +55,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> The `precision_threshold` option allows trading memory for accuracy, and
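A sketch of a request using this option (the field name is an assumption):

[source,console]
--------------------------------------------------
POST /sales/_search?size=0
{
    "aggs": {
        "type_count": {
            "cardinality": {
                "field": "type",
                "precision_threshold": 100 <1>
            }
        }
    }
}
--------------------------------------------------
<1> counts up to roughly this threshold are expected to be close to exact; beyond it the HyperLogLog++ estimate becomes progressively more approximate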

@ -183,7 +181,7 @@ make sure that hashes are computed at most once per unique value per segment.
 The `cardinality` metric supports scripting, with a noticeable performance hit
 however, since hashes need to be computed on the fly.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@ -199,12 +197,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@ -223,7 +220,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[skip:no script]

 ==== Missing value

@ -232,7 +228,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they
 had a value.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@ -246,6 +242,5 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 <1> Documents without a value in the `tag` field will fall into the same bucket as documents that have the value `N/A`.

@ -7,7 +7,7 @@ The `extended_stats` aggregations is an extended version of the <<search-aggrega

 Assuming the data consists of documents representing exams grades (between 0 and 100) of students

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /exams/_search
 {
@ -17,13 +17,12 @@ GET /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 The above aggregation computes the grades statistics over all documents. The aggregation type is `extended_stats` and the `field` setting defines the numeric field of the documents the stats will be computed on. The above will return the following:


-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -55,7 +54,7 @@ By default, the `extended_stats` metric will return an object called `std_deviat
 deviations from the mean. This can be a useful way to visualize variance of your data. If you want a different boundary, for example
 three standard deviations, you can set `sigma` in the request:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /exams/_search
 {
@ -70,7 +69,6 @@ GET /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]
 <1> `sigma` controls how many standard deviations +/- from the mean should be displayed

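The elided request has roughly this shape — a sketch:

[source,console]
--------------------------------------------------
GET /exams/_search
{
    "size": 0,
    "aggs": {
        "grades_stats": {
            "extended_stats": {
                "field": "grade",
                "sigma": 3 <1>
            }
        }
    }
}
--------------------------------------------------
<1> widens `std_deviation_bounds` to three standard deviations on either side of the mean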

@ -89,7 +87,7 @@ if your data is skewed heavily left or right, the value returned will be mislead

 Computing the grades stats based on a script:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /exams/_search
 {
@ -106,12 +104,11 @@ GET /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /exams/_search
 {
@ -130,14 +127,13 @@ GET /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams,stored_example_script]

 ===== Value Script

 It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use a value script to get the new stats:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /exams/_search
 {
@ -158,7 +154,6 @@ GET /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 ==== Missing value

@ -167,7 +162,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they
 had a value.

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /exams/_search
 {
@ -182,7 +177,6 @@ GET /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 <1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `0`.

@ -6,7 +6,7 @@ A metric aggregation that computes the bounding box containing all geo_point val

 Example:

-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /museums
 {
@ -48,7 +48,6 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE

 <1> The `geo_bounds` aggregation specifies the field to use to obtain the bounds
 <2> `wrap_longitude` is an optional parameter which specifies whether the bounding box should be allowed to overlap the international date line. The default value is `true`
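The aggregation portion of the elided request looks roughly like this — a sketch assuming a geo_point field named `location`:

[source,console]
--------------------------------------------------
POST /museums/_search?size=0
{
    "aggs": {
        "viewport": {
            "geo_bounds": {
                "field": "location", <1>
                "wrap_longitude": true <2>
            }
        }
    }
}
--------------------------------------------------
<1> must be a <<geo-point>> field
<2> allows the returned box to cross the international date line when that yields a smaller bounding box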

@ -57,7 +56,7 @@ The above aggregation demonstrates how one would compute the bounding box of the

 The response for the above aggregation:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@ -5,7 +5,7 @@ A metric aggregation that computes the weighted https://en.wikipedia.org/wiki/Ce

 Example:

-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /museums
 {
@ -43,7 +43,6 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE

 <1> The `geo_centroid` aggregation specifies the field to use for computing the centroid. (NOTE: field must be a <<geo-point>> type)

@ -51,7 +50,7 @@ The above aggregation demonstrates how one would compute the centroid of the loc

 The response for the above aggregation:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -72,7 +71,7 @@ The `geo_centroid` aggregation is more interesting when combined as a sub-aggreg

 Example:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /museums/_search?size=0
 {
@ -88,7 +87,6 @@ POST /museums/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]

 The above example uses `geo_centroid` as a sub-aggregation to a
@ -97,7 +95,7 @@ for finding the central location for museums in each city.

 The response for the above aggregation:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...

@ -12,7 +12,7 @@ whose absolute value is greater than +2^53+.

 Computing the max price value across all documents:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@ -21,12 +21,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -48,7 +47,7 @@ response.
 The `max` aggregation can also calculate the maximum of a script. The example
 below computes the maximum price:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@ -63,13 +62,12 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 This will use the <<modules-scripting-painless, Painless>> scripting language
 and no script parameters. To use a stored script use the following syntax:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@ -87,7 +85,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales,stored_example_script]

 ==== Value Script

@ -97,7 +94,7 @@ would like to compute the max in EURO (and for the sake of this example, let's
 say the conversion rate is 1.2). We can use a value script to apply the
 conversion rate to every value before it is aggregated:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@ -116,7 +113,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

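The value-script pattern elided above looks roughly like this — a sketch under the stated 1.2 conversion rate (the aggregation name is an assumption):

[source,console]
--------------------------------------------------
POST /sales/_search
{
    "size": 0,
    "aggs": {
        "max_price_in_euros": {
            "max": {
                "field": "price",
                "script": {
                    "source": "_value * params.conversion_rate",
                    "params": { "conversion_rate": 1.2 }
                }
            }
        }
    }
}
--------------------------------------------------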
 ==== Missing value

@ -125,7 +121,7 @@ The `missing` parameter defines how documents that are missing a value should
 be treated. By default they will be ignored but it is also possible to treat
 them as if they had a value.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@ -139,7 +135,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> Documents without a value in the `grade` field will fall into the same

@ -24,7 +24,7 @@ In this example we have a product which has an average rating of
 3 stars. Let's look at its ratings' median absolute deviation to determine
 how much they vary:

-[source,js]
+[source,console]
 ---------------------------------------------------------
 GET reviews/_search
 {
@ -43,7 +43,6 @@ GET reviews/_search
 }
 }
 ---------------------------------------------------------
-// CONSOLE
 // TEST[setup:reviews]
 <1> `rating` must be a numeric field

@ -51,7 +50,7 @@ The resulting median absolute deviation of `2` tells us that there is a fair
 amount of variability in the ratings. Reviewers must have diverse opinions about
 this product.

-[source,js]
+[source,console-result]
 ---------------------------------------------------------
 {
 ...
@ -84,7 +83,7 @@ cost of higher memory usage. For more about the characteristics of the TDigest
 `compression` parameter see
 <<search-aggregations-metrics-percentile-aggregation-compression>>.

-[source,js]
+[source,console]
 ---------------------------------------------------------
 GET reviews/_search
 {
@ -99,7 +98,6 @@ GET reviews/_search
 }
 }
 ---------------------------------------------------------
-// CONSOLE
 // TEST[setup:reviews]

 The default `compression` value for this aggregation is `1000`. At this
@ -114,7 +112,7 @@ of one to ten, we can using scripting.

 To provide an inline script:

-[source,js]
+[source,console]
 ---------------------------------------------------------
 GET reviews/_search
 {
@ -134,12 +132,11 @@ GET reviews/_search
 }
 }
 ---------------------------------------------------------
-// CONSOLE
 // TEST[setup:reviews]

 To provide a stored script:

-[source,js]
+[source,console]
 ---------------------------------------------------------
 GET reviews/_search
 {
@ -158,7 +155,6 @@ GET reviews/_search
 }
 }
 ---------------------------------------------------------
-// CONSOLE
 // TEST[setup:reviews,stored_example_script]

 ==== Missing value

@ -170,7 +166,7 @@ as if they had a value.
 Let's be optimistic and assume some reviewers loved the product so much that
 they forgot to give it a rating. We'll assign them five stars:

-[source,js]
+[source,console]
 ---------------------------------------------------------
 GET reviews/_search
 {
@ -185,5 +181,4 @@ GET reviews/_search
 }
 }
 ---------------------------------------------------------
-// CONSOLE
 // TEST[setup:reviews]
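A sketch of what that request boils down to (the aggregation name is an assumption):

[source,console]
---------------------------------------------------------
GET reviews/_search
{
    "size": 0,
    "aggs": {
        "review_variability": {
            "median_absolute_deviation": {
                "field": "rating",
                "missing": 5 <1>
            }
        }
    }
}
---------------------------------------------------------
<1> documents with no `rating` are treated as five-star reviews before the deviation is computed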

@ -12,7 +12,7 @@ whose absolute value is greater than +2^53+.

 Computing the min price value across all documents:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@ -21,12 +21,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -49,7 +48,7 @@ response.
 The `min` aggregation can also calculate the minimum of a script. The example
 below computes the minimum price:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@ -64,13 +63,12 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 This will use the <<modules-scripting-painless, Painless>> scripting language
 and no script parameters. To use a stored script use the following syntax:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@ -88,7 +86,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales,stored_example_script]

 ==== Value Script

@ -98,7 +95,7 @@ would like to compute the min in EURO (and for the sake of this example, let's
 say the conversion rate is 1.2). We can use a value script to apply the
 conversion rate to every value before it is aggregated:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@ -117,7 +114,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 ==== Missing value

@ -126,7 +122,7 @@ The `missing` parameter defines how documents that are missing a value should
 be treated. By default they will be ignored but it is also possible to treat
 them as if they had a value.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@ -140,7 +136,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> Documents without a value in the `grade` field will fall into the same

@ -24,7 +24,7 @@ but it can be easily skewed by a single slow response.

 Let's look at a range of percentiles representing load time:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET latency/_search
 {
@ -38,14 +38,13 @@ GET latency/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:latency]
 <1> The field `load_time` must be a numeric field

 By default, the `percentile` metric will generate a range of
 percentiles: `[ 1, 5, 25, 50, 75, 95, 99 ]`. The response will look like this:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@ -76,7 +75,7 @@ Often, administrators are only interested in outliers -- the extreme percentiles
 We can specify just the percents we are interested in (requested percentiles
 must be a value between 0-100 inclusive):

-[source,js]
+[source,console]
 --------------------------------------------------
 GET latency/_search
 {
@ -91,7 +90,6 @@ GET latency/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:latency]
 <1> Use the `percents` parameter to specify particular percentiles to calculate

|
@ -99,7 +97,7 @@ GET latency/_search
|
||||||
|
|
||||||
By default the `keyed` flag is set to `true` which associates a unique string key with each bucket and returns the ranges as a hash rather than an array. Setting the `keyed` flag to `false` will disable this behavior:
|
By default the `keyed` flag is set to `true` which associates a unique string key with each bucket and returns the ranges as a hash rather than an array. Setting the `keyed` flag to `false` will disable this behavior:
|
||||||
|
|
||||||
[source,js]
|
[source,console]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET latency/_search
|
GET latency/_search
|
||||||
{
|
{
|
||||||
|
@ -114,12 +112,11 @@ GET latency/_search
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
|
||||||
// TEST[setup:latency]
|
// TEST[setup:latency]
|
||||||
|
|
||||||
Response:
|
Response:
|
||||||
|
|
||||||
[source,js]
|
[source,console-result]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
{
|
{
|
||||||
...
|
...
|
||||||
|
@ -168,7 +165,7 @@ The percentile metric supports scripting. For example, if our load times
|
||||||
are in milliseconds but we want percentiles calculated in seconds, we could use
|
are in milliseconds but we want percentiles calculated in seconds, we could use
|
||||||
a script to convert them on-the-fly:
|
a script to convert them on-the-fly:
|
||||||
|
|
||||||
[source,js]
|
[source,console]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET latency/_search
|
GET latency/_search
|
||||||
{
|
{
|
||||||
|
@ -188,7 +185,6 @@ GET latency/_search
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
|
||||||
// TEST[setup:latency]
|
// TEST[setup:latency]
|
||||||
|
|
||||||
<1> The `field` parameter is replaced with a `script` parameter, which uses the
|
<1> The `field` parameter is replaced with a `script` parameter, which uses the
|
||||||
|
@ -197,7 +193,7 @@ script to generate values which percentiles are calculated on
|
||||||
|
|
||||||
This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax:
|
This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax:
|
||||||
|
|
||||||
[source,js]
|
[source,console]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET latency/_search
|
GET latency/_search
|
||||||
{
|
{
|
||||||
|
@ -216,7 +212,6 @@ GET latency/_search
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
|
||||||
// TEST[setup:latency,stored_example_script]
|
// TEST[setup:latency,stored_example_script]
|
||||||
|
|
||||||
[[search-aggregations-metrics-percentile-aggregation-approximation]]
|
[[search-aggregations-metrics-percentile-aggregation-approximation]]
|
||||||
|
@ -262,7 +257,7 @@ it. It would not be the case on more skewed distributions.
|
||||||
Approximate algorithms must balance memory utilization with estimation accuracy.
|
Approximate algorithms must balance memory utilization with estimation accuracy.
|
||||||
This balance can be controlled using a `compression` parameter:
|
This balance can be controlled using a `compression` parameter:
|
||||||
|
|
||||||
[source,js]
|
[source,console]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET latency/_search
|
GET latency/_search
|
||||||
{
|
{
|
||||||
|
@ -279,7 +274,6 @@ GET latency/_search
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
|
||||||
// TEST[setup:latency]
|
// TEST[setup:latency]
|
||||||
|
|
||||||
<1> Compression controls memory usage and approximation error
|
<1> Compression controls memory usage and approximation error
|
||||||
|
@ -313,7 +307,7 @@ for values up to 1 millisecond and 3.6 seconds (or better) for the maximum track
|
||||||
|
|
||||||
The HDR Histogram can be used by specifying the `method` parameter in the request:
|
The HDR Histogram can be used by specifying the `method` parameter in the request:
|
||||||
|
|
||||||
[source,js]
|
[source,console]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET latency/_search
|
GET latency/_search
|
||||||
{
|
{
|
||||||
|
@ -331,7 +325,6 @@ GET latency/_search
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
|
||||||
// TEST[setup:latency]
|
// TEST[setup:latency]
|
||||||
|
|
||||||
<1> `hdr` object indicates that HDR Histogram should be used to calculate the percentiles and specific settings for this algorithm can be specified inside the object
|
<1> `hdr` object indicates that HDR Histogram should be used to calculate the percentiles and specific settings for this algorithm can be specified inside the object
|
||||||
|
@ -346,7 +339,7 @@ The `missing` parameter defines how documents that are missing a value should be
|
||||||
By default they will be ignored but it is also possible to treat them as if they
|
By default they will be ignored but it is also possible to treat them as if they
|
||||||
had a value.
|
had a value.
|
||||||
|
|
||||||
[source,js]
|
[source,console]
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
GET latency/_search
|
GET latency/_search
|
||||||
{
|
{
|
||||||
|
@ -361,7 +354,6 @@ GET latency/_search
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
--------------------------------------------------
|
--------------------------------------------------
|
||||||
// CONSOLE
|
|
||||||
// TEST[setup:latency]
|
// TEST[setup:latency]
|
||||||
|
|
||||||
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `10`.
|
<1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `10`.
|
||||||
@@ -22,7 +22,7 @@ Assume your data consists of website load times. You may have a service agreeme

 Let's look at a range of percentiles representing load time:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET latency/_search
 {
@@ -37,13 +37,13 @@ GET latency/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:latency]

 <1> The field `load_time` must be a numeric field

 The response will look like this:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -67,7 +67,7 @@ hitting the 95% load time target

 By default the `keyed` flag is set to `true` associates a unique string key with each bucket and returns the ranges as a hash rather than an array. Setting the `keyed` flag to `false` will disable this behavior:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET latency/_search
 {
@@ -83,12 +83,11 @@ GET latency/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:latency]

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -118,7 +117,7 @@ The percentile rank metric supports scripting. For example, if our load times
 are in milliseconds but we want to specify values in seconds, we could use
 a script to convert them on-the-fly:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET latency/_search
 {
@@ -139,15 +138,15 @@ GET latency/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:latency]

 <1> The `field` parameter is replaced with a `script` parameter, which uses the
 script to generate values which percentile ranks are calculated on
 <2> Scripting supports parameterized input just like any other script

 This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET latency/_search
 {
@@ -167,7 +166,6 @@ GET latency/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:latency,stored_example_script]

 ==== HDR Histogram
@@ -183,7 +181,7 @@ microseconds) in a histogram set to 3 significant digits, it will maintain a val

 The HDR Histogram can be used by specifying the `method` parameter in the request:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET latency/_search
 {
@@ -201,8 +199,8 @@ GET latency/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:latency]

 <1> `hdr` object indicates that HDR Histogram should be used to calculate the percentiles and specific settings for this algorithm can be specified inside the object
 <2> `number_of_significant_value_digits` specifies the resolution of values for the histogram in number of significant digits

@@ -215,7 +213,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they
 had a value.

-[source,js]
+[source,console]
 --------------------------------------------------
 GET latency/_search
 {
@@ -231,6 +229,6 @@ GET latency/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:latency]

 <1> Documents without a value in the `load_time` field will fall into the same bucket as documents that have the value `10`.
@@ -5,7 +5,7 @@ A metric aggregation that executes using scripts to provide a metric output.

 Example:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST ledger/_search?size=0
 {
@@ -24,7 +24,6 @@ POST ledger/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:ledger]

 <1> `init_script` is an optional parameter, all other scripts are required.
@@ -33,7 +32,7 @@ The above aggregation demonstrates how one would use the script aggregation comp

 The response for the above aggregation:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 218,
@@ -50,7 +49,7 @@ The response for the above aggregation:

 The above example can also be specified using stored scripts as follows:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST ledger/_search?size=0
 {
@@ -77,7 +76,6 @@ POST ledger/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:ledger,stored_scripted_metric_script]

 <1> script parameters for `init`, `map` and `combine` scripts must be specified
@@ -86,7 +84,7 @@ in a global `params` object so that it can be shared between the scripts.
 ////
 Verify this response as well but in a hidden block.

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 218,
@@ -145,7 +143,7 @@ final combined profit which will be returned in the response of the aggregation.

 Imagine a situation where you index the following documents into an index with 2 shards:

-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /transactions/_bulk?refresh
 {"index":{"_id":1}}
@@ -157,7 +155,6 @@ PUT /transactions/_bulk?refresh
 {"index":{"_id":4}}
 {"type": "sale","amount": 130}
 --------------------------------------------------
-// CONSOLE

 Lets say that documents 1 and 3 end up on shard A and documents 2 and 4 end up on shard B. The following is a breakdown of what the aggregation result is
 at each stage of the example above.
@@ -7,7 +7,7 @@ The stats that are returned consist of: `min`, `max`, `sum`, `count` and `avg`.

 Assuming the data consists of documents representing exams grades (between 0 and 100) of students

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@@ -16,13 +16,12 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 The above aggregation computes the grades statistics over all documents. The aggregation type is `stats` and the `field` setting defines the numeric field of the documents the stats will be computed on. The above will return the following:


-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -46,7 +45,7 @@ The name of the aggregation (`grades_stats` above) also serves as the key by whi

 Computing the grades stats based on a script:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@@ -62,12 +61,11 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@@ -85,14 +83,13 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams,stored_example_script]

 ===== Value Script

 It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use a value script to get the new stats:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@@ -112,7 +109,6 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 ==== Missing value
@@ -121,7 +117,7 @@ The `missing` parameter defines how documents that are missing a value should be
 By default they will be ignored but it is also possible to treat them as if they
 had a value.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search?size=0
 {
@@ -135,7 +131,6 @@ POST /exams/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 <1> Documents without a value in the `grade` field will fall into the same bucket as documents that have the value `0`.
@@ -6,7 +6,7 @@ A `single-value` metrics aggregation that sums up numeric values that are extrac
 Assuming the data consists of documents representing sales records we can sum
 the sale price of all hats with:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -22,12 +22,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 Resulting in:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -46,7 +45,7 @@ The name of the aggregation (`hat_prices` above) also serves as the key by which

 We could also use a script to fetch the sales price:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -68,12 +67,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -98,7 +96,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales,stored_example_script]

 ===== Value Script
@@ -106,7 +103,7 @@ POST /sales/_search?size=0
 It is also possible to access the field value from the script using `_value`.
 For example, this will sum the square of the prices for all hats:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -129,7 +126,6 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 ==== Missing value
@@ -139,7 +135,7 @@ be treated. By default documents missing the value will be ignored but it is
 also possible to treat them as if they had a value. For example, this treats
 all hat sales without a price as being `100`.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -160,5 +156,4 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
@@ -32,7 +32,7 @@ The top_hits aggregation returns regular search hits, because of this many per h
 In the following example we group the sales by type and per type we show the last sale.
 For each sale only the date and price fields are being included in the source.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -63,12 +63,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 Possible response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -185,7 +184,7 @@ belong to. By defining a `terms` aggregator on the `domain` field we group the r
 Also a `max` aggregator is defined which is used by the `terms` aggregator's order feature to return the buckets by
 relevancy order of the most relevant document in a bucket.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -218,7 +217,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 At the moment the `max` (or `min`) aggregator is needed to make sure the buckets from the `terms` aggregator are
@@ -239,7 +237,7 @@ and includes the array field and the offset in the array field the nested hit be

 Let's see how it works with a real sample. Considering the following mapping:

-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /sales
 {
@@ -257,12 +255,12 @@ PUT /sales
 }
 }
 --------------------------------------------------
-// CONSOLE
 <1> The `comments` is an array that holds nested documents under the `product` object.

 And some documents:

-[source,js]
+[source,console]
 --------------------------------------------------
 PUT /sales/_doc/1?refresh
 {
@@ -274,12 +272,11 @@ PUT /sales/_doc/1?refresh
 ]
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]

 It's now possible to execute the following `top_hits` aggregation (wrapped in a `nested` aggregation):

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -308,13 +305,12 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[continued]
 // TEST[s/_search/_search\?filter_path=aggregations.by_sale.by_user.buckets/]

 Top hits response snippet with a nested hit, which resides in the first slot of array field `comments`:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -359,6 +355,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of
 }
 --------------------------------------------------
 // TESTRESPONSE[s/\.\.\.//]

 <1> Name of the array field containing the nested hit
 <2> Position if the nested hit in the containing array
 <3> Source of the nested hit
@@ -6,7 +6,7 @@ These values can be extracted either from specific fields in the documents, or b
 this aggregator will be used in conjunction with other single-value aggregations. For example, when computing the `avg`
 one might be interested in the number of values the average is computed over.

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -15,12 +15,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -40,7 +39,7 @@ retrieved from the returned response.

 Counting the values generated by a script:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -55,12 +54,11 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 This will interpret the `script` parameter as an `inline` script with the `painless` script language and no script parameters. To use a stored script use the following syntax:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search?size=0
 {
@@ -78,5 +76,4 @@ POST /sales/_search?size=0
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales,stored_example_script]
@@ -51,7 +51,7 @@ The `value` and `weight` objects have per-field specific configuration:
 If our documents have a `"grade"` field that holds a 0-100 numeric score, and a `"weight"` field which holds an arbitrary numeric weight,
 we can calculate the weighted average using:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search
 {
@@ -70,12 +70,11 @@ POST /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

 Which yields a response like:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -98,7 +97,7 @@ This single weight will be applied independently to each value extracted from th

 This example show how a single document with multiple values will be averaged with a single weight:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_doc?refresh
 {
@@ -123,12 +122,11 @@ POST /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST

 The three values (`1`, `2`, and `3`) will be included as independent values, all with the weight of `2`:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 ...
@@ -149,7 +147,7 @@ The aggregation returns `2.0` as the result, which matches what we would expect
 Both the value and the weight can be derived from a script, instead of a field. As a simple example, the following
 will add one to the grade and weight in the document using a script:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search
 {
@@ -168,7 +166,6 @@ POST /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]


@@ -182,7 +179,7 @@ If the `weight` field is missing, it is assumed to have a weight of `1` (like a

 Both of these defaults can be overridden with the `missing` parameter:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /exams/_search
 {
@@ -203,6 +200,5 @@ POST /exams/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:exams]

@@ -15,7 +15,7 @@ See <<shard-request-cache>> for more details.
 There are many occasions when aggregations are required but search hits are not. For these cases the hits can be ignored by
 setting `size=0`. For example:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /twitter/_search
 {
@@ -29,7 +29,6 @@ GET /twitter/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:twitter]

 Setting `size` to `0` avoids executing the fetch phase of the search making the request more efficient.
@@ -42,7 +41,7 @@ at response time.

 Consider this example where we want to associate the color blue with our `terms` aggregation.

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /twitter/_search
 {
@@ -59,12 +58,11 @@ GET /twitter/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:twitter]

 Then that piece of metadata will be returned in place for our `titles` terms aggregation

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "aggregations": {
@@ -94,7 +92,7 @@ Considering the following <<search-aggregations-bucket-datehistogram-aggregation
 `tweets_over_time` which has a sub <<search-aggregations-metrics-top-hits-aggregation, 'top_hits` aggregation>> named
 `top_users`:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /twitter/_search?typed_keys
 {
@@ -115,13 +113,12 @@ GET /twitter/_search?typed_keys
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:twitter]

 In the response, the aggregations names will be changed to respectively `date_histogram#tweets_over_time` and
 `top_hits#top_users`, reflecting the internal types of each aggregation:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "aggregations": {
@@ -50,7 +50,7 @@ Paths are relative from the position of the pipeline aggregation; they are not a
 aggregation tree. For example, this derivative is embedded inside a date_histogram and refers to a "sibling"
 metric `"the_sum"`:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -72,7 +72,7 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 <1> The metric is called `"the_sum"`
 <2> The `buckets_path` refers to the metric via a relative path `"the_sum"`

@@ -80,7 +80,7 @@ POST /_search
 instead of embedded "inside" them. For example, the `max_bucket` aggregation uses the `buckets_path` to specify
 a metric embedded inside a sibling aggregation:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -106,8 +106,8 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the
 `sales_per_month` date histogram.

@@ -115,7 +115,7 @@ If a Sibling pipeline agg references a multi-bucket aggregation, such as a `term
 select specific keys from the multi-bucket. For example, a `bucket_script` could select two specific buckets (via
 their bucket keys) to perform the calculation:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -152,8 +152,8 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` selects the hats and bags buckets (via `['hat']`/`['bag']``) to use in the script specifically,
 instead of fetching all the buckets from `sale_type` aggregation

@@ -164,7 +164,7 @@ Instead of pathing to a metric, `buckets_path` can use a special `"_count"` path
 the pipeline aggregation to use the document count as its input. For example, a derivative can be calculated
 on the document count of each bucket, instead of a specific metric:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -183,14 +183,14 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 <1> By using `_count` instead of a metric name, we can calculate the derivative of document counts in the histogram

 The `buckets_path` can also use `"_bucket_count"` and path to a multi-bucket aggregation to use the number of buckets
 returned by that aggregation in the pipeline aggregation instead of a metric. for example a `bucket_selector` can be
 used here to filter out buckets which contain no buckets for an inner terms aggregation:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -222,8 +222,8 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> By using `_bucket_count` instead of a metric name, we can filter out `histo` buckets where they contain no buckets
 for the `categories` aggregation

@@ -281,6 +281,7 @@ include::pipeline/percentiles-bucket-aggregation.asciidoc[]
 include::pipeline/movavg-aggregation.asciidoc[]
 include::pipeline/movfn-aggregation.asciidoc[]
 include::pipeline/cumulative-sum-aggregation.asciidoc[]
+include::pipeline/cumulative-cardinality-aggregation.asciidoc[]
 include::pipeline/bucket-script-aggregation.asciidoc[]
 include::pipeline/bucket-selector-aggregation.asciidoc[]
 include::pipeline/bucket-sort-aggregation.asciidoc[]
@@ -33,7 +33,7 @@ An `avg_bucket` aggregation looks like this in isolation:

 The following snippet calculates the average of the total monthly `sales`:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -61,14 +61,14 @@ POST /_search
 }

 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` instructs this avg_bucket aggregation that we want the (mean) average value of the `sales` aggregation in the
 `sales_per_month` date histogram.

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
@@ -41,7 +41,7 @@ for more details) |Required |

 The following snippet calculates the ratio percentage of t-shirt sales compared to total sales each month:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -86,12 +86,11 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
@@ -44,7 +44,7 @@ for more details) |Required |

 The following snippet only retains buckets where the total sales for the month is more than 200:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -74,12 +74,11 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
@@ -113,4 +112,5 @@ And the following may be the response:
 // TESTRESPONSE[s/"took": 11/"took": $body.took/]
 // TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/]
 // TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/]

 <1> Bucket for `2015/02/01 00:00:00` has been removed as its total sales was less than 200

@@ -47,7 +47,7 @@ is ascending.

 The following snippet returns the buckets corresponding to the 3 months with the highest total sales in descending order:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -77,14 +77,14 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `sort` is set to use the values of `total_sales` in descending order
 <2> `size` is set to `3` meaning only the top 3 months in `total_sales` will be returned

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 82,
@@ -135,7 +135,7 @@ without specifying `sort`.

 The following example simply truncates the result so that only the second bucket is returned:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -158,12 +158,11 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 Response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
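A sketch of the `bucket_sort` that would implement the "top 3 months" behaviour described above (names illustrative):

[source,console]
--------------------------------------------------
POST /sales/_search
{
  "size": 0,
  "aggs": {
    "sales_per_month": {
      "date_histogram": { "field": "date", "calendar_interval": "month" },
      "aggs": {
        "total_sales": { "sum": { "field": "price" } },
        "sales_bucket_sort": {
          "bucket_sort": {
            "sort": [ { "total_sales": { "order": "desc" } } ],
            "size": 3
          }
        }
      }
    }
  }
}
--------------------------------------------------

The truncation-only variant from the later hunk would drop `sort` and keep just `from`/`size`.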

@@ -38,7 +38,7 @@ A `cumulative_cardinality` aggregation looks like this in isolation:

 The following snippet calculates the cumulative cardinality of the total daily `users`:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /user_hits/_search
 {
@@ -65,14 +65,13 @@ GET /user_hits/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:user_hits]

 <1> `buckets_path` instructs this aggregation to use the output of the `distinct_users` aggregation for the cumulative cardinality

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
@@ -138,7 +137,7 @@ are added each day, rather than the total cumulative count.

 This can be accomplished by adding a `derivative` aggregation to our query:

-[source,js]
+[source,console]
 --------------------------------------------------
 GET /user_hits/_search
 {
@@ -170,13 +169,12 @@ GET /user_hits/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:user_hits]


 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
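Only `GET /user_hits/_search` and the `distinct_users` name survive in the context above; a sketch of how they plausibly fit together (`users_per_day`, `user_id`, and `timestamp` are assumptions):

[source,console]
--------------------------------------------------
GET /user_hits/_search
{
  "size": 0,
  "aggs": {
    "users_per_day": {
      "date_histogram": { "field": "timestamp", "calendar_interval": "day" },
      "aggs": {
        "distinct_users": { "cardinality": { "field": "user_id" } },
        "total_new_users": {
          "cumulative_cardinality": { "buckets_path": "distinct_users" }
        }
      }
    }
  }
}
--------------------------------------------------

The day-over-day variant from the second pair of hunks would wrap the result in a `derivative` whose `buckets_path` points at `total_new_users`.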

@@ -31,7 +31,7 @@ A `cumulative_sum` aggregation looks like this in isolation:

 The following snippet calculates the cumulative sum of the total monthly `sales`:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -58,14 +58,13 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` instructs this cumulative sum aggregation to use the output of the `sales` aggregation for the cumulative sum

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
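A sketch of the elided `cumulative_sum` request (names illustrative):

[source,console]
--------------------------------------------------
POST /sales/_search
{
  "size": 0,
  "aggs": {
    "sales_per_month": {
      "date_histogram": { "field": "date", "calendar_interval": "month" },
      "aggs": {
        "sales": { "sum": { "field": "price" } },
        "cumulative_sales": {
          "cumulative_sum": { "buckets_path": "sales" }
        }
      }
    }
  }
}
--------------------------------------------------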

@@ -34,7 +34,7 @@ A `derivative` aggregation looks like this in isolation:

 The following snippet calculates the derivative of the total monthly `sales`:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -61,14 +61,13 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` instructs this derivative aggregation to use the output of the `sales` aggregation for the derivative

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
@@ -128,7 +127,7 @@ A second order derivative can be calculated by chaining the derivative pipeline
 pipeline aggregation as in the following example which will calculate both the first and the second order derivative of the total
 monthly sales:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -160,14 +159,13 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` for the second derivative points to the name of the first derivative

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 50,
@@ -228,7 +226,7 @@ The derivative aggregation allows the units of the derivative values to be speci
 `normalized_value` which reports the derivative value in the desired x-axis units. In the below example we calculate the derivative
 of the total sales per month but ask for the derivative of the sales as in the units of sales per day:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -256,13 +254,12 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]
 <1> `unit` specifies what unit to use for the x-axis of the derivative calculation

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 50,
@@ -312,5 +309,6 @@ And the following may be the response:
 // TESTRESPONSE[s/"took": 50/"took": $body.took/]
 // TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/]
 // TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/]

 <1> `value` is reported in the original units of 'per month'
 <2> `normalized_value` is reported in the desired units of 'per day'
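A sketch of the `unit`-bearing derivative request that the last two hunks annotate (names illustrative):

[source,console]
--------------------------------------------------
POST /sales/_search
{
  "size": 0,
  "aggs": {
    "sales_per_month": {
      "date_histogram": { "field": "date", "calendar_interval": "month" },
      "aggs": {
        "sales": { "sum": { "field": "price" } },
        "sales_deriv": {
          "derivative": { "buckets_path": "sales", "unit": "day" }
        }
      }
    }
  }
}
--------------------------------------------------

With `unit` set, each bucket reports both `value` (per month) and `normalized_value` (per day), which is what callouts <1> and <2> above refer to.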

@@ -35,7 +35,7 @@ A `extended_stats_bucket` aggregation looks like this in isolation:

 The following snippet calculates the extended stats for monthly `sales` bucket:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -62,7 +62,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `bucket_paths` instructs this `extended_stats_bucket` aggregation that we want to calculate stats for the `sales` aggregation in the
@@ -70,7 +69,7 @@ POST /sales/_search

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,

@@ -33,7 +33,7 @@ A `max_bucket` aggregation looks like this in isolation:

 The following snippet calculates the maximum of the total monthly `sales`:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -60,7 +60,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the
@@ -68,7 +67,7 @@ POST /sales/_search

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
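Unlike the parent pipeline aggregations above, `max_bucket` is a sibling aggregation: it sits next to the histogram and reaches into it with the `>` path separator. A sketch (names illustrative):

[source,console]
--------------------------------------------------
POST /sales/_search
{
  "size": 0,
  "aggs": {
    "sales_per_month": {
      "date_histogram": { "field": "date", "calendar_interval": "month" },
      "aggs": {
        "sales": { "sum": { "field": "price" } }
      }
    },
    "max_monthly_sales": {
      "max_bucket": { "buckets_path": "sales_per_month>sales" }
    }
  }
}
--------------------------------------------------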

@@ -33,7 +33,7 @@ A `min_bucket` aggregation looks like this in isolation:

 The following snippet calculates the minimum of the total monthly `sales`:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -60,7 +60,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` instructs this min_bucket aggregation that we want the minimum value of the `sales` aggregation in the
@@ -68,7 +67,7 @@ POST /sales/_search

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,

@@ -38,7 +38,7 @@ A `moving_fn` aggregation looks like this in isolation:
 `moving_fn` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation. They can be
 embedded like any other metric aggregation:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -65,7 +65,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals
@@ -79,7 +78,7 @@ The `buckets_path` parameter is then used to "point" at one of the sibling metri

 An example response from the above aggregation may look like:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
@@ -140,7 +139,7 @@ kind of calculation and emit a single `double` as the result. Emitting `null` i

 For example, this script will simply return the first value from the window, or `NaN` if no values are available:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -167,7 +166,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 [[shift-parameter]]
@@ -211,7 +209,7 @@ is only calculated over the real values. If the window is empty, or all values a
 |`values` |The window of values to find the maximum
 |===

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -238,7 +236,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 ===== min Function
@@ -254,7 +251,7 @@ is only calculated over the real values. If the window is empty, or all values a
 |`values` |The window of values to find the minimum
 |===

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -281,7 +278,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 ===== sum Function
@@ -297,7 +293,7 @@ the sum is only calculated over the real values. If the window is empty, or all
 |`values` |The window of values to find the sum of
 |===

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -324,7 +320,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 ===== stdDev Function
@@ -342,7 +337,7 @@ This function accepts a collection of doubles and average, then returns the stan
 |`avg` |The average of the window
 |===

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -369,7 +364,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 The `avg` parameter must be provided to the standard deviation function because different styles of averages can be computed on the window
@@ -394,7 +388,7 @@ values.
 |`values` |The window of values to find the sum of
 |===

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -421,7 +415,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 ==== linearWeightedAvg Function
@@ -440,7 +433,7 @@ If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the
 |`values` |The window of values to find the sum of
 |===

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -467,7 +460,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 ==== ewma Function
@@ -492,7 +484,7 @@ values.
 |`alpha` |Exponential decay
 |===

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -519,7 +511,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]


@@ -550,7 +541,7 @@ values.
 |`beta` |Trend decay value
 |===

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -577,7 +568,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 In practice, the `alpha` value behaves very similarly in `holtMovAvg` as `ewmaMovAvg`: small values produce more smoothing
@@ -616,7 +606,7 @@ values.
 |`multiplicative` |True if you wish to use multiplicative holt-winters, false to use additive
 |===

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -643,7 +633,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 [WARNING]
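The first-value script mentioned above is short enough to reconstruct. A sketch using the `my_date_histo`/"timestamp" names from the callout (the window size and metric names are assumptions):

[source,console]
--------------------------------------------------
POST /_search
{
  "size": 0,
  "aggs": {
    "my_date_histo": {
      "date_histogram": { "field": "timestamp", "calendar_interval": "day" },
      "aggs": {
        "the_sum": { "sum": { "field": "price" } },
        "the_movfn": {
          "moving_fn": {
            "buckets_path": "the_sum",
            "window": 10,
            "script": "return values.length > 0 ? values[0] : Double.NaN"
          }
        }
      }
    }
  }
}
--------------------------------------------------

The built-in functions covered by the following hunks are invoked the same way, e.g. `MovingFunctions.max(values)` or `MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))`.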

@@ -34,7 +34,7 @@ A `percentiles_bucket` aggregation looks like this in isolation:

 The following snippet calculates the percentiles for the total monthly `sales` buckets:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -62,7 +62,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` instructs this percentiles_bucket aggregation that we want to calculate percentiles for
@@ -71,7 +70,7 @@ the `sales` aggregation in the `sales_per_month` date histogram.

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
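A sketch of the elided `percentiles_bucket` request, again in the sibling position (the `percents` values are assumptions):

[source,console]
--------------------------------------------------
POST /sales/_search
{
  "size": 0,
  "aggs": {
    "sales_per_month": {
      "date_histogram": { "field": "date", "calendar_interval": "month" },
      "aggs": {
        "sales": { "sum": { "field": "price" } }
      }
    },
    "percentiles_monthly_sales": {
      "percentiles_bucket": {
        "buckets_path": "sales_per_month>sales",
        "percents": [ 25.0, 50.0, 75.0 ]
      }
    }
  }
}
--------------------------------------------------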

@@ -60,7 +60,7 @@ A `serial_diff` aggregation looks like this in isolation:

 `serial_diff` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /_search
 {
@@ -88,7 +88,6 @@ POST /_search
 }
 }
 --------------------------------------------------
-// CONSOLE

 <1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals
 <2> A `sum` metric is used to calculate the sum of a field. This could be any metric (sum, min, max, etc)
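A sketch of a `serial_diff` embedded in a `date_histogram` as the prose requires, using the `my_date_histo` name from the callouts (the `lag` value and metric field are assumptions):

[source,console]
--------------------------------------------------
POST /_search
{
  "size": 0,
  "aggs": {
    "my_date_histo": {
      "date_histogram": { "field": "timestamp", "calendar_interval": "day" },
      "aggs": {
        "the_sum": { "sum": { "field": "price" } },
        "thirtieth_difference": {
          "serial_diff": { "buckets_path": "the_sum", "lag": 30 }
        }
      }
    }
  }
}
--------------------------------------------------

`lag` controls how many buckets back the subtraction reaches, so `lag: 30` on daily buckets gives a roughly month-over-month difference.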

@@ -32,7 +32,7 @@ A `stats_bucket` aggregation looks like this in isolation:

 The following snippet calculates the stats for monthly `sales`:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -59,7 +59,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `bucket_paths` instructs this `stats_bucket` aggregation that we want to calculate stats for the `sales` aggregation in the
@@ -67,7 +66,7 @@ POST /sales/_search

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,

@@ -32,7 +32,7 @@ A `sum_bucket` aggregation looks like this in isolation:

 The following snippet calculates the sum of all the total monthly `sales` buckets:

-[source,js]
+[source,console]
 --------------------------------------------------
 POST /sales/_search
 {
@@ -59,7 +59,6 @@ POST /sales/_search
 }
 }
 --------------------------------------------------
-// CONSOLE
 // TEST[setup:sales]

 <1> `buckets_path` instructs this sum_bucket aggregation that we want the sum of the `sales` aggregation in the
@@ -67,7 +66,7 @@ POST /sales/_search

 And the following may be the response:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "took": 11,
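And the `sum_bucket` counterpart, same sibling shape as `max_bucket` and `min_bucket` above (names illustrative):

[source,console]
--------------------------------------------------
POST /sales/_search
{
  "size": 0,
  "aggs": {
    "sales_per_month": {
      "date_histogram": { "field": "date", "calendar_interval": "month" },
      "aggs": {
        "sales": { "sum": { "field": "price" } }
      }
    },
    "sum_monthly_sales": {
      "sum_bucket": { "buckets_path": "sales_per_month>sales" }
    }
  }
}
--------------------------------------------------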

@@ -63,7 +63,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -91,6 +91,5 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -89,7 +89,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -124,7 +124,6 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -216,7 +215,7 @@ are defined later in the request.

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -251,7 +250,6 @@ are defined later in the request.
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -24,7 +24,7 @@ POST _analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -38,7 +38,6 @@ POST _analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -110,7 +109,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -124,7 +123,6 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -19,7 +19,7 @@ POST _analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -33,7 +33,6 @@ POST _analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -34,7 +34,7 @@ POST _analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -125,7 +125,6 @@ POST _analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -205,7 +204,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -247,7 +246,6 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -290,7 +288,7 @@ GET my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -339,7 +337,6 @@ GET my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -19,7 +19,7 @@ POST _analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -103,7 +103,6 @@ POST _analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -22,7 +22,7 @@ POST _analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -106,7 +106,6 @@ POST _analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -176,7 +175,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -253,7 +252,6 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -20,7 +20,7 @@ POST _analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -90,7 +90,6 @@ POST _analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -154,7 +153,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -217,7 +216,6 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -19,7 +19,7 @@ POST _analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -96,7 +96,6 @@ POST _analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -22,7 +22,7 @@ POST _analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -36,7 +36,6 @@ POST _analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -103,7 +102,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -117,7 +116,6 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -76,7 +76,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -90,7 +90,6 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -143,7 +142,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -185,7 +184,7 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////


@@ -144,7 +144,7 @@ POST my_index/_analyze

 /////////////////////

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "tokens": [
@@ -186,7 +186,6 @@ POST my_index/_analyze
 ]
 }
 ----------------------------
-// TESTRESPONSE

 /////////////////////

@@ -227,7 +226,7 @@ GET my_index/_search

 The output from the above is:

-[source,js]
+[source,console-result]
 ----------------------------
 {
 "timed_out": false,
@@ -264,4 +263,5 @@ The output from the above is:
 }
 ----------------------------
 // TESTRESPONSE[s/"took".*/"took": "$body.took",/]

 <1> Note the incorrect highlight.


@@ -87,7 +87,7 @@ POST /common_grams_example/_analyze

 And the response will be:

-[source,js]
+[source,console-result]
 --------------------------------------------------
 {
 "tokens" : [
@@ -168,4 +168,3 @@ And the response will be:
 ]
 }
 --------------------------------------------------
-// TESTRESPONSE
Some files were not shown because too many files have changed in this diff.