mirror of https://github.com/elastic/elasticsearch.git, synced 2025-04-21 05:37:23 -04:00
FixedBitSet -> BitSet

parent 9887ea73e8
commit 4e5445c775

29 changed files with 111 additions and 98 deletions
@@ -24,10 +24,11 @@ import org.apache.lucene.search.BitsFilteredDocIdSet;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.RoaringDocIdSet;
import org.apache.lucene.util.SparseFixedBitSet;
import org.elasticsearch.common.Nullable;

import java.io.IOException;

@@ -107,16 +108,21 @@ public class DocIdSets {
if (iterator == null) {
return new Bits.MatchNoBits(reader.maxDoc());
}
return toFixedBitSet(iterator, reader.maxDoc());
return toBitSet(iterator, reader.maxDoc());
}

/**
* Creates a {@link FixedBitSet} from an iterator.
* Creates a {@link BitSet} from an iterator.
*/
public static FixedBitSet toFixedBitSet(DocIdSetIterator iterator, int numBits) throws IOException {
FixedBitSet set = new FixedBitSet(numBits);
set.or(iterator);
return set;
public static BitSet toBitSet(DocIdSetIterator iterator, int numBits) throws IOException {
BitDocIdSet.Builder builder = new BitDocIdSet.Builder(numBits);
builder.or(iterator);
BitDocIdSet result = builder.build();
if (result != null) {
return result.bits();
} else {
return new SparseFixedBitSet(numBits);
}
}

}
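For readers skimming this hunk: the renamed helper now returns the abstract BitSet, and the concrete instance can be either a dense FixedBitSet or a SparseFixedBitSet, depending on what the builder produced. A minimal caller sketch, assuming a Lucene 5.x DocIdSetIterator over the matching documents (class and method names are illustrative, not part of this commit):

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.common.lucene.docset.DocIdSets;

import java.io.IOException;

class ToBitSetExample {
    // Materializes the matches into a random-access BitSet and tests one doc id.
    static boolean contains(DocIdSetIterator matches, int maxDoc, int docId) throws IOException {
        BitSet bits = DocIdSets.toBitSet(matches, maxDoc);
        // Depend only on the BitSet contract; the backing implementation may be dense or sparse.
        return bits.get(docId);
    }
}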
@@ -46,11 +46,11 @@ public class IndexCache extends AbstractIndexComponent implements CloseableCompo
private ClusterService clusterService;

@Inject
public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryParserCache queryParserCache, BitsetFilterCache fixedBitSetFilterCache) {
public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryParserCache queryParserCache, BitsetFilterCache bitsetFilterCache) {
super(index, indexSettings);
this.filterCache = filterCache;
this.queryParserCache = queryParserCache;
this.bitsetFilterCache = fixedBitSetFilterCache;
this.bitsetFilterCache = bitsetFilterCache;
}

@Inject(optional = true)
@@ -29,7 +29,7 @@ import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.join.BitDocIdSetFilter;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.SparseFixedBitSet;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -141,15 +141,19 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea
public Value call() throws Exception {
DocIdSet docIdSet = filter.getDocIdSet(context, null);
final BitDocIdSet bitSet;
// TODO: change to simple instanceof if BitDocIdSet gets prevSetBit
if (docIdSet instanceof BitDocIdSet && ((BitDocIdSet)docIdSet).bits() instanceof FixedBitSet) {
if (docIdSet instanceof BitDocIdSet) {
bitSet = (BitDocIdSet) docIdSet;
} else {
FixedBitSet fbs = new FixedBitSet(context.reader().maxDoc());
BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
if (docIdSet != null && docIdSet != DocIdSet.EMPTY) {
fbs.or(docIdSet.iterator());
builder.or(docIdSet.iterator());
}
bitSet = new BitDocIdSet(fbs);
BitDocIdSet bits = builder.build();
// code expects this to be non-null
if (bits == null) {
bits = new BitDocIdSet(new SparseFixedBitSet(context.reader().maxDoc()), 0);
}
bitSet = bits;
}

Value value = new Value(bitSet, shardId);

@@ -182,8 +186,8 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea
}
IndexShard shard = indexService.shard(entry.getValue().shardId.id());
if (shard != null) {
ShardBitsetFilterCache shardFixedBitSetFilterCache = shard.shardBitsetFilterCache();
shardFixedBitSetFilterCache.onRemoval(entry.getValue().bitset.ramBytesUsed());
ShardBitsetFilterCache shardBitsetFilterCache = shard.shardBitsetFilterCache();
shardBitsetFilterCache.onRemoval(entry.getValue().bitset.ramBytesUsed());
}
// if null then this means the shard has already been removed and the stats are 0 anyway for the shard this key belongs to
}
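The new branch above is a build-or-fallback wrapper: reuse a DocIdSet that is already a BitDocIdSet, otherwise build one from its iterator, and never hand the cache a null. A standalone sketch of that pattern against the same Lucene 5.x API (the helper class below is illustrative, not code from this commit):

import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.SparseFixedBitSet;

import java.io.IOException;

class BitDocIdSetHelper {
    // Wraps an arbitrary DocIdSet into a non-null BitDocIdSet for a segment with maxDoc documents.
    static BitDocIdSet toBitDocIdSet(DocIdSet docIdSet, int maxDoc) throws IOException {
        if (docIdSet instanceof BitDocIdSet) {
            return (BitDocIdSet) docIdSet; // already backed by a BitSet, reuse it as-is
        }
        BitDocIdSet.Builder builder = new BitDocIdSet.Builder(maxDoc);
        if (docIdSet != null && docIdSet != DocIdSet.EMPTY) {
            DocIdSetIterator it = docIdSet.iterator();
            if (it != null) {
                builder.or(it);
            }
        }
        BitDocIdSet built = builder.build();
        // build() returns null for an empty set, but downstream code expects non-null,
        // hence the empty SparseFixedBitSet fallback (cost passed as 0).
        return built != null ? built : new BitDocIdSet(new SparseFixedBitSet(maxDoc), 0);
    }
}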
@@ -40,7 +40,7 @@ public class SegmentsStats implements Streamable, ToXContent {
private long indexWriterMemoryInBytes;
private long indexWriterMaxMemoryInBytes;
private long versionMapMemoryInBytes;
private long fixedBitSetMemoryInBytes;
private long bitsetMemoryInBytes;

public SegmentsStats() {

@@ -63,8 +63,8 @@ public class SegmentsStats implements Streamable, ToXContent {
this.versionMapMemoryInBytes += versionMapMemoryInBytes;
}

public void addFixedBitSetMemoryInBytes(long fixedBitSetMemoryInBytes) {
this.fixedBitSetMemoryInBytes += fixedBitSetMemoryInBytes;
public void addBitsetMemoryInBytes(long bitsetMemoryInBytes) {
this.bitsetMemoryInBytes += bitsetMemoryInBytes;
}

public void add(SegmentsStats mergeStats) {

@@ -75,7 +75,7 @@ public class SegmentsStats implements Streamable, ToXContent {
addIndexWriterMemoryInBytes(mergeStats.indexWriterMemoryInBytes);
addIndexWriterMaxMemoryInBytes(mergeStats.indexWriterMaxMemoryInBytes);
addVersionMapMemoryInBytes(mergeStats.versionMapMemoryInBytes);
addFixedBitSetMemoryInBytes(mergeStats.fixedBitSetMemoryInBytes);
addBitsetMemoryInBytes(mergeStats.bitsetMemoryInBytes);
}

/**

@@ -130,14 +130,14 @@ public class SegmentsStats implements Streamable, ToXContent {
}

/**
* Estimation of how much the cached fixed bit sets are taking. (which nested and p/c rely on)
* Estimation of how much the cached bit sets are taking. (which nested and p/c rely on)
*/
public long getFixedBitSetMemoryInBytes() {
return fixedBitSetMemoryInBytes;
public long getBitsetMemoryInBytes() {
return bitsetMemoryInBytes;
}

public ByteSizeValue getFixedBitSetMemory() {
return new ByteSizeValue(fixedBitSetMemoryInBytes);
public ByteSizeValue getBitsetMemory() {
return new ByteSizeValue(bitsetMemoryInBytes);
}

public static SegmentsStats readSegmentsStats(StreamInput in) throws IOException {

@@ -154,7 +154,7 @@ public class SegmentsStats implements Streamable, ToXContent {
builder.byteSizeField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, indexWriterMemoryInBytes);
builder.byteSizeField(Fields.INDEX_WRITER_MAX_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MAX_MEMORY, indexWriterMaxMemoryInBytes);
builder.byteSizeField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, versionMapMemoryInBytes);
builder.byteSizeField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, fixedBitSetMemoryInBytes);
builder.byteSizeField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, bitsetMemoryInBytes);
builder.endObject();
return builder;
}

@@ -186,7 +186,7 @@ public class SegmentsStats implements Streamable, ToXContent {
indexWriterMaxMemoryInBytes = in.readLong();
}
if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
fixedBitSetMemoryInBytes = in.readLong();
bitsetMemoryInBytes = in.readLong();
}
}

@@ -202,7 +202,7 @@ public class SegmentsStats implements Streamable, ToXContent {
out.writeLong(indexWriterMaxMemoryInBytes);
}
if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
out.writeLong(fixedBitSetMemoryInBytes);
out.writeLong(bitsetMemoryInBytes);
}
}
}
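Although the Java field and accessors are renamed, the serialized layout is untouched: the XContent key stays FIXED_BIT_SET_MEMORY_IN_BYTES and the stream still carries one long gated on Version.V_1_4_0_Beta1, so mixed-version clusters keep working. A self-contained sketch of that symmetric version gate, using an illustrative class rather than SegmentsStats itself:

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

class VersionGatedStat {
    long bitsetMemoryInBytes;

    void readFrom(StreamInput in) throws IOException {
        // reader and writer must test the same version so both sides agree on whether the long is present
        if (in.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
            bitsetMemoryInBytes = in.readLong(); // absent on the wire from pre-1.4.0.Beta1 nodes
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_1_4_0_Beta1)) {
            out.writeLong(bitsetMemoryInBytes); // omitted for receivers that would not read it
        }
    }
}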
@@ -31,7 +31,6 @@ import org.elasticsearch.common.settings.Settings;
import java.io.Closeable;
import java.io.IOException;
import java.util.Arrays;
import java.util.Comparator;

/**
* Simple class to build document ID <-> ordinal mapping. Note: Ordinals are

@@ -379,10 +378,10 @@ public final class OrdinalsBuilder implements Closeable {
}

/**
* Builds a {@link FixedBitSet} where each documents bit is that that has one or more ordinals associated with it.
* Builds a {@link BitSet} where each documents bit is that that has one or more ordinals associated with it.
* if every document has an ordinal associated with it this method returns <code>null</code>
*/
public FixedBitSet buildDocsWithValuesSet() {
public BitSet buildDocsWithValuesSet() {
if (numDocsWithValue == maxDoc) {
return null;
}
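The javadoc above states the contract the field-data callers below rely on: buildDocsWithValuesSet() returns null exactly when every document has a value. A minimal sketch of how a caller can turn that convention into a Bits mask (names are illustrative, not from this commit):

import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;

class DocsWithValues {
    // Converts the builder's result into a Bits mask over all documents of the segment.
    static Bits docsWithValue(BitSet setOrNull, int maxDoc) {
        if (setOrNull == null) {
            // null means every document has at least one value
            return new Bits.MatchAllBits(maxDoc);
        }
        // org.apache.lucene.util.BitSet implements Bits, so it can be used directly
        return setOrNull;
    }
}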
@@ -117,10 +117,10 @@ public class DoubleArrayIndexFieldData extends AbstractIndexFieldData<AtomicNume

};
} else {
final FixedBitSet set = builder.buildDocsWithValuesSet();
final BitSet set = builder.buildDocsWithValuesSet();

// there's sweet spot where due to low unique value count, using ordinals will consume less memory
long singleValuesArraySize = reader.maxDoc() * RamUsageEstimator.NUM_BYTES_DOUBLE + (set == null ? 0 : RamUsageEstimator.sizeOf(set.getBits()) + RamUsageEstimator.NUM_BYTES_INT);
long singleValuesArraySize = reader.maxDoc() * RamUsageEstimator.NUM_BYTES_DOUBLE + (set == null ? 0 : set.ramBytesUsed());
long uniqueValuesArraySize = values.ramBytesUsed();
long ordinalsSize = build.ramBytesUsed();
if (uniqueValuesArraySize + ordinalsSize < singleValuesArraySize) {

@@ -225,7 +225,7 @@ public class DoubleArrayIndexFieldData extends AbstractIndexFieldData<AtomicNume
}
}

private static SortedNumericDoubleValues singles(final DoubleArray values, FixedBitSet set) {
private static SortedNumericDoubleValues singles(final DoubleArray values, Bits set) {
final NumericDoubleValues numValues = new NumericDoubleValues() {
@Override
public double get(int docID) {

@@ -115,10 +115,10 @@ public class FloatArrayIndexFieldData extends AbstractIndexFieldData<AtomicNumer

};
} else {
final FixedBitSet set = builder.buildDocsWithValuesSet();
final BitSet set = builder.buildDocsWithValuesSet();

// there's sweet spot where due to low unique value count, using ordinals will consume less memory
long singleValuesArraySize = reader.maxDoc() * RamUsageEstimator.NUM_BYTES_FLOAT + (set == null ? 0 : RamUsageEstimator.sizeOf(set.getBits()) + RamUsageEstimator.NUM_BYTES_INT);
long singleValuesArraySize = reader.maxDoc() * RamUsageEstimator.NUM_BYTES_FLOAT + (set == null ? 0 : set.ramBytesUsed());
long uniqueValuesArraySize = values.ramBytesUsed();
long ordinalsSize = build.ramBytesUsed();
if (uniqueValuesArraySize + ordinalsSize < singleValuesArraySize) {

@@ -223,7 +223,7 @@ public class FloatArrayIndexFieldData extends AbstractIndexFieldData<AtomicNumer
}
}

private static SortedNumericDoubleValues singles(final FloatArray values, FixedBitSet set) {
private static SortedNumericDoubleValues singles(final FloatArray values, Bits set) {
final NumericDoubleValues numValues = new NumericDoubleValues() {
@Override
public double get(int docID) {
@@ -27,7 +27,7 @@ import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.RamUsageEstimator;
import org.apache.lucene.util.packed.PagedMutable;
import org.elasticsearch.common.geo.GeoPoint;

@@ -126,9 +126,9 @@ public abstract class GeoPointCompressedAtomicFieldData extends AbstractAtomicGe

private final GeoPointFieldMapper.Encoding encoding;
private final PagedMutable lon, lat;
private final FixedBitSet set;
private final BitSet set;

public Single(GeoPointFieldMapper.Encoding encoding, PagedMutable lon, PagedMutable lat, FixedBitSet set) {
public Single(GeoPointFieldMapper.Encoding encoding, PagedMutable lon, PagedMutable lat, BitSet set) {
super();
this.encoding = encoding;
this.lon = lon;

@@ -22,7 +22,7 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.Terms;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.packed.PackedInts;
import org.apache.lucene.util.packed.PagedMutable;
import org.elasticsearch.common.breaker.CircuitBreaker;

@@ -138,7 +138,7 @@ public class GeoPointCompressedIndexFieldData extends AbstractIndexGeoPointField
sLon.set(i, missing);
}
}
FixedBitSet set = builder.buildDocsWithValuesSet();
BitSet set = builder.buildDocsWithValuesSet();
data = new GeoPointCompressedAtomicFieldData.Single(encoding, sLon, sLat, set);
}
success = true;

@@ -27,7 +27,7 @@ import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.Accountables;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.util.DoubleArray;

@@ -119,9 +119,9 @@ public abstract class GeoPointDoubleArrayAtomicFieldData extends AbstractAtomicG
public static class Single extends GeoPointDoubleArrayAtomicFieldData {

private final DoubleArray lon, lat;
private final FixedBitSet set;
private final BitSet set;

public Single(DoubleArray lon, DoubleArray lat, FixedBitSet set) {
public Single(DoubleArray lon, DoubleArray lat, BitSet set) {
this.lon = lon;
this.lat = lat;
this.set = set;

@@ -22,7 +22,7 @@ import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.RandomAccessOrds;
import org.apache.lucene.index.Terms;
import org.apache.lucene.util.FixedBitSet;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.settings.Settings;

@@ -103,7 +103,7 @@ public class GeoPointDoubleArrayIndexFieldData extends AbstractIndexGeoPointFiel
sLon.set(i, lon.get(nativeOrdinal));
}
}
FixedBitSet set = builder.buildDocsWithValuesSet();
BitSet set = builder.buildDocsWithValuesSet();
data = new GeoPointDoubleArrayAtomicFieldData.Single(sLon, sLat, set);
} else {
data = new GeoPointDoubleArrayAtomicFieldData.WithOrdinals(lon, lat, build, reader.maxDoc());
@@ -138,7 +138,7 @@ public class PackedArrayIndexFieldData extends AbstractIndexFieldData<AtomicNume
}
};
} else {
final FixedBitSet docsWithValues = builder.buildDocsWithValuesSet();
final BitSet docsWithValues = builder.buildDocsWithValuesSet();

long minV, maxV;
minV = maxV = 0;

@@ -524,7 +524,7 @@ public class PackedArrayIndexFieldData extends AbstractIndexFieldData<AtomicNume
return DocValues.singleton(values, docsWithFields);
}

private static SortedNumericDocValues pagedSingles(final PackedLongValues values, final FixedBitSet docsWithValue) {
private static SortedNumericDocValues pagedSingles(final PackedLongValues values, final Bits docsWithValue) {
return DocValues.singleton(new NumericDocValues() {
// we need to wrap since NumericDocValues must return 0 when a doc has no value
@Override
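The single-valued wrappers in these field-data classes now only need a Bits mask, because DocValues.singleton(NumericDocValues, Bits) distinguishes a stored zero from a missing value. A simplified, self-contained sketch of that wrapping pattern; the array-backed values are illustrative and stand in for the commit's paged structures:

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.util.Bits;

class Singles {
    static SortedNumericDocValues singles(final long[] perDocValue, final Bits docsWithValue) {
        NumericDocValues values = new NumericDocValues() {
            @Override
            public long get(int docID) {
                // the backing array stores 0 for missing docs; docsWithValue tells a real 0 apart from "missing"
                return perDocValue[docID];
            }
        };
        return DocValues.singleton(values, docsWithValue);
    }
}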
@@ -24,9 +24,9 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.*;
import org.apache.lucene.search.join.BitDocIdSetFilter;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;

import java.io.IOException;
import java.util.Collection;

@@ -141,7 +141,7 @@ public class IncludeNestedDocsQuery extends Query {
static class IncludeNestedDocsScorer extends Scorer {

final Scorer parentScorer;
final FixedBitSet parentBits;
final BitSet parentBits;

int currentChildPointer = -1;
int currentParentPointer = -1;

@@ -150,8 +150,7 @@ public class IncludeNestedDocsQuery extends Query {
IncludeNestedDocsScorer(Weight weight, Scorer parentScorer, BitDocIdSet parentBits, int currentParentPointer) {
super(weight);
this.parentScorer = parentScorer;
// TODO: remove this cast
this.parentBits = (FixedBitSet) parentBits.bits();
this.parentBits = parentBits.bits();
this.currentParentPointer = currentParentPointer;
if (currentParentPointer == 0) {
currentChildPointer = 0;
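The removed cast (and its TODO) relied on FixedBitSet only for prevSetBit, which the abstract BitSet now provides. That is what the scorer needs under Lucene's block-join convention, where child documents are indexed immediately before their parent. A rough sketch of that lookup, not the scorer's exact code:

import org.apache.lucene.util.BitSet;

class NestedBlocks {
    // Returns the doc id of the first child of parentDoc, assuming parentBits marks
    // parent documents and parentDoc is itself a set bit (a parent).
    static int firstChildOf(BitSet parentBits, int parentDoc) {
        // the previous parent's position; -1 when parentDoc is the first parent in the segment
        int previousParent = parentDoc == 0 ? -1 : parentBits.prevSetBit(parentDoc - 1);
        // children occupy the doc ids strictly between the two parents
        return previousParent + 1;
    }
}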
@@ -52,7 +52,7 @@ public interface IndexService extends IndexComponent, Iterable<IndexShard> {

IndexFieldDataService fieldData();

BitsetFilterCache fixedBitSetFilterCache();
BitsetFilterCache bitsetFilterCache();

IndexSettingsService settingsService();

@@ -117,7 +117,7 @@ public class InternalIndexService extends AbstractIndexComponent implements Inde

private final IndexFieldDataService indexFieldData;

private final BitsetFilterCache fixedBitSetFilterCache;
private final BitsetFilterCache bitsetFilterCache;

private final IndexEngine indexEngine;

@@ -138,7 +138,7 @@ public class InternalIndexService extends AbstractIndexComponent implements Inde
AnalysisService analysisService, MapperService mapperService, IndexQueryParserService queryParserService,
SimilarityService similarityService, IndexAliasesService aliasesService, IndexCache indexCache, IndexEngine indexEngine,
IndexGateway indexGateway, IndexStore indexStore, IndexSettingsService settingsService, IndexFieldDataService indexFieldData,
BitsetFilterCache fixedBitSetFilterCache) {
BitsetFilterCache bitSetFilterCache) {
super(index, indexSettings);
this.injector = injector;
this.threadPool = threadPool;

@@ -154,7 +154,7 @@ public class InternalIndexService extends AbstractIndexComponent implements Inde
this.indexGateway = indexGateway;
this.indexStore = indexStore;
this.settingsService = settingsService;
this.fixedBitSetFilterCache = fixedBitSetFilterCache;
this.bitsetFilterCache = bitSetFilterCache;

this.pluginsService = injector.getInstance(PluginsService.class);
this.indicesLifecycle = (InternalIndicesLifecycle) injector.getInstance(IndicesLifecycle.class);

@@ -162,7 +162,7 @@ public class InternalIndexService extends AbstractIndexComponent implements Inde
// inject workarounds for cyclic dep
indexCache.filter().setIndexService(this);
indexFieldData.setIndexService(this);
fixedBitSetFilterCache.setIndexService(this);
bitSetFilterCache.setIndexService(this);
}

@Override

@@ -230,8 +230,8 @@ public class InternalIndexService extends AbstractIndexComponent implements Inde
}

@Override
public BitsetFilterCache fixedBitSetFilterCache() {
return fixedBitSetFilterCache;
public BitsetFilterCache bitsetFilterCache() {
return bitsetFilterCache;
}

@Override
@@ -153,7 +153,7 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
private final IndexFieldDataService indexFieldDataService;
private final IndexService indexService;
private final ShardSuggestService shardSuggestService;
private final ShardBitsetFilterCache shardFixedBitSetFilterCache;
private final ShardBitsetFilterCache shardBitsetFilterCache;

private final Object mutex = new Object();
private final String checkIndexOnStartup;

@@ -179,7 +179,7 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
public InternalIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, Engine engine, MergeSchedulerProvider mergeScheduler, Translog translog,
ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, ShardIndexingService indexingService, ShardGetService getService, ShardSearchService searchService, ShardIndexWarmerService shardWarmerService,
ShardFilterCache shardFilterCache, ShardFieldData shardFieldData, PercolatorQueriesRegistry percolatorQueriesRegistry, ShardPercolateService shardPercolateService, CodecService codecService,
ShardTermVectorService termVectorService, IndexFieldDataService indexFieldDataService, IndexService indexService, ShardSuggestService shardSuggestService, ShardQueryCache shardQueryCache, ShardBitsetFilterCache shardFixedBitSetFilterCache) {
ShardTermVectorService termVectorService, IndexFieldDataService indexFieldDataService, IndexService indexService, ShardSuggestService shardSuggestService, ShardQueryCache shardQueryCache, ShardBitsetFilterCache shardBitsetFilterCache) {
super(shardId, indexSettings);
this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle;
this.indexSettingsService = indexSettingsService;

@@ -206,7 +206,7 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
this.indexService = indexService;
this.codecService = codecService;
this.shardSuggestService = shardSuggestService;
this.shardFixedBitSetFilterCache = shardFixedBitSetFilterCache;
this.shardBitsetFilterCache = shardBitsetFilterCache;
state = IndexShardState.CREATED;

this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, engine.defaultRefreshInterval());

@@ -256,7 +256,7 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I

@Override
public ShardBitsetFilterCache shardBitsetFilterCache() {
return shardFixedBitSetFilterCache;
return shardBitsetFilterCache;
}

@Override

@@ -575,7 +575,7 @@ public class InternalIndexShard extends AbstractIndexShardComponent implements I
@Override
public SegmentsStats segmentStats() {
SegmentsStats segmentsStats = engine.segmentsStats();
segmentsStats.addFixedBitSetMemoryInBytes(shardFixedBitSetFilterCache.getMemorySizeInBytes());
segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes());
return segmentsStats;
}
@@ -440,7 +440,7 @@ public class PercolateContext extends SearchContext {

@Override
public BitsetFilterCache bitsetFilterCache() {
return indexService.fixedBitSetFilterCache();
return indexService.bitsetFilterCache();
}

@Override

@@ -464,8 +464,8 @@ public class RestIndicesAction extends AbstractCatAction {
table.addCell(indexStats == null ? null : indexStats.getTotal().getSegments().getVersionMapMemory());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSegments().getVersionMapMemory());

table.addCell(indexStats == null ? null : indexStats.getTotal().getSegments().getFixedBitSetMemory());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSegments().getFixedBitSetMemory());
table.addCell(indexStats == null ? null : indexStats.getTotal().getSegments().getBitsetMemory());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getSegments().getBitsetMemory());

table.addCell(indexStats == null ? null : indexStats.getTotal().getWarmer().current());
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getWarmer().current());

@@ -296,7 +296,7 @@ public class RestNodesAction extends AbstractCatAction {
table.addCell(stats == null ? null : stats.getIndices().getSegments().getIndexWriterMemory());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getIndexWriterMaxMemory());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getVersionMapMemory());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getFixedBitSetMemory());
table.addCell(stats == null ? null : stats.getIndices().getSegments().getBitsetMemory());

table.addCell(stats == null ? null : stats.getIndices().getSuggest().getCurrent());
table.addCell(stats == null ? null : stats.getIndices().getSuggest().getTime());

@@ -246,7 +246,7 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell(shardStats == null ? null : shardStats.getSegments().getIndexWriterMemory());
table.addCell(shardStats == null ? null : shardStats.getSegments().getIndexWriterMaxMemory());
table.addCell(shardStats == null ? null : shardStats.getSegments().getVersionMapMemory());
table.addCell(shardStats == null ? null : shardStats.getSegments().getFixedBitSetMemory());
table.addCell(shardStats == null ? null : shardStats.getSegments().getBitsetMemory());

table.addCell(shardStats == null ? null : shardStats.getWarmer().current());
table.addCell(shardStats == null ? null : shardStats.getWarmer().total());
@@ -19,11 +19,11 @@
package org.elasticsearch.search.aggregations.bucket.nested;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.join.BitDocIdSetFilter;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.lucene.ReaderContextAware;
import org.elasticsearch.common.lucene.docset.DocIdSets;
import org.elasticsearch.index.mapper.MapperService;

@@ -48,7 +48,7 @@ public class NestedAggregator extends SingleBucketAggregator implements ReaderCo
private final BitDocIdSetFilter childFilter;

private Bits childDocs;
private FixedBitSet parentDocs;
private BitSet parentDocs;

public NestedAggregator(String name, AggregatorFactories factories, String nestedPath, AggregationContext aggregationContext, Aggregator parentAggregator, Map<String, Object> metaData) {
super(name, factories, aggregationContext, parentAggregator, metaData);

@@ -84,14 +84,19 @@ public class NestedAggregator extends SingleBucketAggregator implements ReaderCo
}

try {
DocIdSet docIdSet = parentFilter.getDocIdSet(reader, null);
// In ES if parent is deleted, then also the children are deleted. Therefore acceptedDocs can also null here.
childDocs = DocIdSets.toSafeBits(reader.reader(), childFilter.getDocIdSet(reader, null));
if (DocIdSets.isEmpty(docIdSet)) {
BitDocIdSet parentSet = parentFilter.getDocIdSet(reader);
if (DocIdSets.isEmpty(parentSet)) {
parentDocs = null;
childDocs = null;
} else {
// TODO: Remove cast when BitSet gets prevSetBit
parentDocs = (FixedBitSet) docIdSet.bits();
parentDocs = parentSet.bits();
// In ES if parent is deleted, then also the children are deleted. Therefore acceptedDocs can also null here.
BitDocIdSet childSet = childFilter.getDocIdSet(reader);
if (DocIdSets.isEmpty(childSet)) {
childDocs = new Bits.MatchAllBits(reader.reader().maxDoc());
} else {
childDocs = childSet.bits();
}
}
} catch (IOException ioe) {
throw new AggregationExecutionException("Failed to aggregate [" + name + "]", ioe);
@@ -441,7 +441,7 @@ public class DefaultSearchContext extends SearchContext {

@Override
public BitsetFilterCache bitsetFilterCache() {
return indexService.fixedBitSetFilterCache();
return indexService.bitsetFilterCache();
}

public IndexFieldDataService fieldData() {

@@ -63,12 +63,12 @@ public class TermsFilterTests extends ElasticsearchTestCase {

tf = new TermFilter(new Term(fieldName, "20"));
DocIdSet result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
BitSet bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
BitSet bits = DocIdSets.toBitSet(result.iterator(), reader.maxDoc());
assertThat(bits.cardinality(), equalTo(1));

tf = new TermFilter(new Term("all", "xxx"));
result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
bits = DocIdSets.toBitSet(result.iterator(), reader.maxDoc());
assertThat(bits.cardinality(), equalTo(100));

reader.close();

@@ -98,17 +98,17 @@ public class TermsFilterTests extends ElasticsearchTestCase {

tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20")});
DocIdSet result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
BitSet bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
BitSet bits = DocIdSets.toBitSet(result.iterator(), reader.maxDoc());
assertThat(bits.cardinality(), equalTo(1));

tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10")});
result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
bits = DocIdSets.toBitSet(result.iterator(), reader.maxDoc());
assertThat(bits.cardinality(), equalTo(2));

tf = new TermsFilter(new Term[]{new Term(fieldName, "19"), new Term(fieldName, "20"), new Term(fieldName, "10"), new Term(fieldName, "00")});
result = tf.getDocIdSet(reader.getContext(), reader.getLiveDocs());
bits = DocIdSets.toFixedBitSet(result.iterator(), reader.maxDoc());
bits = DocIdSets.toBitSet(result.iterator(), reader.maxDoc());
assertThat(bits.cardinality(), equalTo(2));

reader.close();
@@ -124,7 +124,7 @@ public abstract class AbstractFieldDataTests extends ElasticsearchSingleNodeTest
}

protected Nested createNested(Filter parentFilter, Filter childFilter) {
BitsetFilterCache s = indexService.fixedBitSetFilterCache();
BitsetFilterCache s = indexService.bitsetFilterCache();
return new Nested(s.getBitDocIdSetFilter(parentFilter), s.getBitDocIdSetFilter(childFilter));
}

@@ -1186,7 +1186,7 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {

// No nested mapping yet, there shouldn't be anything in the fixed bit set cache
ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getFixedBitSetMemoryInBytes(), equalTo(0l));
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0l));

// Now add nested mapping
assertAcked(

@@ -1207,7 +1207,7 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {

if (loadFixedBitSeLazily) {
clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getFixedBitSetMemoryInBytes(), equalTo(0l));
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0l));

// only when querying with nested the fixed bitsets are loaded
SearchResponse searchResponse = client().prepareSearch("test")

@@ -1217,11 +1217,11 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest {
assertThat(searchResponse.getHits().totalHits(), equalTo(5l));
}
clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getFixedBitSetMemoryInBytes(), greaterThan(0l));
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), greaterThan(0l));

assertAcked(client().admin().indices().prepareDelete("test"));
clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getFixedBitSetMemoryInBytes(), equalTo(0l));
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
}

/**

@@ -371,7 +371,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {

// No _parent field yet, there shouldn't be anything in the parent id cache
ClusterStatsResponse clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getFixedBitSetMemoryInBytes(), equalTo(0l));
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0l));

// Now add mapping + children
assertAcked(

@@ -389,7 +389,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {

if (loadFixedBitSetLazily) {
clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getFixedBitSetMemoryInBytes(), equalTo(0l));
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0l));

// only when querying with has_child the fixed bitsets are loaded
SearchResponse searchResponse = client().prepareSearch("test")

@@ -400,11 +400,11 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
}
clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getFixedBitSetMemoryInBytes(), greaterThan(0l));
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), greaterThan(0l));

assertAcked(client().admin().indices().prepareDelete("test"));
clusterStatsResponse = client().admin().cluster().prepareClusterStats().get();
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getFixedBitSetMemoryInBytes(), equalTo(0l));
assertThat(clusterStatsResponse.getIndicesStats().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
}

@Test

@@ -154,7 +154,7 @@ public final class ExternalTestCluster extends TestCluster {

assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
assertThat("Filter cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getFixedBitSetMemoryInBytes(), equalTo(0l));
assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
}
}
}

@@ -1687,7 +1687,7 @@ public final class InternalTestCluster extends TestCluster {
NodeStats stats = nodeService.stats(CommonStatsFlags.ALL, false, false, false, false, false, false, false, false, false);
assertThat("Fielddata size must be 0 on node: " + stats.getNode(), stats.getIndices().getFieldData().getMemorySizeInBytes(), equalTo(0l));
assertThat("Filter cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l));
assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getFixedBitSetMemoryInBytes(), equalTo(0l));
assertThat("FixedBitSet cache size must be 0 on node: " + stats.getNode(), stats.getIndices().getSegments().getBitsetMemoryInBytes(), equalTo(0l));
}
}
}

@@ -83,7 +83,7 @@ public class TestSearchContext extends SearchContext {
this.indexService = indexService;
this.filterCache = indexService.cache().filter();
this.indexFieldDataService = indexService.fieldData();
this.fixedBitSetFilterCache = indexService.fixedBitSetFilterCache();
this.fixedBitSetFilterCache = indexService.bitsetFilterCache();
this.threadPool = threadPool;
}