Mirror of https://github.com/elastic/elasticsearch.git, synced 2025-06-27 17:10:22 -04:00
Remove analyzer version deprecation check (#118167)
The `version` setting has been deprecated since v7: https://github.com/elastic/elasticsearch/pull/74073. This change removes the check for that setting; it has been ignored and has had no effect for the entirety of 8.x and for the last minor releases of v7.
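For context, here is a minimal, self-contained Java sketch of the kind of check this commit removes. It uses hypothetical stand-in methods and a plain Map instead of the real AbstractTokenFilterFactory, Settings, and DeprecationLogger classes, and the warning text is an assumption; the point is only that the old two-argument base constructor consumed Settings solely to warn about the ignored version setting, so with the check gone the subclasses can call super(name) instead of super(name, settings), which is the pattern repeated throughout the diff below.

// Hypothetical illustration only; not the actual Elasticsearch classes.
import java.util.Map;

class AnalyzerVersionCheckSketch {

    // Roughly what the old two-argument base constructor did with its Settings:
    // look for a "version" key and emit a deprecation warning (the setting itself was ignored).
    static void oldBaseConstructor(String componentName, Map<String, String> settings) {
        if (settings.containsKey("version")) {
            // The real code used a DeprecationLogger; the message wording here is assumed.
            System.out.println("[deprecation] setting [version] on analysis component ["
                + componentName + "] is ignored and deprecated");
        }
    }

    // The new single-argument base constructor has no version handling and needs no Settings.
    static void newBaseConstructor(String componentName) {}

    public static void main(String[] args) {
        oldBaseConstructor("my_ascii_folding", Map.of("version", "3.6"));
        newBaseConstructor("my_ascii_folding");
    }
}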
parent eb59b989ef
commit d614804731
174 changed files with 182 additions and 230 deletions
@@ -30,7 +30,7 @@ public class ASCIIFoldingTokenFilterFactory extends AbstractTokenFilterFactory i
    private final boolean preserveOriginal;

    public ASCIIFoldingTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        preserveOriginal = settings.getAsBoolean(PRESERVE_ORIGINAL.getPreferredName(), DEFAULT_PRESERVE_ORIGINAL);
    }
@@ -30,7 +30,7 @@ public abstract class AbstractCompoundWordTokenFilterFactory extends AbstractTok
    protected final CharArraySet wordList;

    protected AbstractCompoundWordTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);

        minWordSize = settings.getAsInt("min_word_size", CompoundWordTokenFilterBase.DEFAULT_MIN_WORD_SIZE);
        minSubwordSize = settings.getAsInt("min_subword_size", CompoundWordTokenFilterBase.DEFAULT_MIN_SUBWORD_SIZE);
@@ -21,7 +21,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class ApostropheFilterFactory extends AbstractTokenFilterFactory {

    ApostropheFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class ArabicAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arabic
    private final ArabicAnalyzer arabicAnalyzer;

    ArabicAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        arabicAnalyzer = new ArabicAnalyzer(
            Analysis.parseStopWords(env, settings, ArabicAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class ArabicNormalizationFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    ArabicNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class ArabicStemTokenFilterFactory extends AbstractTokenFilterFactory {

    ArabicStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class ArmenianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Arme
    private final ArmenianAnalyzer analyzer;

    ArmenianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new ArmenianAnalyzer(
            Analysis.parseStopWords(env, settings, ArmenianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ public class BasqueAnalyzerProvider extends AbstractIndexAnalyzerProvider<Basque
    private final BasqueAnalyzer analyzer;

    BasqueAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new BasqueAnalyzer(
            Analysis.parseStopWords(env, settings, BasqueAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ public class BengaliAnalyzerProvider extends AbstractIndexAnalyzerProvider<Benga
    private final BengaliAnalyzer analyzer;

    BengaliAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new BengaliAnalyzer(
            Analysis.parseStopWords(env, settings, BengaliAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class BengaliNormalizationFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    BengaliNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class BrazilianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bra
    private final BrazilianAnalyzer analyzer;

    BrazilianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new BrazilianAnalyzer(
            Analysis.parseStopWords(env, settings, BrazilianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -24,7 +24,7 @@ public class BrazilianStemTokenFilterFactory extends AbstractTokenFilterFactory
    private final CharArraySet exclusions;

    BrazilianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
    }
@@ -22,7 +22,7 @@ public class BulgarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Bul
    private final BulgarianAnalyzer analyzer;

    BulgarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new BulgarianAnalyzer(
            Analysis.parseStopWords(env, settings, BulgarianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -44,7 +44,7 @@ public final class CJKBigramFilterFactory extends AbstractTokenFilterFactory {

    @SuppressWarnings("HiddenField")
    CJKBigramFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        outputUnigrams = settings.getAsBoolean("output_unigrams", false);
        final List<String> asArray = settings.getAsList("ignored_scripts");
        Set<String> scripts = new HashSet<>(Arrays.asList("han", "hiragana", "katakana", "hangul"));
@@ -20,7 +20,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public final class CJKWidthFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    CJKWidthFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class CatalanAnalyzerProvider extends AbstractIndexAnalyzerProvider<Catal
    private final CatalanAnalyzer analyzer;

    CatalanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new CatalanAnalyzer(
            Analysis.parseStopWords(env, settings, CatalanAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -33,7 +33,7 @@ public class CharGroupTokenizerFactory extends AbstractTokenizerFactory {
    private boolean tokenizeOnSymbol = false;

    public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);

        maxTokenLength = settings.getAsInt(MAX_TOKEN_LENGTH, CharTokenizer.DEFAULT_MAX_WORD_LEN);
@@ -24,7 +24,7 @@ public class ChineseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Stand
    private final StandardAnalyzer analyzer;

    ChineseAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        // old index: best effort
        analyzer = new StandardAnalyzer(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET);
@@ -22,7 +22,7 @@ public class CjkAnalyzerProvider extends AbstractIndexAnalyzerProvider<CJKAnalyz
    private final CJKAnalyzer analyzer;

    CjkAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        CharArraySet stopWords = Analysis.parseStopWords(env, settings, CJKAnalyzer.getDefaultStopSet());

        analyzer = new CJKAnalyzer(stopWords);
@@ -21,7 +21,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class ClassicFilterFactory extends AbstractTokenFilterFactory {

    ClassicFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -25,7 +25,7 @@ public class ClassicTokenizerFactory extends AbstractTokenizerFactory {
    private final int maxTokenLength;

    ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);
        maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
    }
@@ -29,7 +29,7 @@ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory {
    private final boolean queryMode;

    CommonGramsTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.ignoreCase = settings.getAsBoolean("ignore_case", false);
        this.queryMode = settings.getAsBoolean("query_mode", false);
        this.words = Analysis.parseCommonWords(env, settings, null, ignoreCase);
@@ -22,7 +22,7 @@ public class CzechAnalyzerProvider extends AbstractIndexAnalyzerProvider<CzechAn
    private final CzechAnalyzer analyzer;

    CzechAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new CzechAnalyzer(
            Analysis.parseStopWords(env, settings, CzechAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -18,7 +18,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class CzechStemTokenFilterFactory extends AbstractTokenFilterFactory {

    CzechStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class DanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Danish
    private final DanishAnalyzer analyzer;

    DanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new DanishAnalyzer(
            Analysis.parseStopWords(env, settings, DanishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -23,7 +23,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public final class DecimalDigitFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    DecimalDigitFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -32,7 +32,7 @@ public class DelimitedPayloadTokenFilterFactory extends AbstractTokenFilterFacto
    private final PayloadEncoder encoder;

    DelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        String delimiterConf = settings.get(DELIMITER);
        if (delimiterConf != null) {
            delimiter = delimiterConf.charAt(0);
@@ -22,7 +22,7 @@ public class DutchAnalyzerProvider extends AbstractIndexAnalyzerProvider<DutchAn
    private final DutchAnalyzer analyzer;

    DutchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new DutchAnalyzer(
            Analysis.parseStopWords(env, settings, DutchAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -25,7 +25,7 @@ public class DutchStemTokenFilterFactory extends AbstractTokenFilterFactory {
    private final CharArraySet exclusions;

    DutchStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
    }
@@ -35,7 +35,7 @@ public class EdgeNGramTokenFilterFactory extends AbstractTokenFilterFactory {
    private static final String PRESERVE_ORIG_KEY = "preserve_original";

    EdgeNGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.minGram = settings.getAsInt("min_gram", 1);
        this.maxGram = settings.getAsInt("max_gram", 2);
        if (settings.get("side") != null) {
@@ -26,7 +26,7 @@ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
    private final CharMatcher matcher;

    EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);
        this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
        this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
        this.matcher = parseTokenChars(settings);
@@ -24,7 +24,7 @@ public class ElisionTokenFilterFactory extends AbstractTokenFilterFactory implem
    private final CharArraySet articles;

    ElisionTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.articles = Analysis.parseArticles(env, settings);
        if (this.articles == null) {
            throw new IllegalArgumentException("elision filter requires [articles] or [articles_path] setting");
@@ -22,7 +22,7 @@ public class EnglishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Engli
    private final EnglishAnalyzer analyzer;

    EnglishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new EnglishAnalyzer(
            Analysis.parseStopWords(env, settings, EnglishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ public class EstonianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Esto
    private final EstonianAnalyzer analyzer;

    EstonianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new EstonianAnalyzer(
            Analysis.parseStopWords(env, settings, EstonianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -34,7 +34,7 @@ public class FingerprintAnalyzerProvider extends AbstractIndexAnalyzerProvider<A
    private final FingerprintAnalyzer analyzer;

    FingerprintAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);

        char separator = parseSeparator(settings);
        int maxOutputSize = settings.getAsInt(MAX_OUTPUT_SIZE.getPreferredName(), DEFAULT_MAX_OUTPUT_SIZE);
@@ -26,7 +26,7 @@ public class FingerprintTokenFilterFactory extends AbstractTokenFilterFactory {
    private final int maxOutputSize;

    FingerprintTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.separator = FingerprintAnalyzerProvider.parseSeparator(settings);
        this.maxOutputSize = settings.getAsInt(MAX_OUTPUT_SIZE.getPreferredName(), DEFAULT_MAX_OUTPUT_SIZE);
    }
@@ -22,7 +22,7 @@ public class FinnishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Finni
    private final FinnishAnalyzer analyzer;

    FinnishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new FinnishAnalyzer(
            Analysis.parseStopWords(env, settings, FinnishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class FlattenGraphTokenFilterFactory extends AbstractTokenFilterFactory {

    FlattenGraphTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class FrenchAnalyzerProvider extends AbstractIndexAnalyzerProvider<French
    private final FrenchAnalyzer analyzer;

    FrenchAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new FrenchAnalyzer(
            Analysis.parseStopWords(env, settings, FrenchAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -25,7 +25,7 @@ public class FrenchStemTokenFilterFactory extends AbstractTokenFilterFactory {
    private final CharArraySet exclusions;

    FrenchStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
    }
@@ -22,7 +22,7 @@ public class GalicianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Gali
    private final GalicianAnalyzer analyzer;

    GalicianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new GalicianAnalyzer(
            Analysis.parseStopWords(env, settings, GalicianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ public class GermanAnalyzerProvider extends AbstractIndexAnalyzerProvider<German
    private final GermanAnalyzer analyzer;

    GermanAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new GermanAnalyzer(
            Analysis.parseStopWords(env, settings, GermanAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class GermanNormalizationFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    GermanNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -24,7 +24,7 @@ public class GermanStemTokenFilterFactory extends AbstractTokenFilterFactory {
    private final CharArraySet exclusions;

    GermanStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.exclusions = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
    }
@@ -21,7 +21,7 @@ public class GreekAnalyzerProvider extends AbstractIndexAnalyzerProvider<GreekAn
    private final GreekAnalyzer analyzer;

    GreekAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new GreekAnalyzer(Analysis.parseStopWords(env, settings, GreekAnalyzer.getDefaultStopSet()));
    }
@@ -22,7 +22,7 @@ public class HindiAnalyzerProvider extends AbstractIndexAnalyzerProvider<HindiAn
    private final HindiAnalyzer analyzer;

    HindiAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new HindiAnalyzer(
            Analysis.parseStopWords(env, settings, HindiAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class HindiNormalizationFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    HindiNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class HungarianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Hun
    private final HungarianAnalyzer analyzer;

    HungarianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new HungarianAnalyzer(
            Analysis.parseStopWords(env, settings, HungarianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class IndicNormalizationFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    IndicNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class IndonesianAnalyzerProvider extends AbstractIndexAnalyzerProvider<In
    private final IndonesianAnalyzer analyzer;

    IndonesianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new IndonesianAnalyzer(
            Analysis.parseStopWords(env, settings, IndonesianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -25,7 +25,7 @@ public class IrishAnalyzerProvider extends AbstractIndexAnalyzerProvider<IrishAn
    private final IrishAnalyzer analyzer;

    IrishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new IrishAnalyzer(
            Analysis.parseStopWords(env, settings, IrishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ public class ItalianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Itali
    private final ItalianAnalyzer analyzer;

    ItalianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new ItalianAnalyzer(
            Analysis.parseStopWords(env, settings, ItalianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class KStemTokenFilterFactory extends AbstractTokenFilterFactory {

    KStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -70,7 +70,7 @@ public class KeepTypesFilterFactory extends AbstractTokenFilterFactory {
    }

    KeepTypesFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);

        final List<String> arrayKeepTypes = settings.getAsList(KEEP_TYPES_KEY, null);
        if ((arrayKeepTypes == null)) {
@@ -51,7 +51,7 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory {
    private static final String ENABLE_POS_INC_KEY = "enable_position_increments";

    KeepWordFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);

        final List<String> arrayKeepWords = settings.getAsList(KEEP_WORDS_KEY, null);
        final String keepWordsPath = settings.get(KEEP_WORDS_PATH_KEY, null);
@@ -20,7 +20,7 @@ public class KeywordAnalyzerProvider extends AbstractIndexAnalyzerProvider<Keywo
    private final KeywordAnalyzer keywordAnalyzer;

    public KeywordAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.keywordAnalyzer = new KeywordAnalyzer();
    }
@@ -45,7 +45,7 @@ public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory
    private final AnalysisMode analysisMode;

    KeywordMarkerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);

        boolean ignoreCase = settings.getAsBoolean("ignore_case", false);
        String patternString = settings.get("keywords_pattern");
@@ -21,7 +21,7 @@ public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
    private final int bufferSize;

    KeywordTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);
        bufferSize = settings.getAsInt("buffer_size", 256);
    }
@@ -22,7 +22,7 @@ public class LatvianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Latvi
    private final LatvianAnalyzer analyzer;

    LatvianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new LatvianAnalyzer(
            Analysis.parseStopWords(env, settings, LatvianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -25,7 +25,7 @@ public class LengthTokenFilterFactory extends AbstractTokenFilterFactory {
    private static final String ENABLE_POS_INC_KEY = "enable_position_increments";

    LengthTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        min = settings.getAsInt("min", 0);
        max = settings.getAsInt("max", Integer.MAX_VALUE);
        if (settings.get(ENABLE_POS_INC_KEY) != null) {
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
public class LetterTokenizerFactory extends AbstractTokenizerFactory {

    LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);
    }

    @Override
@@ -25,7 +25,7 @@ public class LimitTokenCountFilterFactory extends AbstractTokenFilterFactory {
    private final boolean consumeAllTokens;

    LimitTokenCountFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.maxTokenCount = settings.getAsInt("max_token_count", DEFAULT_MAX_TOKEN_COUNT);
        this.consumeAllTokens = settings.getAsBoolean("consume_all_tokens", DEFAULT_CONSUME_ALL_TOKENS);
    }
@@ -25,7 +25,7 @@ public class LithuanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Li
    private final LithuanianAnalyzer analyzer;

    LithuanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new LithuanianAnalyzer(
            Analysis.parseStopWords(env, settings, LithuanianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -34,7 +34,7 @@ public class LowerCaseTokenFilterFactory extends AbstractTokenFilterFactory impl
    private final String lang;

    LowerCaseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.lang = settings.get("language", null);
    }
@@ -28,7 +28,7 @@ public class MinHashTokenFilterFactory extends AbstractTokenFilterFactory {
    private final MinHashFilterFactory minHashFilterFactory;

    MinHashTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        minHashFilterFactory = new MinHashFilterFactory(convertSettings(settings));
    }
@@ -36,7 +36,7 @@ public class MultiplexerTokenFilterFactory extends AbstractTokenFilterFactory {
    private final boolean preserveOriginal;

    public MultiplexerTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) throws IOException {
-        super(name, settings);
+        super(name);
        this.filterNames = settings.getAsList("filters");
        this.preserveOriginal = settings.getAsBoolean("preserve_original", true);
    }
@@ -24,7 +24,7 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory {
    private static final String PRESERVE_ORIG_KEY = "preserve_original";

    NGramTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff();
        this.minGram = settings.getAsInt("min_gram", 1);
        this.maxGram = settings.getAsInt("max_gram", 2);
@@ -94,7 +94,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
    }

    NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);
        int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff();
        this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
        this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
@@ -22,7 +22,7 @@ public class NorwegianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Nor
    private final NorwegianAnalyzer analyzer;

    NorwegianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new NorwegianAnalyzer(
            Analysis.parseStopWords(env, settings, NorwegianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -27,7 +27,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
    private final boolean reverse;

    PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);
        bufferSize = settings.getAsInt("buffer_size", 1024);
        String delimiterString = settings.get("delimiter");
        if (delimiterString == null) {
@@ -25,7 +25,7 @@ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider<Analy
    private final PatternAnalyzer analyzer;

    PatternAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);

        final CharArraySet defaultStopwords = CharArraySet.EMPTY_SET;
        boolean lowercase = settings.getAsBoolean("lowercase", true);
@@ -26,7 +26,7 @@ public class PatternCaptureGroupTokenFilterFa
    private static final String PRESERVE_ORIG_KEY = "preserve_original";

    PatternCaptureGroupTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        List<String> regexes = settings.getAsList(PATTERNS_KEY, null, false);
        if (regexes == null) {
            throw new IllegalArgumentException("required setting '" + PATTERNS_KEY + "' is missing for token filter [" + name + "]");
@@ -27,7 +27,7 @@ public class PatternReplaceTokenFilterFactory
    private final boolean all;

    public PatternReplaceTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);

        String sPattern = settings.get("pattern", null);
        if (sPattern == null) {
@@ -25,7 +25,7 @@ public class PatternTokenizerFactory extends AbstractTokenizerFactory {
    private final int group;

    PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);

        String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
        if (sPattern == null) {
@@ -35,7 +35,7 @@ public class PersianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Stopw
    private final StopwordAnalyzerBase analyzer;

    PersianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.UPGRADE_TO_LUCENE_10_0_0)) {
            // since Lucene 10 this analyzer contains stemming by default
            analyzer = new PersianAnalyzer(Analysis.parseStopWords(env, settings, PersianAnalyzer.getDefaultStopSet()));
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class PersianNormalizationFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    PersianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class PersianStemTokenFilterFactory extends AbstractTokenFilterFactory {

    PersianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class PorterStemTokenFilterFactory extends AbstractTokenFilterFactory {

    PorterStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class PortugueseAnalyzerProvider extends AbstractIndexAnalyzerProvider<Po
    private final PortugueseAnalyzer analyzer;

    PortugueseAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new PortugueseAnalyzer(
            Analysis.parseStopWords(env, settings, PortugueseAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -29,7 +29,7 @@ public class PredicateTokenFilterScriptFactory extends AbstractTokenFilterFactor
    private final AnalysisPredicateScript.Factory factory;

    public PredicateTokenFilterScriptFactory(IndexSettings indexSettings, String name, Settings settings, ScriptService scriptService) {
-        super(name, settings);
+        super(name);
        Settings scriptSettings = settings.getAsSettings("script");
        Script script = Script.parse(scriptSettings);
        if (script.getType() != ScriptType.INLINE) {
@@ -22,7 +22,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
class RemoveDuplicatesTokenFilterFactory extends AbstractTokenFilterFactory {

    RemoveDuplicatesTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class ReverseTokenFilterFactory extends AbstractTokenFilterFactory {

    ReverseTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -32,7 +32,7 @@ public class RomanianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Stop
    private final StopwordAnalyzerBase analyzer;

    RomanianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        CharArraySet stopwords = Analysis.parseStopWords(env, settings, RomanianAnalyzer.getDefaultStopSet());
        CharArraySet stemExclusionSet = Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET);
        if (indexSettings.getIndexVersionCreated().onOrAfter(IndexVersions.UPGRADE_TO_LUCENE_10_0_0)) {
@@ -22,7 +22,7 @@ public class RussianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Russi
    private final RussianAnalyzer analyzer;

    RussianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new RussianAnalyzer(
            Analysis.parseStopWords(env, settings, RussianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -19,7 +19,7 @@ import org.elasticsearch.index.analysis.AbstractTokenFilterFactory;
public class RussianStemTokenFilterFactory extends AbstractTokenFilterFactory {

    public RussianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class ScandinavianFoldingFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    ScandinavianFoldingFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class ScandinavianNormalizationFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    ScandinavianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -37,7 +37,7 @@ public class ScriptedConditionTokenFilterFact
    private final List<String> filterNames;

    ScriptedConditionTokenFilterFactory(IndexSettings indexSettings, String name, Settings settings, ScriptService scriptService) {
-        super(name, settings);
+        super(name);

        Settings scriptSettings = settings.getAsSettings("script");
        Script script = Script.parse(scriptSettings);
@@ -22,7 +22,7 @@ public class SerbianAnalyzerProvider extends AbstractIndexAnalyzerProvider<Serbi
    private final SerbianAnalyzer analyzer;

    SerbianAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new SerbianAnalyzer(
            Analysis.parseStopWords(env, settings, SerbianAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -20,7 +20,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class SerbianNormalizationFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    SerbianNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -20,7 +20,7 @@ public class SimpleAnalyzerProvider extends AbstractIndexAnalyzerProvider<Simple
    private final SimpleAnalyzer simpleAnalyzer;

    public SimpleAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.simpleAnalyzer = new SimpleAnalyzer();
    }
@@ -21,7 +21,7 @@ public class SimplePatternSplitTokenizerFactory extends AbstractTokenizerFactory
    private final String pattern;

    public SimplePatternSplitTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);

        pattern = settings.get("pattern", "");
    }
@@ -21,7 +21,7 @@ public class SimplePatternTokenizerFactory extends AbstractTokenizerFactory {
    private final String pattern;

    public SimplePatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(indexSettings, settings, name);
+        super(name);

        pattern = settings.get("pattern", "");
    }
@@ -51,7 +51,7 @@ public class SnowballAnalyzerProvider extends AbstractIndexAnalyzerProvider<Snow
    private final SnowballAnalyzer analyzer;

    SnowballAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);

        String language = settings.get("language", settings.get("name", "English"));
        CharArraySet defaultStopwords = DEFAULT_LANGUAGE_STOP_WORDS.getOrDefault(language, CharArraySet.EMPTY_SET);
@@ -25,7 +25,7 @@ public class SnowballTokenFilterFactory extends AbstractTokenFilterFactory {
    private String language;

    SnowballTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        this.language = Strings.capitalize(settings.get("language", settings.get("name", "English")));
    }
@@ -25,7 +25,7 @@ public class SoraniAnalyzerProvider extends AbstractIndexAnalyzerProvider<Sorani
    private final SoraniAnalyzer analyzer;

    SoraniAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new SoraniAnalyzer(
            Analysis.parseStopWords(env, settings, SoraniAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -22,7 +22,7 @@ import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
public class SoraniNormalizationFilterFactory extends AbstractTokenFilterFactory implements NormalizingTokenFilterFactory {

    public SoraniNormalizationFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
-        super(name, settings);
+        super(name);
    }

    @Override
@@ -22,7 +22,7 @@ public class SpanishAnalyzerProvider extends AbstractIndexAnalyzerProvider<Spani
    private final SpanishAnalyzer analyzer;

    SpanishAnalyzerProvider(IndexSettings indexSettings, Environment env, String name, Settings settings) {
-        super(name, settings);
+        super(name);
        analyzer = new SpanishAnalyzer(
            Analysis.parseStopWords(env, settings, SpanishAnalyzer.getDefaultStopSet()),
            Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)
@@ -26,7 +26,7 @@ public class StemmerOverrideTokenFilterFactor
    private final StemmerOverrideMap overrideMap;

    StemmerOverrideTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) throws IOException {
-        super(name, settings);
+        super(name);

        List<String> rules = Analysis.getWordList(env, settings, "rules");
        if (rules == null) {
@@ -92,7 +92,7 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory {
    private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(StemmerTokenFilterFactory.class);

    StemmerTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
-        super(name, settings);
+        super(name);
        this.language = Strings.capitalize(settings.get("language", settings.get("name", "porter")));
        // check that we have a valid language by trying to create a TokenStream
        create(EMPTY_TOKEN_STREAM).close();
Some files were not shown because too many files have changed in this diff.