summaryrefslogtreecommitdiffstats
path: root/lucene-linguistics
diff options
context:
space:
mode:
authorDainius Jocas <dainius.jocas@gmail.com>2023-09-14 18:14:57 +0300
committerDainius Jocas <dainius.jocas@gmail.com>2023-09-14 18:14:57 +0300
commit8beac01933b5121187d1cf6dd97cef0b34d1afd2 (patch)
tree158ff555917aa58deedaf583aacf4a833e431ace /lucene-linguistics
parent3a539203f2cd93d248f46ec4e75922879699e55b (diff)
LuceneLinguistics optional configDir
Diffstat (limited to 'lucene-linguistics')
-rw-r--r--lucene-linguistics/README.md13
-rw-r--r--lucene-linguistics/src/main/java/com/yahoo/language/lucene/AnalyzerFactory.java10
-rw-r--r--lucene-linguistics/src/main/resources/configdefinitions/lucene-analysis.def2
-rw-r--r--lucene-linguistics/src/test/java/com/yahoo/language/lucene/LuceneTokenizerTest.java40
4 files changed, 44 insertions, 21 deletions
diff --git a/lucene-linguistics/README.md b/lucene-linguistics/README.md
index 6329811e458..feece2b2366 100644
--- a/lucene-linguistics/README.md
+++ b/lucene-linguistics/README.md
@@ -1,6 +1,7 @@
# Vespa Lucene Linguistics
-Linguistics implementation based on Apache Lucene.
+Linguistics implementation based on [Apache Lucene](https://lucene.apache.org).
+
Features:
- a list of default analyzers per language;
- building custom analyzers through the configuration of the linguistics component;
@@ -40,7 +41,7 @@ Add `<component>` to `services.xml` of your application package, e.g.:
</config>
</component>
```
-into `container` clusters that has `<document-processing/>` and/or `<search>` specified.
+into `container` clusters that have `<document-processing/>` and/or `<search>` specified.
And then package and deploy, e.g.:
```shell
@@ -74,7 +75,7 @@ Copy value of the `public static final String NAME` into the `<name>` and observ
</tokenizer>
```
-The `AnalyzerFactory` constructor logs the available analysis components.
+The `AnalyzerFactory` constructor logs the available analysis components on application startup.
The analysis components are discovered through Java Service Provider Interface (SPI).
To add more analysis components it should be enough to put a Lucene analyzer dependency into your application package `pom.xml`
@@ -82,7 +83,11 @@ or register services and create classes directly in the application package.
### Resource files
-The resource files are relative to the component config `configDir`.
+The Lucene analyzers can use various resource files, e.g. for stopwords and synonyms.
+The `configDir` configuration parameter controls where to load these files from.
+These files are relative to the application package root directory.
+
+If `configDir` is not specified, the resource files are loaded from the classpath.
## Inspiration
diff --git a/lucene-linguistics/src/main/java/com/yahoo/language/lucene/AnalyzerFactory.java b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/AnalyzerFactory.java
index 71e31de34b3..67a430a28dc 100644
--- a/lucene-linguistics/src/main/java/com/yahoo/language/lucene/AnalyzerFactory.java
+++ b/lucene-linguistics/src/main/java/com/yahoo/language/lucene/AnalyzerFactory.java
@@ -28,9 +28,6 @@ class AnalyzerFactory {
private final LuceneAnalysisConfig config;
- // Root config directory for all analysis components
- private final Path configDir;
-
// Registry of analyzers per language
// The idea is to create analyzers ONLY WHEN they are needed
// Analyzers are thread safe so no need to recreate them for every document
@@ -45,7 +42,6 @@ class AnalyzerFactory {
public AnalyzerFactory(LuceneAnalysisConfig config, ComponentRegistry<Analyzer> analyzers) {
this.config = config;
- this.configDir = config.configDir();
this.analyzerComponents = analyzers;
this.defaultAnalyzers = new DefaultAnalyzers();
log.config("Available in classpath char filters: " + CharFilterFactory.availableCharFilters());
@@ -83,7 +79,11 @@ class AnalyzerFactory {
private Analyzer createAnalyzer(AnalyzerKey analyzerKey, LuceneAnalysisConfig.Analysis analysis) {
try {
- CustomAnalyzer.Builder builder = CustomAnalyzer.builder(configDir);
+ CustomAnalyzer.Builder builder = config.configDir()
+ // Root config directory for all analysis components in the application package
+ .map(CustomAnalyzer::builder)
+ // else load resource files from the classpath
+ .orElseGet(CustomAnalyzer::builder);
builder = withTokenizer(builder, analysis);
builder = addCharFilters(builder, analysis);
builder = addTokenFilters(builder, analysis);
diff --git a/lucene-linguistics/src/main/resources/configdefinitions/lucene-analysis.def b/lucene-linguistics/src/main/resources/configdefinitions/lucene-analysis.def
index e4b5037dcbe..081d93ec580 100644
--- a/lucene-linguistics/src/main/resources/configdefinitions/lucene-analysis.def
+++ b/lucene-linguistics/src/main/resources/configdefinitions/lucene-analysis.def
@@ -4,7 +4,7 @@ package=com.yahoo.language.lucene
# See
# - https://docs.vespa.ai/en/reference/config-files.html
-configDir path
+configDir path optional
analysis{}.tokenizer.name string default=standard
analysis{}.tokenizer.conf{} string
diff --git a/lucene-linguistics/src/test/java/com/yahoo/language/lucene/LuceneTokenizerTest.java b/lucene-linguistics/src/test/java/com/yahoo/language/lucene/LuceneTokenizerTest.java
index 21d3a7bd33d..35373479bff 100644
--- a/lucene-linguistics/src/test/java/com/yahoo/language/lucene/LuceneTokenizerTest.java
+++ b/lucene-linguistics/src/test/java/com/yahoo/language/lucene/LuceneTokenizerTest.java
@@ -10,10 +10,7 @@ import com.yahoo.language.process.Token;
import org.junit.Test;
import java.io.File;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
import static org.junit.Assert.assertEquals;
@@ -47,10 +44,12 @@ public class LuceneTokenizerTest {
}
private Linguistics luceneLinguistics() {
- return new LuceneLinguistics(new LuceneAnalysisConfig.Builder()
- .configDir(FileReference.mockFileReferenceForUnitTesting(new File(".")))
- .build(),
- new ComponentRegistry<>());
+ return new LuceneLinguistics(
+ new LuceneAnalysisConfig.Builder()
+ .configDir(Optional.of(FileReference
+ .mockFileReferenceForUnitTesting(new File("."))))
+ .build(),
+ new ComponentRegistry<>());
}
private void assertToken(String tokenString, Iterator<Token> tokens) {
@@ -76,7 +75,7 @@ public class LuceneTokenizerTest {
public void testAnalyzerConfiguration() {
String languageCode = Language.ENGLISH.languageCode();
LuceneAnalysisConfig enConfig = new LuceneAnalysisConfig.Builder()
- .configDir(FileReference.mockFileReferenceForUnitTesting(new File(".")))
+ .configDir(Optional.of(FileReference.mockFileReferenceForUnitTesting(new File("."))))
.analysis(
Map.of(languageCode,
new LuceneAnalysisConfig
@@ -105,7 +104,7 @@ public class LuceneTokenizerTest {
public void testEnglishStemmerAnalyzerConfiguration() {
String languageCode = Language.ENGLISH.languageCode();
LuceneAnalysisConfig enConfig = new LuceneAnalysisConfig.Builder()
- .configDir(FileReference.mockFileReferenceForUnitTesting(new File(".")))
+ .configDir(Optional.of(FileReference.mockFileReferenceForUnitTesting(new File("."))))
.analysis(
Map.of(languageCode,
new LuceneAnalysisConfig.Analysis.Builder().tokenFilters(List.of(
@@ -126,7 +125,7 @@ public class LuceneTokenizerTest {
public void testStemmerWithStopWords() {
String languageCode = Language.ENGLISH.languageCode();
LuceneAnalysisConfig enConfig = new LuceneAnalysisConfig.Builder()
- .configDir(FileReference.mockFileReferenceForUnitTesting(new File(".")))
+ .configDir(Optional.of(FileReference.mockFileReferenceForUnitTesting(new File("."))))
.analysis(
Map.of(languageCode,
new LuceneAnalysisConfig.Analysis.Builder().tokenFilters(List.of(
@@ -149,4 +148,23 @@ public class LuceneTokenizerTest {
assertEquals(List.of("Dog", "Cat"), tokenStrings(tokens));
}
+ @Test
+ public void testOptionalPath() {
+ String languageCode = Language.ENGLISH.languageCode();
+ LuceneAnalysisConfig enConfig = new LuceneAnalysisConfig.Builder()
+ .analysis(
+ Map.of(languageCode,
+ new LuceneAnalysisConfig.Analysis.Builder().tokenFilters(List.of(
+ new LuceneAnalysisConfig
+ .Analysis
+ .TokenFilters
+ .Builder()
+ .name("englishMinimalStem"))))
+ ).build();
+ LuceneLinguistics linguistics = new LuceneLinguistics(enConfig, new ComponentRegistry<>());
+ Iterable<Token> tokens = linguistics
+ .getTokenizer()
+ .tokenize("Dogs and Cats", Language.ENGLISH, StemMode.ALL, false);
+ assertEquals(List.of("Dog", "and", "Cat"), tokenStrings(tokens));
+ }
}