Diffstat (limited to 'config-model/src/test/cfg/application/embed')
-rw-r--r--  config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def  30
-rw-r--r--  config-model/src/test/cfg/application/embed/services.xml                                         2
2 files changed, 31 insertions, 1 deletions
diff --git a/config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def b/config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def
new file mode 100644
index 00000000000..a6544187140
--- /dev/null
+++ b/config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def
@@ -0,0 +1,30 @@
+# A copy of this Vespa config is stored here because Vespa config definitions are not
+# available in unit tests, and are needed (by DomConfigPayloadBuilder.parseLeaf).
+# Alternatively, we could make that not need it, as it is not strictly necessary.
+
+namespace=embedding
+
+# Wordpiece tokenizer vocabulary
+tokenizerVocab model
+
+transformerModel model
+
+# Max length of token sequence model can handle
+transformerMaxTokens int default=384
+
+# Pooling strategy
+poolingStrategy enum { cls, mean } default=mean
+
+# Input names
+transformerInputIds string default=input_ids
+transformerAttentionMask string default=attention_mask
+transformerTokenTypeIds string default=token_type_ids
+
+# Output name
+transformerOutput string default=output_0
+
+# Settings for ONNX model evaluation
+onnxExecutionMode enum { parallel, sequential } default=sequential
+onnxInterOpThreads int default=1
+onnxIntraOpThreads int default=-4 # n=number of threads -> n<0: CPUs/(-n), n==0: CPUs, n>0: n
+
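Note on the threading defaults in the definition above: onnxIntraOpThreads defaults to -4, using the negative-value convention from its inline comment (n>0: exactly n threads, n==0: one thread per CPU, n<0: CPUs/(-n)). A minimal Java sketch of that interpretation, for illustration only; the class and method names are assumptions and are not part of this change:

    class ThreadCountSketch {
        // Resolve a configured thread value to an actual thread count, following the
        // convention documented in the config definition comment above.
        static int resolve(int configured) {
            int cpus = Runtime.getRuntime().availableProcessors();
            if (configured > 0) return configured;      // explicit thread count
            if (configured == 0) return cpus;           // one thread per available CPU
            return Math.max(1, cpus / (-configured));   // fraction of CPUs, e.g. -4 -> cpus / 4
        }
    }

On an 8-CPU host, for example, the default of -4 would resolve to 2 intra-op threads.
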
diff --git a/config-model/src/test/cfg/application/embed/services.xml b/config-model/src/test/cfg/application/embed/services.xml
index 88558ace4bf..cdbcfd67f02 100644
--- a/config-model/src/test/cfg/application/embed/services.xml
+++ b/config-model/src/test/cfg/application/embed/services.xml
@@ -7,7 +7,7 @@
<component id="transformer" class="ai.vespa.embedding.BertBaseEmbedder" bundle="model-integration">
<config name="embedding.bert-base-embedder">
<!-- model specifics -->
- <transformerModel id="minilm-l6-v2" url="application-url"/>
+ <transformerModel model-id="minilm-l6-v2" url="application-url"/>
<tokenizerVocab path="files/vocab.txt"/>
<!-- tunable parameters: number of threads etc -->