about summary refs log tree commit diff stats
path: root/config-model/src/test/cfg/application/embed
diff options
context:
space:
mode:
Diffstat (limited to 'config-model/src/test/cfg/application/embed')
-rw-r--r--  config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def  30
-rw-r--r--  config-model/src/test/cfg/application/embed/configdefinitions/sentence-embedder.def  26
-rw-r--r--  config-model/src/test/cfg/application/embed/services.xml  26
3 files changed, 44 insertions, 38 deletions
diff --git a/config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def b/config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def
deleted file mode 100644
index 144dfbd0001..00000000000
--- a/config-model/src/test/cfg/application/embed/configdefinitions/embedding.bert-base-embedder.def
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copy of this Vespa config stored here because Vespa config definitions are not
-# available in unit tests, and are needed (by DomConfigPayloadBuilder.parseLeaf)
-# Alternatively, we could make that not need it as it is not strictly necessaery.
-
-namespace=embedding
-
-# Wordpiece tokenizer
-tokenizerVocab model
-
-transformerModel model
-
-# Max length of token sequence model can handle
-transformerMaxTokens int default=384
-
-# Pooling strategy
-poolingStrategy enum { cls, mean } default=mean
-
-# Input names
-transformerInputIds string default=input_ids
-transformerAttentionMask string default=attention_mask
-transformerTokenTypeIds string default=token_type_ids
-
-# Output name
-transformerOutput string default=output_0
-
-# Settings for ONNX model evaluation
-onnxExecutionMode enum { parallel, sequential } default=sequential
-onnxInterOpThreads int default=1
-onnxIntraOpThreads int default=-4 # n=number of threads -> n<0: CPUs/(-n), n==0: CPUs, n>0: n
-
diff --git a/config-model/src/test/cfg/application/embed/configdefinitions/sentence-embedder.def b/config-model/src/test/cfg/application/embed/configdefinitions/sentence-embedder.def
new file mode 100644
index 00000000000..87b80f1051a
--- /dev/null
+++ b/config-model/src/test/cfg/application/embed/configdefinitions/sentence-embedder.def
@@ -0,0 +1,26 @@
+package=ai.vespa.example.paragraph
+
+# WordPiece tokenizer vocabulary
+vocab model
+
+model model
+
+myValue string
+
+# Max length of token sequence model can handle
+transforerMaxTokens int default=128
+
+# Pooling strategy
+poolingStrategy enum { cls, mean } default=mean
+
+# Input names
+transformerInputIds string default=input_ids
+transformerAttentionMask string default=attention_mask
+
+# Output name
+transformerOutput string default=last_hidden_state
+
+# Settings for ONNX model evaluation
+onnxExecutionMode enum { parallel, sequential } default=sequential
+onnxInterOpThreads int default=1
+onnxIntraOpThreads int default=-4
diff --git a/config-model/src/test/cfg/application/embed/services.xml b/config-model/src/test/cfg/application/embed/services.xml
index 99c89bc4324..6823ef900ae 100644
--- a/config-model/src/test/cfg/application/embed/services.xml
+++ b/config-model/src/test/cfg/application/embed/services.xml
@@ -16,6 +16,7 @@
<onnx-intraop-threads>10</onnx-intraop-threads>
<onnx-interop-threads>8</onnx-interop-threads>
<onnx-gpu-device>1</onnx-gpu-device>
+ <pooling-strategy>mean</pooling-strategy>
</component>
<component id="hf-tokenizer" type="hugging-face-tokenizer">
@@ -25,15 +26,24 @@
<truncation>true</truncation>
</component>
- <component id="transformer" class="ai.vespa.embedding.BertBaseEmbedder" bundle="model-integration">
- <config name="embedding.bert-base-embedder">
- <!-- model specifics -->
- <transformerModel model-id="minilm-l6-v2" url="application-url"/>
- <tokenizerVocab path="files/vocab.txt"/>
+ <component id="bert-embedder" type="bert-embedder">
+ <!-- model specifics -->
+ <transformer-model model-id="minilm-l6-v2" url="application-url"/>
+ <tokenizer-vocab path="files/vocab.txt"/>
+ <max-tokens>512</max-tokens>
+ <transformer-input-ids>my_input_ids</transformer-input-ids>
+ <transformer-attention-mask>my_attention_mask</transformer-attention-mask>
+ <transformer-token-type-ids>my_token_type_ids</transformer-token-type-ids>
+ <transformer-output>my_output</transformer-output>
+ <transformer-start-sequence-token>101</transformer-start-sequence-token>
+ <transformer-end-sequence-token>102</transformer-end-sequence-token>
- <!-- tunable parameters: number of threads etc -->
- <onnxIntraOpThreads>4</onnxIntraOpThreads>
- </config>
+
+ <!-- tunable parameters: number of threads etc -->
+ <onnx-execution-mode>parallel</onnx-execution-mode>
+ <onnx-intraop-threads>4</onnx-intraop-threads>
+ <onnx-interop-threads>8</onnx-interop-threads>
+ <onnx-gpu-device>1</onnx-gpu-device>
</component>
<nodes>