path: root/model-integration/src/main/resources/configdefinitions/bert-base-embedder.def
package=ai.vespa.embedding

# Transformer model settings
transformerModelUrl  url

# Max length of the token sequence the model can handle
transformerMaxTokens int default=384

# Pooling strategy
poolingStrategy enum { cls, mean } default=mean

# Input names
transformerInputIds      string default=input_ids
transformerAttentionMask string default=attention_mask
transformerTokenTypeIds  string default=token_type_ids

# Output name
transformerOutput string default=output_0

# Settings for ONNX model evaluation
onnxExecutionMode enum { parallel, sequential } default=sequential
onnxInterOpThreads int default=1
onnxIntraOpThreads int default=-4  # n>0: use n threads, n==0: use all CPUs, n<0: use CPUs/(-n), e.g. -4 on a 16-CPU host gives 4 threads

# Settings for wordpiece tokenizer
tokenizerVocabUrl  url
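
The least obvious part of this definition is the onnxIntraOpThreads/onnxInterOpThreads convention, where a negative value means a fraction of the available CPUs. The plain-Java sketch below illustrates how a consumer of this config could resolve the effective thread count under that convention. It is an illustration only: resolveThreads is a hypothetical helper, and the generated config class name mentioned in the comments (ai.vespa.embedding.BertBaseEmbedderConfig, following Vespa's usual config code generation from the package and file name) is an assumption, not something defined in this file.

// Hypothetical helper mirroring the thread-count convention documented above:
// n > 0 -> n threads, n == 0 -> all available CPUs, n < 0 -> CPUs / (-n).
public class ThreadSettingExample {

    static int resolveThreads(int n) {
        int cpus = Runtime.getRuntime().availableProcessors();
        if (n > 0) return n;
        if (n == 0) return cpus;
        return Math.max(1, cpus / (-n));
    }

    public static void main(String[] args) {
        // With the default of -4, a 16-CPU host gets 16 / 4 = 4 intra-op threads.
        System.out.println("intra-op threads for default -4: " + resolveThreads(-4));
        // In real use the value would come from the generated config class,
        // e.g. (assumed accessor name) config.onnxIntraOpThreads().
    }
}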