blob: a6dac3f18fd6034ad36093226159319a26343de0 (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
|
# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
namespace=embedding
# Path to tokenizer.json
tokenizerPath model
# Path to model.onnx
transformerModel model
# Max query tokens for ColBERT
maxQueryTokens int default=32
# Max document tokens for ColBERT
maxDocumentTokens int default=512
# Max length of token sequence model can handle
transformerMaxTokens int default=512
# Input names
transformerInputIds string default=input_ids
transformerAttentionMask string default=attention_mask
# Special token ids
transformerStartSequenceToken int default=101
transformerEndSequenceToken int default=102
transformerMaskToken int default=103
transformerPadToken int default=0
queryTokenId int default=1
documentTokenId int default=2
# Output name
transformerOutput string default=contextual
# Settings for ONNX model evaluation
transformerExecutionMode enum { parallel, sequential } default=sequential
transformerInterOpThreads int default=1
transformerIntraOpThreads int default=-4
# GPU device id, -1 for CPU
transformerGpuDevice int default=0
|