# Config definition for a Hugging Face embedding component
# (tokenizer.json + ONNX transformer model).
namespace=embedding.huggingface
# Path to tokenizer.json
tokenizerPath model
# Path to model.onnx
transformerModel model
# Max length of token sequence model can handle
transformerMaxTokens int default=512
# Input names
transformerInputIds string default=input_ids
transformerAttentionMask string default=attention_mask
transformerTokenTypeIds string default=token_type_ids
# Output name
transformerOutput string default=last_hidden_state
# Normalize tensors from tokenizer
normalize bool default=false
# How per-token outputs are pooled into a single embedding:
# 'cls' uses the CLS token position, 'mean' averages over tokens
poolingStrategy enum { cls, mean } default=mean
# Settings for ONNX model evaluation
transformerExecutionMode enum { parallel, sequential } default=sequential
transformerInterOpThreads int default=1
# NOTE(review): default is negative (-4); presumably a negative value is
# interpreted relative to the number of available CPU cores rather than as a
# literal thread count — confirm against the consumer of this config
transformerIntraOpThreads int default=-4
# GPU device id, -1 for CPU
transformerGpuDevice int default=0