# Config definition for the Hugging Face embedder (namespace embedding.huggingface).
namespace=embedding.huggingface
# Path to tokenizer.json
tokenizerPath model
# Path to model.onnx
transformerModel model
# Maximum length of the token sequence the model can handle
transformerMaxTokens int default=512
# Names of the input tensors expected by the ONNX model
transformerInputIds string default=input_ids
transformerAttentionMask string default=attention_mask
transformerTokenTypeIds string default=token_type_ids
# Name of the output tensor read from the ONNX model
transformerOutput string default=last_hidden_state
# Normalize tensors from tokenizer
normalize bool default=false
# Settings for ONNX model evaluation.
# NOTE(review): for the thread counts below, a negative value N appears to mean
# "number of CPU cores / |N|" rather than a literal count — verify against the
# Vespa ONNX evaluation documentation before changing the defaults.
transformerExecutionMode enum { parallel, sequential } default=sequential
transformerInterOpThreads int default=1
transformerIntraOpThreads int default=-4
# GPU device id, -1 for CPU
transformerGpuDevice int default=0