<?xml version="1.0" encoding="utf-8" ?>
<!-- Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
<!-- Vespa services.xml sample: one container cluster declaring three embedding
     components (a Hugging Face embedder, a Hugging Face tokenizer, and a
     BERT-base embedder) and a single container node. -->
<services version="1.0">
<container version="1.0">
<!-- Hugging Face embedder backed by an ONNX transformer model.
     NOTE(review): both model-id and url/path are given for the models below;
     presumably model-id is resolved on Vespa Cloud while url/path is used for
     self-hosted deployments - confirm against the Vespa embedding docs. -->
<component id="hf-embedder" type="hugging-face-embedder">
<transformer-model model-id="e5-base-v2" url="https://my/url/model.onnx"/>
<!-- Tokenizer definition file bundled inside the application package at this path. -->
<tokenizer-model model-id="e5-base-v2-vocab" path="app/tokenizer.json"/>
<!-- NOTE(review): presumably the maximum number of input tokens per embedding call - verify. -->
<max-tokens>1024</max-tokens>
<!-- Names of the ONNX model's input and output tensors; these must match the
     tensor names baked into model.onnx (here custom "my_*" names). -->
<transformer-input-ids>my_input_ids</transformer-input-ids>
<transformer-attention-mask>my_attention_mask</transformer-attention-mask>
<transformer-token-type-ids>my_token_type_ids</transformer-token-type-ids>
<transformer-output>my_output</transformer-output>
<!-- NOTE(review): presumably L2-normalizes the produced embedding vector - verify. -->
<normalize>true</normalize>
<!-- ONNX Runtime tuning: execution mode, intra/inter-op thread counts,
     and which GPU device to run on (device index 1 here). -->
<onnx-execution-mode>parallel</onnx-execution-mode>
<onnx-intraop-threads>10</onnx-intraop-threads>
<onnx-interop-threads>8</onnx-interop-threads>
<onnx-gpu-device>1</onnx-gpu-device>
</component>
<!-- Standalone Hugging Face tokenizer component. NOTE(review): language="no"
     looks like an ISO 639-1 language code (Norwegian) selecting this model for
     that language - confirm against the Vespa tokenizer docs. -->
<component id="hf-tokenizer" type="hugging-face-tokenizer">
<model language="no" model-id="multilingual-e5-base-vocab" url="https://my/url/tokenizer.json"/>
<special-tokens>true</special-tokens>
<max-length>768</max-length>
<truncation>true</truncation>
</component>
<!-- BERT-base embedder wired up directly by Java class and bundle, configured
     through the embedding.bert-base-embedder config definition. -->
<component id="transformer" class="ai.vespa.embedding.BertBaseEmbedder" bundle="model-integration">
<config name="embedding.bert-base-embedder">
<!-- model specifics -->
<transformerModel model-id="minilm-l6-v2" url="application-url"/>
<!-- Vocabulary file bundled in the application package. -->
<tokenizerVocab path="files/vocab.txt"/>
<!-- tunable parameters: number of threads etc -->
<onnxIntraOpThreads>4</onnxIntraOpThreads>
</config>
</component>
<!-- Single-node container cluster; hostalias maps to a host in hosts.xml. -->
<nodes>
<node hostalias="node1" />
</nodes>
</container>
</services>