path: root/container-search/src/main/resources/configdefinitions/llm-local-client.def
# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package=ai.vespa.llm.clients

# URL of the model to use
modelUrl url default=""

# Local file path to the model to use - takes precedence over modelUrl if set - mostly for testing
localLlmFile string default=""

# Maximum number of requests to handle in parallel per container node
parallelRequests int default=10

# Number of additional requests to queue for processing before new requests are rejected
maxQueueSize int default=10
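#
# Illustration (derived from the two settings above, not an additional
# setting): with the defaults parallelRequests=10 and maxQueueSize=10, a node
# accepts up to 10 + 10 = 20 concurrent requests; the 21st is rejected.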

# Use GPU
useGpu bool default=false

# Maximum number of model layers to run on GPU
gpuLayers int default=1000000

# Number of threads to use for CPU processing; -1 means use all available cores
# Not used for GPU processing
threads int default=-1

# Context size for the model
# The context is divided evenly between parallel requests, so with 10 parallel requests each request "slot" gets 1/10 of the context
contextSize int default=512
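#
# Illustration (derived from the defaults, not an additional setting): with
# contextSize=512 and parallelRequests=10, each request slot gets about
# 512 / 10 = 51 tokens of context.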

# Maximum number of tokens to process in one request - overridden by inference parameters
maxTokens int default=512
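
# Example of overriding these values in services.xml, shown here as a sketch:
# the component class name (ai.vespa.llm.clients.LocalLLM) and the values are
# assumptions for illustration; the config name follows Vespa's convention of
# package name plus def file name.
#
#   <component id="local-llm" class="ai.vespa.llm.clients.LocalLLM">
#     <config name="ai.vespa.llm.clients.llm-local-client">
#       <modelUrl>https://example.com/model.gguf</modelUrl>
#       <contextSize>4096</contextSize>
#       <parallelRequests>4</parallelRequests>
#     </config>
#   </component>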