# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package=ai.vespa.llm.clients

# The LLM model to use
model model

# Maximum number of requests to handle in parallel per container node
parallelRequests int default=1

# Number of additional requests to queue for processing before new requests are rejected
maxQueueSize int default=10

# Use GPU
useGpu bool default=true

# Maximum number of model layers to run on GPU
gpuLayers int default=1000000

# Number of threads to use for CPU processing; -1 means use all available cores
# Not used for GPU processing
threads int default=-1

# Context size for the model
# The context is divided evenly between parallel requests: with 10 parallel requests, each "slot" gets 1/10 of the context
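# For example, the defaults below (contextSize=4096) with parallelRequests=4 give each request a 1024-token context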
contextSize int default=4096

# Maximum number of tokens to process in one request; overridden by inference parameters
maxTokens int default=512
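
# Below is a sketch of how these values could be overridden from services.xml.
# The component class name (ai.vespa.llm.clients.LocalLLM) and the model-type
# attributes are assumptions for illustration, not given by this file; the
# config name follows from the package and file name above.
#
# <component id="llm" class="ai.vespa.llm.clients.LocalLLM">
#   <config name="ai.vespa.llm.clients.llm-local-client">
#     <model url="..."/>
#     <parallelRequests>2</parallelRequests>
#     <contextSize>8192</contextSize>
#   </config>
# </component>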