blob: 6b83ffd075143d247d22bcc917a1d6ca760d9433 (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
|
# Copyright Vespa.ai. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package=ai.vespa.llm.clients
# The LLM model to use
model model
# Maximum number of requests to handle in parallel per container node
parallelRequests int default=1
# Additional number of requests to put in queue for processing before starting to reject new requests
maxQueueSize int default=100
# Maximum number of milliseconds to wait in the queue before rejecting a request
maxQueueWait int default=10000
# Use GPU
useGpu bool default=true
# Maximum number of model layers to run on GPU
gpuLayers int default=1000000
# Number of threads to use for CPU processing - -1 means use all available cores
# Not used for GPU processing
threads int default=-1
# Context size for the model
# Context is divided between parallel requests. So for 10 parallel requests, each "slot" gets 1/10 of the context
contextSize int default=4096
# Maximum number of tokens to process in one request - overridden by inference parameters
maxTokens int default=512
|