diff options
Diffstat (limited to 'model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java')
-rw-r--r-- | model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java | 9 |
1 file changed, 5 insertions, 4 deletions
diff --git a/model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java b/model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java index 95bcfb985bd..a3b260f3fb5 100644 --- a/model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java +++ b/model-integration/src/test/java/ai/vespa/llm/clients/LocalLLMTest.java @@ -6,6 +6,7 @@ import ai.vespa.llm.completion.Completion; import ai.vespa.llm.completion.Prompt; import ai.vespa.llm.completion.StringPrompt; import com.yahoo.config.ModelReference; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.util.ArrayList; @@ -32,10 +33,10 @@ public class LocalLLMTest { private static Prompt prompt = StringPrompt.from("A random prompt"); @Test + @Disabled public void testGeneration() { var config = new LlmLocalClientConfig.Builder() .parallelRequests(1) - .threads(1) .model(ModelReference.valueOf(model)); var llm = new LocalLLM(config.build()); @@ -49,12 +50,12 @@ public class LocalLLMTest { } @Test + @Disabled public void testAsyncGeneration() { var sb = new StringBuilder(); var tokenCount = new AtomicInteger(0); var config = new LlmLocalClientConfig.Builder() .parallelRequests(1) - .threads(1) .model(ModelReference.valueOf(model)); var llm = new LocalLLM(config.build()); @@ -77,6 +78,7 @@ public class LocalLLMTest { } @Test + @Disabled public void testParallelGeneration() { var prompts = testPrompts(); var promptsToUse = prompts.size(); @@ -88,7 +90,6 @@ public class LocalLLMTest { var config = new LlmLocalClientConfig.Builder() .parallelRequests(parallelRequests) - .threads(1) .model(ModelReference.valueOf(model)); var llm = new LocalLLM(config.build()); @@ -116,6 +117,7 @@ public class LocalLLMTest { } @Test + @Disabled public void testRejection() { var prompts = testPrompts(); var promptsToUse = prompts.size(); @@ -128,7 +130,6 @@ public class LocalLLMTest { var config = new LlmLocalClientConfig.Builder() .parallelRequests(parallelRequests) - .threads(1) 
.maxQueueSize(additionalQueue) .model(ModelReference.valueOf(model)); var llm = new LocalLLM(config.build()); |