Commit 8559c2b

feat: KV aware LoRA request routing for vllm (#4810)
1 parent ad5afb7 commit 8559c2b

File tree

3 files changed: +130 -18 lines changed

components/src/dynamo/vllm/args.py

Lines changed: 0 additions & 18 deletions
@@ -371,24 +371,6 @@ def create_kv_events_config(config: Config) -> Optional[KVEventsConfig]:
         logger.info("No kv_events_config required: prefix caching is disabled")
         return None
 
-    # There is a bug with KV events publishing when LoRA is enabled.
-    # It is fixed in https://github.com/vllm-project/vllm/pull/27728 but not yet released.
-    # Remove this check once a new vLLM version ships with the fix.
-    if config.engine_args.enable_lora:
-        if config.engine_args.kv_events_config is None:
-            # No explicit kv events config provided by the user; disable KV events because LoRA is enabled and not yet supported.
-            return None
-        else:
-            # The user provided their own kv events config, which will not work when LoRA is enabled.
-            message = (
-                "KV events don't work when LoRA is enabled due to an upstream vLLM bug. "
-                "Please see https://github.com/vllm-project/vllm/pull/27728. "
-                "For now, either disable LoRA or don't use an explicit kv events config. "
-                "Don't set both --kv-events-config and --enable-lora in the vLLM command line args."
-            )
-            logger.error(message)
-            raise ValueError(message)
-
     # If user provided their own config, use that
     if c := getattr(config.engine_args, "kv_events_config"):
         # Warn user that enable_kv_cache_events probably should be True (user may have omitted it from JSON)
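
This commit drops the guard, so --enable-lora can now be combined with an explicit --kv-events-config; the launch script added below exercises exactly this. A minimal sketch of the now-valid flag combination (values borrowed from that script):

python3 -m dynamo.vllm \
    --model Qwen/Qwen3-0.6B \
    --enable-lora \
    --kv-events-config '{"publisher":"zmq","topic":"kv-events","endpoint":"tcp://*:20080","enable_kv_cache_events":true}'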
File renamed without changes.
Lines changed: 130 additions & 0 deletions
@@ -0,0 +1,130 @@
#!/bin/bash
# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
set -e
trap 'echo Cleaning up...; kill 0' EXIT

# Follow the README.md instructions to set up MinIO, or upload the LoRA to S3/MinIO yourself.
# Adjust these values to match your local MinIO or S3 setup.

# Load the math LoRA into MinIO:
# LORA_NAME=Neural-Hacker/Qwen3-Math-Reasoning-LoRA HF_LORA_REPO=Neural-Hacker/Qwen3-Math-Reasoning-LoRA ./setup_minio.sh
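# Alternatively, a sketch for uploading a locally downloaded LoRA with any
# S3-compatible client (assumes awscli is installed; bucket/path here are
# illustrative and mirror the curl examples further below):
# aws --endpoint-url http://localhost:9000 s3 cp --recursive ./my-local-lora s3://my-loras/my-local-lora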

export AWS_ENDPOINT=http://localhost:9000
export AWS_ACCESS_KEY_ID=minioadmin
export AWS_SECRET_ACCESS_KEY=minioadmin
export AWS_REGION=us-east-1
export AWS_ALLOW_HTTP=true
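
# (Optional sketch) Sanity-check the bucket contents before launching, again
# assuming awscli and the "my-loras" bucket used in the examples below:
# aws --endpoint-url "$AWS_ENDPOINT" s3 ls s3://my-loras/ --recursive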

# Dynamo LoRA configuration
export DYN_LORA_ENABLED=true
export DYN_LORA_PATH=/tmp/dynamo_loras_minio
export DYN_LOG=debug
# export DYN_LOG_LEVEL=debug

mkdir -p "$DYN_LORA_PATH"

# Set PYTHONHASHSEED so KV event block hashes are deterministic across worker processes
export PYTHONHASHSEED=0

# Common configuration
MODEL="Qwen/Qwen3-0.6B"
BLOCK_SIZE=64

# Run the frontend + KV router.
# dynamo.frontend accepts either the --http-port flag or the DYN_HTTP_PORT env var (defaults to 8000).
python -m dynamo.frontend \
    --router-mode kv \
    --router-reset-states &
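
# (Optional sketch) Wait for the frontend HTTP server before launching workers;
# /v1/models is used in the examples below, so it doubles as a readiness probe:
# until curl -sf http://localhost:8000/v1/models > /dev/null; do sleep 1; done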

# Run two vLLM workers, one per GPU; each publishes KV cache events on its own ZMQ endpoint.
# --enforce-eager is added for quick deployment; remove this flag for production use.
DYN_SYSTEM_ENABLED=true DYN_SYSTEM_PORT=8082 \
CUDA_VISIBLE_DEVICES=0 python3 -m dynamo.vllm \
    --model $MODEL \
    --block-size $BLOCK_SIZE \
    --enforce-eager \
    --connector none \
    --enable-lora \
    --max-lora-rank 64 \
    --kv-events-config '{"publisher":"zmq","topic":"kv-events","endpoint":"tcp://*:20080","enable_kv_cache_events":true}' &

DYN_SYSTEM_ENABLED=true DYN_SYSTEM_PORT=8081 \
VLLM_NIXL_SIDE_CHANNEL_PORT=20097 \
CUDA_VISIBLE_DEVICES=1 python3 -m dynamo.vllm \
    --model $MODEL \
    --block-size $BLOCK_SIZE \
    --enforce-eager \
    --connector none \
    --enable-lora \
    --max-lora-rank 64 \
    --kv-events-config '{"publisher":"zmq","topic":"kv-events","endpoint":"tcp://*:20081","enable_kv_cache_events":true}'

# The commands below are not executed automatically: the second worker launch
# above runs in the foreground and blocks. Run them from a separate shell.

################################## Example Usage ##################################

# Check available models
curl http://localhost:8000/v1/models | jq .
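
# (Sketch) To wait until the model is registered and ready, poll until the
# served-model list is non-empty:
# until curl -sf http://localhost:8000/v1/models | jq -e '.data | length > 0' > /dev/null; do sleep 2; done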

# Load the LoRA onto both instances using its S3 URI
curl -s -X POST http://localhost:8081/v1/loras \
  -H "Content-Type: application/json" \
  -d '{"lora_name": "codelion/Qwen3-0.6B-accuracy-recovery-lora",
       "source": {"uri": "s3://my-loras/codelion/Qwen3-0.6B-accuracy-recovery-lora"}}' | jq .

curl -s -X POST http://localhost:8082/v1/loras \
  -H "Content-Type: application/json" \
  -d '{"lora_name": "codelion/Qwen3-0.6B-accuracy-recovery-lora",
       "source": {"uri": "s3://my-loras/codelion/Qwen3-0.6B-accuracy-recovery-lora"}}' | jq .

# Test LoRA inference
curl localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "codelion/Qwen3-0.6B-accuracy-recovery-lora",
    "messages": [
      {
        "role": "user",
        "content": "In the heart of Eldoria, an ancient land of boundless magic and mysterious creatures, lies the long-forgotten city of Aeloria. Once a beacon of knowledge and power, Aeloria was buried beneath the shifting sands of time, lost to the world for centuries. You are an intrepid explorer, known for your unparalleled curiosity and courage, who has stumbled upon an ancient map hinting at ests that Aeloria holds a secret so profound that it has the potential to reshape the very fabric of reality. Your journey will take you through treacherous deserts, enchanted forests, and across perilous mountain ranges. Your Task: Character Background: Develop a detailed background for your character. Describe their motivations for seeking out Aeloria, their skills and weaknesses, and any personal connections to the ancient city or its legends. Are they driven by a quest for knowledge, a search for lost familt clue is hidden."
      }
    ],
    "stream": false,
    "max_tokens": 30
  }' | jq .


# Sample output after running the above curl request twice.
# usage.prompt_tokens_details.cached_tokens is the number of tokens that were cached from the previous request.
{
  "id": "chatcmpl-0cf880c2-fe98-45c4-9c76-84c3ad1a56cc",
  "choices": [
    {
      "index": 0,
      "message": {
        "content": "<think>\nOkay, so I need to develop a character background for a character named Elara. Let me start by understanding the requirements. The user wants",
        "role": "assistant",
        "reasoning_content": null
      },
      "finish_reason": "length"
    }
  ],
  "created": 1765230243,
  "model": "codelion/Qwen3-0.6B-accuracy-recovery-lora",
  "object": "chat.completion",
  "usage": {
    "prompt_tokens": 196,
    "completion_tokens": 30,
    "total_tokens": 226,
    "prompt_tokens_details": {
      "audio_tokens": null,
      "cached_tokens": 192
    }
  },
  "nvext": {
    "worker_id": {
      "prefill_worker_id": 7587891281668871552,
      "decode_worker_id": 7587891281668871552
    }
  }
}
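
# (Sketch) To confirm KV-aware routing, send the identical request twice and
# compare the cached-token count and the chosen worker; field names come from
# the sample response above ($REQ is a hypothetical variable holding the JSON
# request body from the example):
# curl -s localhost:8000/v1/chat/completions -H "Content-Type: application/json" -d "$REQ" \
#   | jq '{cached: .usage.prompt_tokens_details.cached_tokens, worker: .nvext.worker_id.decode_worker_id}'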
