
Commit fdaab63

krishung5 and nv-anants authored
fix: Fix multimodal EPD examples for vllm version bump (#4849) (#4870)
Co-authored-by: Anant Sharma <[email protected]>
1 parent fb8cede commit fdaab63

15 files changed: +499 -43 lines changed

components/src/dynamo/vllm/multimodal_handlers/processor_handler.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 from vllm.engine.arg_utils import AsyncEngineArgs
 from vllm.entrypoints.openai.protocol import ChatCompletionRequest, CompletionRequest
 from vllm.outputs import RequestOutput
-from vllm.transformers_utils.tokenizer import AnyTokenizer
+from vllm.tokenizers import TokenizerLike as AnyTokenizer
 
 from dynamo.runtime import Client
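Note on the import change above: newer vLLM drops vllm.transformers_utils.tokenizer.AnyTokenizer in favor of vllm.tokenizers.TokenizerLike, which the commit aliases back to AnyTokenizer so the rest of the file stays unchanged. For code that must import cleanly on both sides of the version bump, a minimal shim (a sketch, assuming these are the only two locations involved) could be:

# Version-tolerant import: prefer the new vLLM location, fall back to the old one.
try:
    from vllm.tokenizers import TokenizerLike as AnyTokenizer  # newer vLLM
except ImportError:
    from vllm.transformers_utils.tokenizer import AnyTokenizer  # pre-bump vLLM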

components/src/dynamo/vllm/multimodal_utils/chat_processor.py

Lines changed: 35 additions & 10 deletions
@@ -28,9 +28,22 @@
 from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
 from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
 from vllm.entrypoints.openai.serving_engine import RequestPrompt
+from vllm.entrypoints.openai.serving_models import BaseModelPath, OpenAIServingModels
 from vllm.inputs.data import TokensPrompt
 from vllm.sampling_params import SamplingParams
-from vllm.transformers_utils.tokenizer import AnyTokenizer
+from vllm.tokenizers import TokenizerLike as AnyTokenizer
+
+
+class StubEngineClient:
+    """
+    Stub EngineClient for preprocessing-only use of OpenAIServingChat/Completion.
+    Provides the minimal attributes required by OpenAIServingModels.
+    """
+
+    def __init__(self, model_config: ModelConfig):
+        self.model_config = model_config
+        self.input_processor = None
+        self.io_processor = None
 
 
 @runtime_checkable
@@ -120,12 +133,19 @@ class ChatProcessor:
     def __init__(self, tokenizer: AnyTokenizer, model_config: ModelConfig):
         self.tokenizer = tokenizer
         self.model_config = model_config
+        # Create stub engine client and models for preprocessing-only usage
+        stub_engine = StubEngineClient(model_config)
+        serving_models = OpenAIServingModels(
+            engine_client=stub_engine,
+            base_model_paths=[
+                BaseModelPath(name=model_config.model, model_path=model_config.model)
+            ],
+        )
         self.openai_serving = OpenAIServingChat(
-            engine_client=None,
-            model_config=model_config,
-            models=None,
-            request_logger=None,
+            engine_client=stub_engine,
+            models=serving_models,
             response_role="assistant",
+            request_logger=None,
             chat_template=None,
             chat_template_content_format="auto",
         )
@@ -186,7 +206,6 @@ async def stream_response(
             conversation,
             self.tokenizer,
             request_metadata,
-            enable_force_include_usage=False,
         ):
             if raw_response.startswith("data: [DONE]"):
                 yield raw_response
@@ -220,7 +239,6 @@ async def stream_response(
             conversation,
             self.tokenizer,
             request_metadata,
-            enable_force_include_usage=False,
         ):
             if raw_response.startswith("data: [DONE]"):
                 break
@@ -267,10 +285,17 @@ class CompletionsProcessor:
     def __init__(self, tokenizer: AnyTokenizer, model_config: ModelConfig):
         self.tokenizer = tokenizer
        self.model_config = model_config
+        # Create stub engine client and models for preprocessing-only usage
+        stub_engine = StubEngineClient(model_config)
+        serving_models = OpenAIServingModels(
+            engine_client=stub_engine,
+            base_model_paths=[
+                BaseModelPath(name=model_config.model, model_path=model_config.model)
+            ],
+        )
         self.openai_serving = OpenAIServingCompletion(
-            engine_client=None,
-            model_config=model_config,
-            models=None,
+            engine_client=stub_engine,
+            models=serving_models,
             request_logger=None,
         )
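The pattern in this file is dependency stubbing: OpenAIServingChat and OpenAIServingCompletion are used purely for request preprocessing, so a bare object exposing model_config, input_processor, and io_processor satisfies OpenAIServingModels without a live engine. The old code passed engine_client=None; the stub presumably became necessary once the newer serving constructors started reading attributes off the client. A self-contained sketch of the same pattern (hypothetical names; Serving stands in for the vLLM serving classes):

from dataclasses import dataclass
from typing import Any, Optional


@dataclass
class StubEngine:
    # Only the attributes the serving layer reads during preprocessing.
    model_config: dict
    input_processor: Optional[Any] = None
    io_processor: Optional[Any] = None


class Serving:
    def __init__(self, engine_client: StubEngine):
        self.engine_client = engine_client

    def preprocess(self, prompt: str) -> list[str]:
        # Tokenization-style work; never calls into a running engine.
        return prompt.split()


serving = Serving(StubEngine(model_config={"model": "demo"}))
print(serving.preprocess("describe this image"))  # ['describe', 'this', 'image']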

components/src/dynamo/vllm/multimodal_utils/protocol.py

Lines changed: 3 additions & 3 deletions
@@ -26,7 +26,7 @@
 from vllm.multimodal.inputs import MultiModalUUIDDict  # noqa: F401
 from vllm.outputs import CompletionOutput
 from vllm.sampling_params import SamplingParams
-from vllm.sequence import RequestMetrics
+from vllm.v1.metrics.stats import RequestStateStats
 
 import dynamo.nixl_connect as connect
 
@@ -156,7 +156,7 @@ class MyRequestOutput(BaseModel):
     https://github.com/vllm-project/vllm/blob/a4c402a756fa3213caf9d2cde0e4ceb2d57727f2/vllm/outputs.py#L85
 
     This class is used to serialize the RequestOutput and any recursively defined types
-    We can do this because PromptLogprobs, RequestMetrics, and CompletionOutput are all serializable dataclasses
+    We can do this because PromptLogprobs, RequestStateStats, and CompletionOutput are all serializable dataclasses
     """
 
     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -167,7 +167,7 @@ class MyRequestOutput(BaseModel):
     prompt_logprobs: Optional[PromptLogprobs] = None
     outputs: List[CompletionOutput]
     finished: bool
-    metrics: Optional[RequestMetrics] = None
+    metrics: Optional[RequestStateStats] = None
     kv_transfer_params: Optional[dict[str, Any]] = None
     # lora_request: Optional[LoRARequest] = None
     # encoder_prompt: Optional[str] = None
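MyRequestOutput can swap RequestMetrics for RequestStateStats without further changes because the field is carried under pydantic's arbitrary_types_allowed. A self-contained sketch of that mechanism (Stats below is a stand-in, not the real vLLM type):

from typing import Optional

from pydantic import BaseModel, ConfigDict


class Stats:
    # Plain (non-pydantic) stand-in for vllm.v1.metrics.stats.RequestStateStats.
    def __init__(self, num_generation_tokens: int = 0):
        self.num_generation_tokens = num_generation_tokens


class MiniRequestOutput(BaseModel):
    # Without arbitrary_types_allowed, pydantic would reject the Stats annotation
    # at class-definition time because it cannot build a schema for it.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    request_id: str
    finished: bool
    metrics: Optional[Stats] = None


out = MiniRequestOutput(request_id="r1", finished=True, metrics=Stats(5))
print(out.metrics.num_generation_tokens)  # 5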

examples/backends/vllm/launch/agg_multimodal_epd.sh

Lines changed: 1 addition & 1 deletion
@@ -77,7 +77,7 @@ python -m dynamo.vllm --multimodal-processor --enable-multimodal --model $MODEL_
 
 # run E/P/D workers
 CUDA_VISIBLE_DEVICES=0 python -m dynamo.vllm --multimodal-encode-worker --enable-multimodal --model $MODEL_NAME &
-CUDA_VISIBLE_DEVICES=1 python -m dynamo.vllm --multimodal-worker --enable-multimodal --model $MODEL_NAME $EXTRA_ARGS &
+CUDA_VISIBLE_DEVICES=0 python -m dynamo.vllm --multimodal-worker --enable-multimodal --enable-mm-embeds --model $MODEL_NAME $EXTRA_ARGS &
 
 # Wait for all background processes to complete
 wait
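The functional change here is --enable-mm-embeds on the multimodal worker: with encoding split out, the worker receives precomputed multimodal embeddings rather than raw media. As a rough illustration of the kind of input this enables (the tensor shape and prompt structure below are hypothetical and model-dependent):

import torch

# Embeddings an encode worker might hand over: e.g. 576 patch embeddings
# of hidden size 4096 for one image (actual shape varies by model).
image_embeds = torch.randn(576, 4096)

prompt = {
    "prompt_token_ids": [1, 2, 3],                # placeholder token ids
    "multi_modal_data": {"image": image_embeds},  # embeddings, not pixels
}
# A worker launched with --enable-mm-embeds can accept embeddings like this;
# without the flag, only raw media (e.g. PIL images) would be accepted.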

examples/backends/vllm/launch/disagg_multimodal_epd.sh

Lines changed: 6 additions & 9 deletions
@@ -80,23 +80,20 @@ python -m dynamo.vllm --multimodal-processor --enable-multimodal --model $MODEL_
 
 # Configure GPU memory optimization for specific models
 EXTRA_ARGS=""
-if [[ "$MODEL_NAME" == "Qwen/Qwen2.5-VL-7B-Instruct" ]]; then
-    EXTRA_ARGS="--gpu-memory-utilization 0.85 --max-model-len 2048"
-fi
 
 # Start encode worker
-echo "Starting encode worker on GPU 1..."
-VLLM_NIXL_SIDE_CHANNEL_PORT=20097 CUDA_VISIBLE_DEVICES=1 python -m dynamo.vllm --multimodal-encode-worker --enable-multimodal --model $MODEL_NAME $EXTRA_ARGS --kv-events-config '{"publisher":"zmq","topic":"kv-events","endpoint":"tcp://*:20080"}' &
+echo "Starting encode worker on GPU 0..."
+VLLM_NIXL_SIDE_CHANNEL_PORT=20097 CUDA_VISIBLE_DEVICES=0 python -m dynamo.vllm --multimodal-encode-worker --enable-multimodal --model $MODEL_NAME $EXTRA_ARGS --kv-events-config '{"publisher":"zmq","topic":"kv-events","endpoint":"tcp://*:20080"}' &
 
 # Start prefill worker
-echo "Starting prefill worker on GPU 2..."
+echo "Starting prefill worker on GPU 1..."
 VLLM_NIXL_SIDE_CHANNEL_PORT=20098 \
-    CUDA_VISIBLE_DEVICES=2 python -m dynamo.vllm --multimodal-worker --is-prefill-worker --enable-multimodal --model $MODEL_NAME $EXTRA_ARGS --kv-events-config '{"publisher":"zmq","topic":"kv-events","endpoint":"tcp://*:20081"}' &
+    CUDA_VISIBLE_DEVICES=1 python -m dynamo.vllm --multimodal-worker --is-prefill-worker --enable-multimodal --enable-mm-embeds --model $MODEL_NAME $EXTRA_ARGS --kv-events-config '{"publisher":"zmq","topic":"kv-events","endpoint":"tcp://*:20081"}' &
 
 # Start decode worker
-echo "Starting decode worker on GPU 3..."
+echo "Starting decode worker on GPU 2..."
 VLLM_NIXL_SIDE_CHANNEL_PORT=20099 \
-    CUDA_VISIBLE_DEVICES=3 python -m dynamo.vllm --multimodal-decode-worker --enable-multimodal --model $MODEL_NAME $EXTRA_ARGS --kv-events-config '{"publisher":"zmq","topic":"kv-events","endpoint":"tcp://*:20082"}' &
+    CUDA_VISIBLE_DEVICES=2 python -m dynamo.vllm --multimodal-decode-worker --enable-multimodal --model $MODEL_NAME $EXTRA_ARGS --kv-events-config '{"publisher":"zmq","topic":"kv-events","endpoint":"tcp://*:20082"}' &
 
 echo "=================================================="
 echo "All components started. Waiting for initialization..."
