Commit af3c758

remove unnecessary code
1 parent 367612a commit af3c758

2 files changed: +5, -22 lines

python/sglang/srt/lora/lora_manager.py

Lines changed: 2 additions & 11 deletions
```diff
@@ -325,16 +325,14 @@ def update_lora_info(self):
         if isinstance(module, FusedMoEWithLoRA) and all(x in self.target_modules for x in ['gate_up_proj', 'down_proj']):
             module.set_lora_info(
                 self.memory_pool.get_tensor(
-                    target_module='gate_up_proj',
+                    target_module='gate_up_proj_moe',
                     layer_id=layer_id,
                     lora_type=LoRAType.LORA_A,
-                    context='moe',
                 ),
                 self.memory_pool.get_tensor(
-                    target_module='down_proj',
+                    target_module='down_proj_moe',
                     layer_id=layer_id,
                     lora_type=LoRAType.LORA_B,
-                    context='moe',
                 ),
             )
             continue
@@ -343,23 +341,16 @@ def update_lora_info(self):
             module_name, self.memory_pool.target_modules
         )

-        # Determine context based on module name
-        context = None
-        if isinstance(module, FusedMoEWithLoRA):
-            context = "moe"
-
         module.set_lora_info(
             self.memory_pool.get_tensor(
                 target_module=target_module,
                 layer_id=layer_id,
                 lora_type=LoRAType.LORA_A,
-                context=context,
             ),
             self.memory_pool.get_tensor(
                 target_module=target_module,
                 layer_id=layer_id,
                 lora_type=LoRAType.LORA_B,
-                context=context,
             ),
         )
```

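Net effect in the manager: the MoE branch now selects the MoE-specific buffers by key ('gate_up_proj_moe' / 'down_proj_moe') instead of passing a side-channel `context='moe'` hint. A minimal, self-contained sketch of the old vs. new lookup; the dict layout and tensor shapes below are illustrative assumptions based on this diff, not the repo's actual pool:

```python
import torch

# Hypothetical buffer layout implied by the diff: dense and MoE LoRA-A
# weights live under distinct keys of the same per-layer dict.
A_buffer = {
    "gate_up_proj":     [torch.zeros(4, 8, 16)],     # [num_loras, rank, hidden]
    "gate_up_proj_moe": [torch.zeros(4, 2, 8, 16)],  # [num_loras, num_experts, rank, hidden]
}

# Before this commit: callers passed context='moe' and get_tensor()
# rewrote the key internally for the "ambiguous" module names.
def get_tensor_old(target_module: str, layer_id: int, context: str = None) -> torch.Tensor:
    if context == "moe" and f"{target_module}_moe" in A_buffer:
        return A_buffer[f"{target_module}_moe"][layer_id]
    return A_buffer[target_module][layer_id]

# After: the caller names the MoE buffer directly, so the lookup is one line.
def get_tensor_new(target_module: str, layer_id: int) -> torch.Tensor:
    return A_buffer[target_module][layer_id]

# Both paths resolve to the same 4D MoE tensor.
assert get_tensor_old("gate_up_proj", 0, context="moe").shape \
    == get_tensor_new("gate_up_proj_moe", 0).shape
```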
python/sglang/srt/lora/mem_pool.py

Lines changed: 3 additions & 11 deletions
```diff
@@ -480,30 +480,22 @@ def load_lora_weight_tensor(
         load_lora_weight_tensor(buffer_view, weights)

     def get_tensor(
-        self, target_module: str, layer_id: int, lora_type: LoRAType, context: str = None
+        self, target_module: str, layer_id: int, lora_type: LoRAType
     ) -> torch.Tensor:
         """
         Get LoRA tensor buffer (automatically handles both 3D and 4D tensors).

         Args:
-            target_module: Target module name (e.g., 'gate_up_proj')
+            target_module: Target module name (e.g., 'gate_up_proj' or 'gate_up_proj_moe' for MoE)
             layer_id: Layer index
             lora_type: LoRAType.LORA_A or LoRAType.LORA_B
-            context: Optional context hint ('moe' or None for auto-detect)

         Returns:
             - 3D tensor [num_loras, rank, hidden] for standard modules
             - 4D tensor [num_loras, num_experts, rank, hidden] for MoE modules
         """
         buffer_dict = self.A_buffer if lora_type == LoRAType.LORA_A else self.B_buffer
-
-        # Handle context-specific buffer selection for ambiguous modules
-        ambiguous_modules = {"gate_up_proj", "down_proj"}
-        if target_module in ambiguous_modules:
-            if context == "moe" and f"{target_module}_moe" in buffer_dict:
-                return buffer_dict[f"{target_module}_moe"][layer_id]
-
-        # Fall back to original key for non-ambiguous modules
+
         return buffer_dict[target_module][layer_id]

     def get_buffer_id(self, lora_uid: str):
```

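With the `context` parameter gone, `get_tensor()` reduces to a plain dict lookup, and the dense/MoE distinction lives entirely in the key naming convention: the one caller that knows it is handling a `FusedMoEWithLoRA` module is also the one that appends the `_moe` suffix. A toy, runnable sketch of the post-commit contract (the pool class, shapes, and `LoRAType` stand-in below are illustrative assumptions, not sglang's implementation):

```python
from enum import Enum, auto

import torch

class LoRAType(Enum):  # stand-in for sglang's LoRAType enum
    LORA_A = auto()
    LORA_B = auto()

class PoolSketch:
    """Toy pool mirroring the post-commit get_tensor() signature."""

    def __init__(self):
        shape_3d = (4, 8, 16)     # [num_loras, rank, hidden] for dense modules
        shape_4d = (4, 2, 8, 16)  # [num_loras, num_experts, rank, hidden] for MoE
        self.A_buffer = {
            "gate_up_proj": [torch.zeros(shape_3d)],
            "gate_up_proj_moe": [torch.zeros(shape_4d)],
        }
        self.B_buffer = {
            "down_proj": [torch.zeros(shape_3d)],
            "down_proj_moe": [torch.zeros(shape_4d)],
        }

    def get_tensor(self, target_module: str, layer_id: int, lora_type: LoRAType) -> torch.Tensor:
        # No context hint: the key alone decides whether a 3D or 4D buffer comes back.
        buffer_dict = self.A_buffer if lora_type == LoRAType.LORA_A else self.B_buffer
        return buffer_dict[target_module][layer_id]

pool = PoolSketch()
assert pool.get_tensor("gate_up_proj", 0, LoRAType.LORA_A).ndim == 3   # dense MLP
assert pool.get_tensor("down_proj_moe", 0, LoRAType.LORA_B).ndim == 4  # MoE experts
```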