Skip to content

Commit

Permalink
[V1] Fix Configs (vllm-project#9971)
Browse files Browse the repository at this point in the history
  • Loading branch information
robertgshaw2-neuralmagic authored Nov 4, 2024
1 parent 5459772 commit 91c9ebb
Showing 1 changed file with 3 additions and 2 deletions.
5 changes: 3 additions & 2 deletions vllm/v1/executor/gpu_executor.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import os
from typing import Optional, Tuple

-from vllm.config import EngineConfig
+from vllm.config import VllmConfig
from vllm.logger import init_logger
from vllm.utils import get_distributed_init_method, get_ip, get_open_port
from vllm.v1.outputs import ModelRunnerOutput
Expand All @@ -12,7 +12,8 @@

class GPUExecutor:

-    def __init__(self, vllm_config: EngineConfig) -> None:
+    def __init__(self, vllm_config: VllmConfig) -> None:
self.vllm_config = vllm_config
self.model_config = vllm_config.model_config
self.cache_config = vllm_config.cache_config
self.lora_config = vllm_config.lora_config
Expand Down

0 comments on commit 91c9ebb

Please sign in to comment.