2 changes: 1 addition & 1 deletion vllm/engine/protocol.py
@@ -125,7 +125,7 @@ async def reset_mm_cache(self) -> None:
         ...
 
     @abstractmethod
-    async def reset_prefix_cache(self, device: Device | None = None) -> None:
+    async def reset_prefix_cache(self) -> None:
         """Reset the prefix cache"""
         ...

5 changes: 2 additions & 3 deletions vllm/entrypoints/llm.py
@@ -32,7 +32,6 @@
     TokenizerMode,
 )
 from vllm.engine.arg_utils import EngineArgs
-from vllm.engine.protocol import Device
 from vllm.entrypoints.chat_utils import (
     ChatCompletionMessageParam,
     ChatTemplateContentFormatOption,
@@ -1499,8 +1498,8 @@ def start_profile(self) -> None:
     def stop_profile(self) -> None:
         self.llm_engine.stop_profile()
 
-    def reset_prefix_cache(self, device: Device | None = None) -> None:
-        self.llm_engine.reset_prefix_cache(device)
+    def reset_prefix_cache(self) -> None:
+        self.llm_engine.reset_prefix_cache()
 
     def sleep(self, level: int = 1):
         """
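
For context, a minimal offline-usage sketch of the simplified `LLM.reset_prefix_cache()` call after this change (the model name is a placeholder, not something this PR prescribes):

```python
from vllm import LLM

# Placeholder model, for illustration only.
llm = LLM(model="facebook/opt-125m")

# After this change the method takes no device argument; it simply
# forwards to llm_engine.reset_prefix_cache().
llm.reset_prefix_cache()
```
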
10 changes: 3 additions & 7 deletions vllm/entrypoints/openai/api_server.py
@@ -39,7 +39,7 @@
 import vllm.envs as envs
 from vllm.config import VllmConfig
 from vllm.engine.arg_utils import AsyncEngineArgs
-from vllm.engine.protocol import Device, EngineClient
+from vllm.engine.protocol import EngineClient
 from vllm.entrypoints.anthropic.protocol import (
     AnthropicError,
     AnthropicErrorResponse,
@@ -1069,12 +1069,8 @@ async def reset_prefix_cache(raw_request: Request):
         Reset the prefix cache. Note that we currently do not check if the
         prefix cache is successfully reset in the API server.
         """
-        device = None
-        device_str = raw_request.query_params.get("device")
-        if device_str is not None:
-            device = Device[device_str.upper()]
-        logger.info("Resetting prefix cache with specific %s...", str(device))
-        await engine_client(raw_request).reset_prefix_cache(device)
+        logger.info("Resetting prefix cache...")
+        await engine_client(raw_request).reset_prefix_cache()
         return Response(status_code=200)
 
     @router.post("/reset_mm_cache")
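
A hedged sketch of exercising the updated endpoint from a client; the base URL and the use of `requests` are assumptions, and depending on server configuration the route may only be exposed in development mode. The previously accepted `device` query parameter is no longer read by the handler:

```python
import requests

# Assumed local vLLM OpenAI-compatible server; adjust the URL as needed.
BASE_URL = "http://localhost:8000"

# A plain POST now resets the prefix cache for the whole engine;
# there is no per-device variant anymore.
resp = requests.post(f"{BASE_URL}/reset_prefix_cache")
assert resp.status_code == 200
```
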
6 changes: 2 additions & 4 deletions vllm/v1/engine/async_llm.py
@@ -14,7 +14,7 @@
 import vllm.envs as envs
 from vllm.config import VllmConfig
 from vllm.engine.arg_utils import AsyncEngineArgs
-from vllm.engine.protocol import Device, EngineClient
+from vllm.engine.protocol import EngineClient
 from vllm.entrypoints.utils import _validate_truncation_size
 from vllm.inputs import PromptType
 from vllm.logger import init_logger
@@ -672,9 +672,7 @@ async def reset_mm_cache(self) -> None:
         self.processor.clear_mm_cache()
         await self.engine_core.reset_mm_cache_async()
 
-    async def reset_prefix_cache(self, device: Device | None = None) -> None:
-        if device == Device.CPU:
-            raise ValueError("Not supported on CPU.")
+    async def reset_prefix_cache(self) -> None:
         await self.engine_core.reset_prefix_cache_async()
 
     async def sleep(self, level: int = 1) -> None:
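
On the async path the signature shrinks the same way; a rough sketch of calling it, assuming an already-constructed `AsyncLLM` instance is passed in:

```python
async def clear_prefix_cache(engine) -> None:
    # `engine` is assumed to be an AsyncLLM (or any EngineClient) built elsewhere.
    # The CPU-specific ValueError path is gone; the call now forwards
    # unconditionally to engine_core.reset_prefix_cache_async().
    await engine.reset_prefix_cache()
```
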
3 changes: 1 addition & 2 deletions vllm/v1/engine/llm_engine.py
@@ -14,7 +14,6 @@
 from vllm.distributed import stateless_destroy_torch_distributed_process_group
 from vllm.distributed.parallel_state import get_dp_group
 from vllm.engine.arg_utils import EngineArgs
-from vllm.engine.protocol import Device
 from vllm.inputs import PromptType
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
@@ -321,7 +320,7 @@ def reset_mm_cache(self):
         self.processor.clear_mm_cache()
         self.engine_core.reset_mm_cache()
 
-    def reset_prefix_cache(self, device: Device | None = None):
+    def reset_prefix_cache(self):
         self.engine_core.reset_prefix_cache()
 
     def sleep(self, level: int = 1):