Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions fastdeploy/engine/sched/resource_manager_v1.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,6 +261,7 @@ def _trigger_preempt(self, request, num_new_blocks, preempted_reqs, scheduled_re
self.running.insert(0, preempted_req)
continue
preempted_req.status = RequestStatus.PREEMPTED
preempted_req.last_preempted_blocksize = len(preempted_req.block_tables)
preempted_req.num_computed_tokens = 0
if self.config.scheduler_config.splitwise_role == "decode":
self.tasks_list[preempted_req.idx] = None
Expand Down Expand Up @@ -735,6 +736,14 @@ def _allocate_decode_and_extend():
break
num_new_tokens = self._get_num_new_tokens(request, token_budget)
num_new_block = self.get_new_block_nums(request, num_new_tokens)
# If num_new_block is smaller than the block count at the time of the last preemption,
# use that earlier count instead. For normal requests we reserve two extra blocks for
# decoding when allocating; but when rescheduling a preempted request we currently size
# the allocation only from the tokens already generated, which can yield fewer blocks
# than the previous allocation and trigger repeated preempt/reschedule cycles.
# Allocating at least as many blocks as before avoids that loop.
last_preempted_blocksize = getattr(request, "last_preempted_blocksize", 0)
if num_new_block < last_preempted_blocksize:
num_new_block = last_preempted_blocksize
# Allocate blocks to prefill
if self.cache_manager.can_allocate_gpu_blocks(num_new_block):
if not request.get("skip_allocate", False):
Expand Down
Loading