vllm.core.scheduler

ARTIFICIAL_PREEMPTION_MAX_CNT module-attribute

ARTIFICIAL_PREEMPTION_MAX_CNT = 500

ARTIFICIAL_PREEMPTION_PROB module-attribute

ARTIFICIAL_PREEMPTION_PROB = 0.5

ENABLE_ARTIFICIAL_PREEMPT module-attribute

ENABLE_ARTIFICIAL_PREEMPT = bool(
    getenv("VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT", False)
)
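
Note: since bool() is applied to the raw environment string, setting VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT to any non-empty value (even "0") enables the flag; only an unset or empty variable leaves it disabled. A minimal illustration of that truthiness rule:

import os

# Unset or empty -> default False -> flag disabled.
# Any non-empty string, including "0" -> bool(...) is True -> flag enabled.
enabled = bool(os.getenv("VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT", False))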

logger module-attribute

logger = init_logger(__name__)

PartialPrefillMetadata dataclass

Holds information about the partial prefills that are currently running during a single iteration of the Scheduler. When chunked prefill is enabled, we allow a certain number of seqs to be partially prefilled during each iteration. Having multiple partial prefills in flight allows us to minimize TTFT and avoid decode starvation in cases where a single sequence group with a very large prompt blocks the queue for too many iterations. The number of long prefill requests is limited so that smaller requests may jump the queue in front of them and get to the decode phase faster.

Source code in vllm/core/scheduler.py
@dataclass
class PartialPrefillMetadata:
    """Holds information about the partial prefills that are currently running
    during a single iteration of the Scheduler.
    When chunked prefill is enabled, we allow a certain number of seqs to be
    partially prefilled during each iteration. Having multiple partial prefills
    in flight allows us to minimize TTFT and avoid decode starvation in cases
    where a single sequence group with a very large prompt blocks the queue for
    too many iterations.
    The number of long prefill requests is limited so that smaller
    requests may jump the queue in front of them and get to the decode
    phase faster.
    """

    # A minimum bound on the total number of prefills to be scheduled during
    # this iteration
    schedulable_prefills: int

    # The number of long prefill requests currently running
    long_prefills: int

    scheduler_config: SchedulerConfig

    def can_schedule(self, seq_group: SequenceGroup) -> bool:
        """When concurrent partial prefills are enabled,
        we limit the number of long requests and only accept
        shorter requests from the queue while running them
        concurrently"""
        return not (seq_group.first_seq.get_num_new_tokens()
                    > self.scheduler_config.long_prefill_token_threshold
                    and self.long_prefills
                    >= self.scheduler_config.max_long_partial_prefills
                    and self.scheduler_config.max_num_partial_prefills > 1)

    def maybe_increment_partial_prefills(self,
                                         seq_group: SequenceGroup) -> None:
        # When a new prefill is scheduled, we need to know if it is a
        # long request
        if (seq_group.first_seq.get_num_new_tokens()
                > self.scheduler_config.long_prefill_token_threshold):
            self.long_prefills += 1

    @classmethod
    def from_queues(
        cls,
        running: Deque[SequenceGroup],
        waiting: Deque[SequenceGroup],
        scheduler_config: SchedulerConfig,
    ) -> "PartialPrefillMetadata":
        """Create a PartialPrefillMetadata object from the current state of
        the scheduler's queues.
        This accounts for the currently running prefill requests, and peeks into
        the waiting queue to see if there are more prefills to potentially be
        scheduled during this iteration."""
        prefills = 0
        long_prefills = 0

        waiting_long_prefills = 0

        for sg in running:
            if sg.first_seq.data.stage == SequenceStage.PREFILL:
                prefills += 1
                if (sg.first_seq.get_num_new_tokens()
                        > scheduler_config.long_prefill_token_threshold):
                    long_prefills += 1

        for sg in waiting:
            # Don't bother looping through the rest of the queue if we know
            # there are already at
            # least max_partial_prefills requests to fill
            if prefills >= scheduler_config.max_num_partial_prefills:
                break

            # Don't count long requests from the waiting queue if we aren't
            # going to schedule them anyway
            if (sg.first_seq.get_num_new_tokens()
                    > scheduler_config.long_prefill_token_threshold):
                if (long_prefills + waiting_long_prefills
                        >= scheduler_config.max_long_partial_prefills):
                    continue
                waiting_long_prefills += 1
            prefills += 1

        # NB: long_prefills and waiting_long_prefills are tracked separately.
        # We don't account for the waiting requests here because we need to use
        # this metadata to track how many have actually been scheduled.
        return PartialPrefillMetadata(
            schedulable_prefills=min(
                prefills, scheduler_config.max_num_partial_prefills),
            long_prefills=long_prefills,
            scheduler_config=scheduler_config,
        )
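
A minimal usage sketch (illustrative only; the running, waiting, and scheduler_config objects are assumed to exist, and the real scheduler interleaves these calls with budget and block-manager checks):

# Build metadata from the current queues, then use it to gate which
# waiting prefills may be scheduled during this iteration.
metadata = PartialPrefillMetadata.from_queues(
    running=running,                    # Deque[SequenceGroup] in RUNNING state
    waiting=waiting,                    # Deque[SequenceGroup] in WAITING state
    scheduler_config=scheduler_config,
)

for seq_group in list(waiting):
    if not metadata.can_schedule(seq_group):
        continue  # too many long prefills already in flight
    # ... allocate blocks and schedule the prefill here ...
    metadata.maybe_increment_partial_prefills(seq_group)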

long_prefills instance-attribute

long_prefills: int

schedulable_prefills instance-attribute

schedulable_prefills: int

scheduler_config instance-attribute

scheduler_config: SchedulerConfig

__init__

__init__(
    schedulable_prefills: int,
    long_prefills: int,
    scheduler_config: SchedulerConfig,
) -> None

can_schedule

can_schedule(seq_group: SequenceGroup) -> bool

When concurrent partial prefills are enabled, we limit the number of long requests and only accept shorter requests from the queue while running them concurrently

Source code in vllm/core/scheduler.py
def can_schedule(self, seq_group: SequenceGroup) -> bool:
    """When concurrent partial prefills are enabled,
    we limit the number of long requests and only accept
    shorter requests from the queue while running them
    concurrently"""
    return not (seq_group.first_seq.get_num_new_tokens()
                > self.scheduler_config.long_prefill_token_threshold
                and self.long_prefills
                >= self.scheduler_config.max_long_partial_prefills
                and self.scheduler_config.max_num_partial_prefills > 1)
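
A worked example with hypothetical config values (long_prefill_token_threshold=1024, max_long_partial_prefills=1, max_num_partial_prefills=4):

# A request needing 2048 new tokens while one long prefill is running:
#   2048 > 1024  and  long_prefills (1) >= 1  and  4 > 1  -> True
#   can_schedule returns not True == False (the request must wait)
# A request needing 512 new tokens:
#   512 > 1024 is False, so the whole conjunction is False
#   can_schedule returns not False == True (the request may be scheduled)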

from_queues classmethod

from_queues(
    running: Deque[SequenceGroup],
    waiting: Deque[SequenceGroup],
    scheduler_config: SchedulerConfig,
) -> PartialPrefillMetadata

Create a PartialPrefillMetadata object from the current state of the scheduler's queues. This accounts for the currently running prefill requests, and peeks into the waiting queue to see if there are more prefills to potentially be scheduled during this iteration.

Source code in vllm/core/scheduler.py
@classmethod
def from_queues(
    cls,
    running: Deque[SequenceGroup],
    waiting: Deque[SequenceGroup],
    scheduler_config: SchedulerConfig,
) -> "PartialPrefillMetadata":
    """Create a PartialPrefillMetadata object from the current state of
    the scheduler's queues.
    This accounts for the currently running prefill requests, and peeks into
    the waiting queue to see if there are more prefills to potentially be
    scheduled during this iteration."""
    prefills = 0
    long_prefills = 0

    waiting_long_prefills = 0

    for sg in running:
        if sg.first_seq.data.stage == SequenceStage.PREFILL:
            prefills += 1
            if (sg.first_seq.get_num_new_tokens()
                    > scheduler_config.long_prefill_token_threshold):
                long_prefills += 1

    for sg in waiting:
        # Don't bother looping through the rest of the queue if we know
        # there are already at
        # least max_partial_prefills requests to fill
        if prefills >= scheduler_config.max_num_partial_prefills:
            break

        # Don't count long requests from the waiting queue if we aren't
        # going to schedule them anyway
        if (sg.first_seq.get_num_new_tokens()
                > scheduler_config.long_prefill_token_threshold):
            if (long_prefills + waiting_long_prefills
                    >= scheduler_config.max_long_partial_prefills):
                continue
            waiting_long_prefills += 1
        prefills += 1

    # NB: long_prefills and waiting_long_prefills are tracked separately.
    # We don't account for the waiting requests here because we need to use
    # this metadata to track how many have actually been scheduled.
    return PartialPrefillMetadata(
        schedulable_prefills=min(
            prefills, scheduler_config.max_num_partial_prefills),
        long_prefills=long_prefills,
        scheduler_config=scheduler_config,
    )
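
A worked example with hypothetical numbers (max_num_partial_prefills=2, max_long_partial_prefills=1, long_prefill_token_threshold=1024): suppose one running group is still mid-prefill with 2000 uncomputed tokens, and the waiting queue holds groups needing 3000 and 100 tokens.

# running pass:  prefills = 1, long_prefills = 1
# waiting[0]:    3000 > 1024, but long_prefills + waiting_long_prefills (1)
#                already equals max_long_partial_prefills (1) -> skipped
# waiting[1]:    100 <= 1024 -> prefills = 2
# result:        schedulable_prefills = min(2, 2) = 2, long_prefills = 1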

maybe_increment_partial_prefills

maybe_increment_partial_prefills(
    seq_group: SequenceGroup,
) -> None
Source code in vllm/core/scheduler.py
def maybe_increment_partial_prefills(self,
                                     seq_group: SequenceGroup) -> None:
    # When a new prefill is scheduled, we need to know if it is a
    # long request
    if (seq_group.first_seq.get_num_new_tokens()
            > self.scheduler_config.long_prefill_token_threshold):
        self.long_prefills += 1

PreemptionMode

Bases: Enum

Preemption modes.

  1. Swapping: Swap out the blocks of the preempted sequences to CPU memory and swap them back in when the sequences are resumed.
  2. Recomputation: Discard the blocks of the preempted sequences and recompute them when the sequences are resumed, treating the sequences as new prompts.
Source code in vllm/core/scheduler.py
class PreemptionMode(enum.Enum):
    """Preemption modes.

    1. Swapping: Swap out the blocks of the preempted sequences to CPU memory
    and swap them back in when the sequences are resumed.
    2. Recomputation: Discard the blocks of the preempted sequences and
    recompute them when the sequences are resumed, treating the sequences as
    new prompts.
    """

    SWAP = enum.auto()
    RECOMPUTE = enum.auto()
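
The choice between the two modes is made elsewhere in the scheduler. Below is a simplified sketch of the usual heuristic (illustrative only, not vLLM's exact decision logic): honor an explicitly configured mode if present, otherwise recompute cheap single-sequence groups and swap groups with multiple sequences.

def choose_preemption_mode(seq_group, user_specified_mode=None):
    # Honor an explicit user preference when one is configured.
    if user_specified_mode is not None:
        return user_specified_mode
    # Recomputation only re-runs one prefill for single-sequence groups;
    # swapping preserves the KV blocks of multi-sequence groups.
    if seq_group.get_max_num_running_seqs() == 1:
        return PreemptionMode.RECOMPUTE
    return PreemptionMode.SWAP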

RECOMPUTE class-attribute instance-attribute

RECOMPUTE = auto()

SWAP class-attribute instance-attribute

SWAP = auto()

ScheduledSequenceGroup dataclass

Source code in vllm/core/scheduler.py
@dataclass
class ScheduledSequenceGroup:
    # A sequence group that's scheduled.
    seq_group: SequenceGroup
    # The total chunk size (number of tokens) to process for next iteration.
    # 1 for decoding. Same as prompt tokens for prefill, but if prefill is
    # chunked, it can be smaller than that.
    token_chunk_size: int

seq_group instance-attribute

seq_group: SequenceGroup

token_chunk_size instance-attribute

token_chunk_size: int

__init__

__init__(
    seq_group: SequenceGroup, token_chunk_size: int
) -> None
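
For instance (illustrative values; prefill_group and decode_group are assumed SequenceGroup instances): a decode step always carries a chunk of one token, while a chunked prefill may cover only part of the remaining prompt.

chunked_prefill = ScheduledSequenceGroup(seq_group=prefill_group,
                                         token_chunk_size=512)
decode_step = ScheduledSequenceGroup(seq_group=decode_group,
                                     token_chunk_size=1)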

Scheduler

Source code in vllm/core/scheduler.py
class Scheduler:

    def __init__(
        self,
        scheduler_config: SchedulerConfig,
        cache_config: CacheConfig,
        lora_config: Optional[LoRAConfig],
        pipeline_parallel_size: int = 1,
        output_proc_callback: Optional[Callable] = None,
    ) -> None:
        self.scheduler_config = scheduler_config
        self.cache_config = cache_config
        # Note for LoRA scheduling: the current policy is extremely
        # simple and NOT fair. It can lead to starvation of some
        # LoRAs. This should be improved in the future.
        self.lora_config = lora_config

        version = "selfattn"
        if (self.scheduler_config.runner_type == "pooling"
                or self.cache_config.is_attention_free):
            version = "placeholder"

        BlockSpaceManagerImpl = BlockSpaceManager.get_block_space_manager_class(
            version)

        num_gpu_blocks = cache_config.num_gpu_blocks
        if num_gpu_blocks:
            num_gpu_blocks //= pipeline_parallel_size

        num_cpu_blocks = cache_config.num_cpu_blocks
        if num_cpu_blocks:
            num_cpu_blocks //= pipeline_parallel_size

        # Create the block space manager.
        self.block_manager = BlockSpaceManagerImpl(
            block_size=self.cache_config.block_size,
            num_gpu_blocks=num_gpu_blocks,
            num_cpu_blocks=num_cpu_blocks,
            sliding_window=self.cache_config.sliding_window,
            enable_caching=self.cache_config.enable_prefix_caching,
        )

        # Sequence groups in the WAITING state.
        # Contain new prefill or preempted requests.
        self.waiting: Deque[SequenceGroup] = deque()
        # Sequence groups in the RUNNING state.
        # Contain decode requests.
        self.running: Deque[SequenceGroup] = deque()
        # Sequence groups in the SWAPPED state.
        # Contain decode requests that are swapped out.
        self.swapped: Deque[SequenceGroup] = deque()
        # Request ids of sequence groups finished since the last step iteration.
        # It lets the model know that any state associated with these requests
        # can and must be released after the current step.
        # This is used to evict the finished requests from the Mamba cache.
        self._finished_requests_ids: List[str] = list()
        # Time at previous scheduling step
        self.prev_time = 0.0
        # Did we schedule a prompt at previous step?
        self.prev_prompt = False
        # Latency of the last prompt step
        self.last_prompt_latency = 0.0
        # preemption mode, RECOMPUTE or SWAP
        self.user_specified_preemption_mode = scheduler_config.preemption_mode

        # The following field is test-only. It is used to inject artificial
        # preemption.
        self.enable_artificial_preemption = ENABLE_ARTIFICIAL_PREEMPT
        self.artificial_preempt_cnt = (ARTIFICIAL_PREEMPTION_MAX_CNT
                                       if self.enable_artificial_preemption
                                       else 0)
        self.num_cumulative_preemption: int = 0

        # Used to cache python objects
        self._seq_group_metadata_cache: List[PyObjectCache] = []
        self._scheduler_running_outputs_cache: List[PyObjectCache] = []
        self._scheduled_seq_group_cache: List[PyObjectCache] = []

        # For async output processing, we need to swap cache buffers between
        # iterations. I.e. since the output processing is lagged one step,
        # we cannot reuse the cached objects immediately when the schedule()
        # is called again, but only when schedule() is called the second time.
        self.output_proc_callback = output_proc_callback
        self.use_async_output_proc = self.output_proc_callback is not None
        self.num_cache_iters = 2 if self.use_async_output_proc else 1

        self.cache_id = 0
        for i in range(self.num_cache_iters):
            self._seq_group_metadata_cache.append(
                PyObjectCache(seq_group_metadata_builder))
            self._scheduler_running_outputs_cache.append(
                PyObjectCache(scheduler_running_outputs_builder))
            self._scheduled_seq_group_cache.append(
                PyObjectCache(scheduled_seq_group_builder))

        # For async postprocessor, the extra decode run cannot be done
        # when the request reaches max_model_len. In this case, the request
        # will be stopped during schedule() call and added to this stop list
        # for processing and deallocation by the free_finished_seq_groups()
        self._async_stopped: List[SequenceGroup] = []

        # List with the chunk sizes to hand out to each sequence depending
        # on how many partial prefills are running. This is slightly faster than
        # running an integer division every time a prefill is scheduled.
        # This splits the budget evenly among all prefills.
        self.partial_prefill_budget_lookup_list = [0] * (
            self.scheduler_config.max_num_partial_prefills + 1)
        self.partial_prefill_budget_lookup_list[0] = (
            scheduler_config.max_num_batched_tokens)
        for i in range(1, self.scheduler_config.max_num_partial_prefills + 1):
            self.partial_prefill_budget_lookup_list[i] = (
                scheduler_config.max_num_batched_tokens // i)
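        # Example with hypothetical values: if max_num_batched_tokens is 8192
        # and max_num_partial_prefills is 4, the list works out to
        # [8192, 8192, 4096, 2730, 2048], i.e. entry i holds the per-prefill
        # token budget when i partial prefills share the batch.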

    @property
    def next_cache_id(self):
        return (self.cache_id + 1) % self.num_cache_iters

    @property
    def lora_enabled(self) -> bool:
        return bool(self.lora_config)

    @property
    def num_decoding_tokens_per_seq(self) -> int:
        """The number of new tokens."""
        return 1

    def add_seq_group(self, seq_group: SequenceGroup) -> None:
        # Add sequence groups to the waiting queue.
        self.waiting.append(seq_group)

    def _add_seq_group_to_running(self, seq_group: SequenceGroup) -> None:
        # Add sequence groups to the running queue.
        # Only for testing purposes.
        self.running.append(seq_group)

    def _add_seq_group_to_swapped(self, seq_group: SequenceGroup) -> None:
        # Add sequence groups to the swapped queue.
        # Only for testing purposes.
        self.swapped.append(seq_group)

    def abort_seq_group(
        self,
        request_id: Union[str, Iterable[str]],
        seq_id_to_seq_group: Optional[Dict[str, SequenceGroupBase]] = None,
    ) -> None:
        """Aborts a sequence group with the given ID.

        Check if the sequence group with the given ID
            is present in any of the state queues.
        If present, remove the sequence group from the state queue.
            Also, if any of the sequences in the sequence group is not finished,
                free the sequence with status `FINISHED_ABORTED`.
        Otherwise, do nothing.

        Args:
            request_id: The ID(s) of the sequence group to abort.
            seq_id_to_seq_group: helper for groups with n>1
        """
        if isinstance(request_id, str):
            request_id = (request_id, )
        request_ids = set(request_id)
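        # e.g. both abort_seq_group("req-1") and abort_seq_group(["req-1",
        # "req-2"]) are normalized to a set of request id strings here.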
        seq_id_to_seq_group = seq_id_to_seq_group or {}
        for state_queue in [self.waiting, self.running, self.swapped]:
            aborted_groups: List[SequenceGroup] = []
            for seq_group in state_queue:
                # When n>1, seq_group.request_id looks like
                # foo_parallel_sample_0, while request_ids is just foo, and we
                # should resolve it as real_request_id to match.
                if seq_group.request_id in seq_id_to_seq_group:
                    real_request_id = seq_id_to_seq_group[
                        seq_group.request_id].group_id
                else:
                    real_request_id = seq_group.request_id
                if real_request_id in request_ids:
                    # Appending aborted group into pending list.
                    aborted_groups.append(seq_group)
                    # We can't remove real_request_id in request_ids here,
                    # because there may be other seq groups sharing the same
                    # real_request_id
            for aborted_group in aborted_groups:
                # Remove the sequence group from the state queue.
                state_queue.remove(aborted_group)
                # Remove the aborted request from the Mamba cache.
                self._finished_requests_ids.append(aborted_group.request_id)
                for seq in aborted_group.get_seqs():
                    if seq.is_finished():
                        continue
                    seq.status = SequenceStatus.FINISHED_ABORTED
                    self.free_seq(seq)
                if aborted_group.request_id in seq_id_to_seq_group:
                    del seq_id_to_seq_group[aborted_group.request_id]

                self._free_seq_group_cross_attn_blocks(aborted_group)

    def _free_seq_group_cross_attn_blocks(
        self,
        seq_group: SequenceGroup,
    ) -> None:
        """
        Free a sequence group from a cross-attention block table.
        Has no effect on decoder-only models.
        """
        if seq_group.is_encoder_decoder():
            self.block_manager.free_cross(seq_group)

    def has_unfinished_seqs(self) -> bool:
        return (len(self.waiting) != 0 or len(self.running) != 0
                or len(self.swapped) != 0)

    def get_prefix_cache_hit_rate(self, device: Device) -> float:
        return self.block_manager.get_prefix_cache_hit_rate(device)

    def reset_prefix_cache(self, device: Optional[Device] = None) -> bool:
        return self.block_manager.reset_prefix_cache(device)

    def get_num_unfinished_seq_groups(self) -> int:
        return len(self.waiting) + len(self.running) + len(self.swapped)

    def get_and_reset_finished_requests_ids(self) -> List[str]:
        """Flushes the list of request ids of previously finished seq_groups."""
        finished_requests_ids = self._finished_requests_ids
        self._finished_requests_ids = list()
        return finished_requests_ids

    def _schedule_running(
        self,
        budget: SchedulingBudget,
        curr_loras: Optional[Set[int]],
        enable_chunking: bool = False,
        partial_prefill_metadata: Optional[PartialPrefillMetadata] = None,
    ) -> SchedulerRunningOutputs:
        """Schedule sequence groups that are running.

        Running queue should include decode and chunked prefill requests.

        Args:
            budget: The scheduling budget. The argument is in-place updated
                when any decodes are preempted.
            curr_loras: Currently batched lora request ids. The argument is
                in-place updated when any decodes are preempted.
            enable_chunking: If True, the seq group can be chunked and only a
                chunked number of tokens is scheduled if
                `budget.num_batched_tokens` does not have enough capacity to
                schedule all tokens.
            partial_prefill_metadata: information about the partial prefills
                that are currently running

        Returns:
            SchedulerRunningOutputs.
        """
        ret: SchedulerRunningOutputs = self._scheduler_running_outputs_cache[
            self.cache_id].get_object()
        ret.blocks_to_swap_out.clear()
        ret.blocks_to_copy.clear()
        ret.decode_seq_groups.clear()
        ret.prefill_seq_groups.clear()
        ret.preempted.clear()
        ret.swapped_out.clear()

        ret.num_lookahead_slots = self._get_num_lookahead_slots(
            is_prefill=False, enable_chunking=enable_chunking)

        ret.decode_seq_groups_list.clear()
        ret.prefill_seq_groups_list.clear()

        # Blocks that need to be swapped or copied before model execution.
        blocks_to_swap_out: List[Tuple[int, int]] = ret.blocks_to_swap_out
        blocks_to_copy: List[Tuple[int, int]] = ret.blocks_to_copy

        decode_seq_groups: List[ScheduledSequenceGroup] = ret.decode_seq_groups
        prefill_seq_groups: List[
            ScheduledSequenceGroup] = ret.prefill_seq_groups
        preempted: List[SequenceGroup] = ret.preempted
        swapped_out: List[SequenceGroup] = ret.swapped_out

        running_queue = self.running
        assert len(self._async_stopped) == 0
        while running_queue:
            seq_group = running_queue[0]
            # We discard the cached tokens info here because we don't need it
            # for running sequence:
            #   1. If a sequence is running with chunked prefill, the cached
            #      tokens info was already used for the first prefill.
            #   2. If a sequence is running with non-chunked prefill, then
            #      it's a decoding sequence, and the cached tokens info is
            #      irrelevant.
            num_uncached_new_tokens, _ = \
                self._get_num_new_uncached_and_cached_tokens(
                seq_group,
                SequenceStatus.RUNNING,
                enable_chunking,
                budget,
                partial_prefill_metadata,
            )

            num_running_tokens = num_uncached_new_tokens
            if num_running_tokens == 0:
                # No budget => Stop
                break

            running_queue.popleft()

            # With async postprocessor, an extra decode run is done
            # to process the final tokens. The check below avoids this extra
            # decode run when the model max len is reached, in order to avoid
            # a memory overflow.
            if (self.use_async_output_proc and seq_group.seqs[0].get_len()
                    > self.scheduler_config.max_model_len):
                self._async_stopped.append(seq_group)
                continue

            # NOTE(woosuk): Preemption happens only when there is no available
            # slot to keep all the sequence groups in the RUNNING state.
            while not self._can_append_slots(seq_group, enable_chunking):
                budget.subtract_num_batched_tokens(seq_group.request_id,
                                                   num_running_tokens)
                num_running_seqs = seq_group.get_max_num_running_seqs()
                budget.subtract_num_seqs(seq_group.request_id,
                                         num_running_seqs)

                if (curr_loras is not None and seq_group.lora_int_id > 0
                        and seq_group.lora_int_id in curr_loras):
                    curr_loras.remove(seq_group.lora_int_id)

                # Determine victim sequence
                cont_loop = True
                if running_queue:
                    # Preempt the lowest-priority sequence group.
                    victim_seq_group = running_queue.pop()
                else:
                    # No other sequence group can be preempted.
                    # Preempt the current sequence group.
                    # Note: This is also where we stop this loop
                    # (since there is nothing else to preempt)
                    victim_seq_group = seq_group
                    cont_loop = False

                # With async postprocessor, before preempting a sequence
                # we need to ensure it has no pending async postprocessor
                do_preempt = True
                if self.use_async_output_proc:
                    assert self.output_proc_callback is not None
                    self.output_proc_callback(
                        request_id=victim_seq_group.request_id)

                    # It may be that the async pending "victim_seq_group"
                    # becomes finished, in which case we simply free it.
                    if victim_seq_group.is_finished():
                        self._free_finished_seq_group(victim_seq_group)
                        do_preempt = False

                # Do preemption
                if do_preempt:
                    preempted_mode = self._preempt(victim_seq_group,
                                                   blocks_to_swap_out)
                    if preempted_mode == PreemptionMode.RECOMPUTE:
                        preempted.append(victim_seq_group)
                    else:
                        swapped_out.append(victim_seq_group)

                if not cont_loop:
                    break
            else:
                self._append_slots(seq_group, blocks_to_copy, enable_chunking)
                is_prefill = seq_group.is_prefill()

                scheduled_seq_group: ScheduledSequenceGroup = (
                    self._scheduled_seq_group_cache[
                        self.cache_id].get_object())
                scheduled_seq_group.seq_group = seq_group
                if is_prefill:
                    scheduled_seq_group.token_chunk_size = num_running_tokens
                    prefill_seq_groups.append(scheduled_seq_group)
                    ret.prefill_seq_groups_list.append(seq_group)
                else:
                    scheduled_seq_group.token_chunk_size = 1
                    decode_seq_groups.append(scheduled_seq_group)
                    ret.decode_seq_groups_list.append(seq_group)

                budget.add_num_batched_tokens(seq_group.request_id,
                                              num_running_tokens)
                # OPTIMIZATION: Note that get_max_num_running_seqs is
                # expensive. For the default scheduling case where
                # enable_chunking is False, num_seqs are updated before running
                # this method, so we don't have to update it again here.
                if enable_chunking:
                    num_running_seqs = seq_group.get_max_num_running_seqs()
                    budget.add_num_seqs(seq_group.request_id, num_running_seqs)
                if curr_loras is not None and seq_group.lora_int_id > 0:
                    curr_loras.add(seq_group.lora_int_id)

        self._scheduler_running_outputs_cache[self.next_cache_id].reset()
        self._scheduled_seq_group_cache[self.next_cache_id].reset()

        return ret

    def _schedule_swapped(
        self,
        budget: SchedulingBudget,
        curr_loras: Optional[Set[int]],
        enable_chunking: bool = False,
    ) -> SchedulerSwappedInOutputs:
        """Schedule sequence groups that are swapped out.

        It schedules swapped requests as long as it fits `budget` and
        curr_loras <= max_lora from the scheduling config. The input arguments
        `budget` and `curr_loras` are updated based on scheduled seq_groups.

        Args:
            budget: The scheduling budget. The argument is in-place updated
                when any requests are swapped in.
            curr_loras: Currently batched lora request ids. The argument is
                in-place updated when any requests are swapped in.
            enable_chunking: If True, the seq group can be chunked and only a
                chunked number of tokens is scheduled if
                `budget.num_batched_tokens` does not have enough capacity to
                schedule all tokens.

        Returns:
            SchedulerSwappedInOutputs.
        """
        # Blocks that need to be swapped or copied before model execution.
        blocks_to_swap_in: List[Tuple[int, int]] = []
        blocks_to_copy: List[Tuple[int, int]] = []
        decode_seq_groups: List[ScheduledSequenceGroup] = []
        prefill_seq_groups: List[ScheduledSequenceGroup] = []
        infeasible_seq_groups: List[SequenceGroup] = []

        swapped_queue = self.swapped

        leftover_swapped: Deque[SequenceGroup] = deque()
        while swapped_queue:
            seq_group = swapped_queue[0]

            # If the sequence group cannot be swapped in, stop.
            is_prefill = seq_group.is_prefill()
            alloc_status = self.block_manager.can_swap_in(
                seq_group,
                self._get_num_lookahead_slots(is_prefill, enable_chunking))
            if alloc_status == AllocStatus.LATER:
                break
            elif alloc_status == AllocStatus.NEVER:
                logger.warning(
                    "Failing the request %s because there's not enough kv "
                    "cache blocks to run the entire sequence.",
                    seq_group.request_id,
                )
                for seq in seq_group.get_seqs():
                    seq.status = SequenceStatus.FINISHED_IGNORED
                infeasible_seq_groups.append(seq_group)
                swapped_queue.popleft()
                continue

            lora_int_id = 0
            if self.lora_enabled:
                lora_int_id = seq_group.lora_int_id
                assert curr_loras is not None
                assert self.lora_config is not None
                if (lora_int_id > 0 and (lora_int_id not in curr_loras)
                        and len(curr_loras) >= self.lora_config.max_loras):
                    # We don't have a space for another LoRA, so
                    # we ignore this request for now.
                    leftover_swapped.appendleft(seq_group)
                    swapped_queue.popleft()
                    continue

            # The total number of sequences in the RUNNING state should not
            # exceed the maximum number of sequences.
            num_new_seqs = seq_group.get_max_num_running_seqs()
            num_new_tokens_uncached, num_new_tokens_cached = (
                self._get_num_new_uncached_and_cached_tokens(
                    seq_group, SequenceStatus.SWAPPED, enable_chunking,
                    budget))

            if num_new_tokens_uncached == 0 or not budget.can_schedule(
                    num_new_tokens=num_new_tokens_uncached,
                    num_new_seqs=num_new_seqs,
            ):
                self.remove_seq_from_computed_blocks_tracker(
                    seq_group, SequenceStatus.SWAPPED)
                break

            if lora_int_id > 0 and curr_loras is not None:
                curr_loras.add(lora_int_id)
            swapped_queue.popleft()
            self._swap_in(seq_group, blocks_to_swap_in)
            self._append_slots(seq_group, blocks_to_copy, enable_chunking)
            if is_prefill:
                prefill_seq_groups.append(
                    ScheduledSequenceGroup(
                        seq_group,
                        token_chunk_size=num_new_tokens_uncached +
                        num_new_tokens_cached,
                    ))
            else:
                decode_seq_groups.append(
                    ScheduledSequenceGroup(seq_group, token_chunk_size=1))
            budget.add_num_batched_tokens(
                seq_group.request_id,
                num_batched_tokens=num_new_tokens_uncached,
                num_cached_tokens=num_new_tokens_cached,
            )
            budget.add_num_seqs(seq_group.request_id, num_new_seqs)

        swapped_queue.extendleft(leftover_swapped)

        return SchedulerSwappedInOutputs(
            decode_seq_groups=decode_seq_groups,
            prefill_seq_groups=prefill_seq_groups,
            blocks_to_swap_in=blocks_to_swap_in,
            blocks_to_copy=blocks_to_copy,
            num_lookahead_slots=self._get_num_lookahead_slots(
                is_prefill=False, enable_chunking=enable_chunking),
            infeasible_seq_groups=infeasible_seq_groups,
        )

    def _get_prompt_limit(self, seq_group: SequenceGroup) -> int:
        if (self.scheduler_config.chunked_prefill_enabled
                and not self.scheduler_config.is_multi_step):
            prompt_limit = self.scheduler_config.max_model_len
        else:
            prompt_limit = min(
                self.scheduler_config.max_model_len,
                self.scheduler_config.max_num_batched_tokens,
            )

        # Model is fine tuned with long context. Return the fine tuned max_len.
        if seq_group.lora_request and seq_group.lora_request.long_lora_max_len:
            assert prompt_limit <= seq_group.lora_request.long_lora_max_len
            return seq_group.lora_request.long_lora_max_len
        else:
            return prompt_limit

    def _get_priority(self,
                      seq_group: SequenceGroup) -> Tuple[Optional[int], float]:
        """Get the priority of the sequence group.
        Highest preference to user-defined priority, followed by arrival time.
        Args:
            seq_group: The sequence group input.
        Returns:
            The priority of the sequence group.
        """
        return seq_group.priority, seq_group.arrival_time

    def _schedule_priority_preemption(
        self,
        budget: SchedulingBudget,
    ) -> int:
        """Sorts waiting and running queue. Also, force preempt requests
        from the running queue if their priority is lower.
        Priority-based preemption is used with the priority policy.
        Args:
            budget: The scheduling budget. The argument is in-place updated
                when any requests are scheduled.
        Returns:
            A count of priority-based preemptions.
        """

        waiting_queue = self.waiting

        running_queue = deque(sorted(self.running, key=self._get_priority))

        blocks_to_swap_out: List[Tuple[int, int]] = []
        force_preemption_count = 0

        if waiting_queue:
            seq_group = waiting_queue.popleft()
            num_new_seqs = seq_group.get_max_num_running_seqs()
            num_new_tokens_uncached, _ = \
                self._get_num_new_uncached_and_cached_tokens(
                seq_group, SequenceStatus.WAITING, False, budget)

            # Only preempt if priority inversion exists
            while running_queue and self._get_priority(
                    running_queue[-1]) > self._get_priority(seq_group):
                # Only preempt if waiting sequence cannot be allocated
                can_allocate = self.block_manager.can_allocate(seq_group)
                if (num_new_tokens_uncached > 0
                        and can_allocate == AllocStatus.OK
                        and budget.can_schedule(
                            num_new_tokens=num_new_tokens_uncached,
                            num_new_seqs=num_new_seqs,
                        )):
                    break

                # Adjust budget to remove the victim sequence group
                vseq_group = running_queue.pop()
                num_running_tokens_uncached, _ = (
                    self._get_num_new_uncached_and_cached_tokens(
                        vseq_group, SequenceStatus.RUNNING, False, budget))
                budget.subtract_num_batched_tokens(
                    vseq_group.request_id, num_running_tokens_uncached)
                num_running_seqs = vseq_group.get_max_num_running_seqs()
                budget.subtract_num_seqs(vseq_group.request_id,
                                         num_running_seqs)

                # Preempt out the victim sequence group
                self._preempt(vseq_group, blocks_to_swap_out)
                waiting_queue.appendleft(vseq_group)
                force_preemption_count += 1
            # Put the sequence back into the waiting queue
            waiting_queue.appendleft(seq_group)

            self.remove_seq_from_computed_blocks_tracker(
                seq_group, SequenceStatus.WAITING)

        waiting_queue = deque(sorted(waiting_queue, key=self._get_priority))

        self.waiting = waiting_queue
        self.running = running_queue
        return force_preemption_count

    def _schedule_prefills(
        self,
        budget: SchedulingBudget,
        curr_loras: Optional[Set[int]],
        enable_chunking: bool = False,
        partial_prefill_metadata: Optional[PartialPrefillMetadata] = None,
    ) -> SchedulerPrefillOutputs:
        """Schedule sequence groups that are in prefill stage.

        Note that the current scheduler treats PREEMPTED_FOR_RECOMPUTE
        as a new prefill (that starts from beginning -> most recently generated
        tokens).

        It schedules waiting requests as long as it fits `budget` and
        curr_loras <= max_lora from the scheduling config. The input arguments
        `budget` and `curr_loras` are updated based on scheduled seq_groups.

        Args:
            budget: The scheduling budget. The argument is in-place updated
                when any requests are scheduled.
            curr_loras: Currently batched lora request ids. The argument is
                in-place updated when any requests are scheduled.
            enable_chunking: If True, the seq group can be chunked and only a
                chunked number of tokens is scheduled if
                `budget.num_batched_tokens` does not have enough capacity to
                schedule all tokens.
            partial_prefill_metadata: information about the partial prefills
                that are currently running

        Returns:
            SchedulerPrefillOutputs.
        """
        if budget.remaining_token_budget() == 0:
            # Do nothing: Can't add any more prefill anyway
            return SchedulerPrefillOutputs(
                seq_groups=[],
                ignored_seq_groups=[],
                num_lookahead_slots=self._get_num_lookahead_slots(
                    is_prefill=True, enable_chunking=enable_chunking),
            )
        ignored_seq_groups: List[SequenceGroup] = []
        seq_groups: List[ScheduledSequenceGroup] = []
        using_prompt_embeds: bool = False

        waiting_queue = self.waiting

        leftover_waiting_sequences: Deque[SequenceGroup] = deque()
        while self._passed_delay(time.time()) and waiting_queue:
            seq_group = waiting_queue[0]

            waiting_seqs = seq_group.get_seqs(status=SequenceStatus.WAITING)
            assert len(waiting_seqs) == 1, (
                "Waiting sequence group should have only one prompt "
                "sequence.")
            if (partial_prefill_metadata is not None
                    and not partial_prefill_metadata.can_schedule(seq_group)):
                leftover_waiting_sequences.appendleft(seq_group)
                waiting_queue.popleft()
                continue
            num_new_tokens_uncached, num_new_tokens_cached = (
                self._get_num_new_uncached_and_cached_tokens(
                    seq_group,
                    SequenceStatus.WAITING,
                    enable_chunking,
                    budget,
                    partial_prefill_metadata=partial_prefill_metadata,
                ))
            num_new_tokens = num_new_tokens_uncached + num_new_tokens_cached

            if not enable_chunking:
                num_prompt_tokens = waiting_seqs[0].get_len()
                assert num_new_tokens == num_prompt_tokens

            prompt_limit = self._get_prompt_limit(seq_group)
            if num_new_tokens > prompt_limit:
                logger.warning(
                    "Input prompt (%d tokens) is too long"
                    " and exceeds limit of %d",
                    num_new_tokens,
                    prompt_limit,
                )
                for seq in waiting_seqs:
                    seq.status = SequenceStatus.FINISHED_IGNORED
                self.remove_seq_from_computed_blocks_tracker(
                    seq_group, SequenceStatus.FINISHED_IGNORED)
                ignored_seq_groups.append(seq_group)
                waiting_queue.popleft()
                continue

            num_lookahead_slots: int = 0
            if self.scheduler_config.is_multi_step and enable_chunking:
                num_lookahead_slots = self._get_num_lookahead_slots(
                    True, enable_chunking)

            # If the sequence group cannot be allocated, stop.
            can_allocate = self.block_manager.can_allocate(
                seq_group, num_lookahead_slots=num_lookahead_slots)
            if can_allocate == AllocStatus.LATER:
                self.remove_seq_from_computed_blocks_tracker(
                    seq_group, SequenceStatus.WAITING)
                break
            elif can_allocate == AllocStatus.NEVER:
                logger.warning(
                    "Input prompt (%d tokens) + lookahead slots (%d) is "
                    "too long and exceeds the capacity of block_manager",
                    num_new_tokens,
                    num_lookahead_slots,
                )
                for seq in waiting_seqs:
                    seq.status = SequenceStatus.FINISHED_IGNORED
                self.remove_seq_from_computed_blocks_tracker(
                    seq_group, SequenceStatus.FINISHED_IGNORED)
                ignored_seq_groups.append(seq_group)
                waiting_queue.popleft()
                continue

            # We cannot mix sequence groups that use prompt embeds and
            # those that do not.
            if len(seq_groups) == 0:
                using_prompt_embeds = seq_group.uses_prompt_embeds()
            if using_prompt_embeds != seq_group.uses_prompt_embeds():
                self.remove_seq_from_computed_blocks_tracker(
                    seq_group, SequenceStatus.WAITING)
                leftover_waiting_sequences.appendleft(seq_group)
                waiting_queue.popleft()
                continue

            lora_int_id = 0
            if self.lora_enabled:
                lora_int_id = seq_group.lora_int_id
                assert curr_loras is not None
                assert self.lora_config is not None
                if (self.lora_enabled and lora_int_id > 0
                        and lora_int_id not in curr_loras
                        and len(curr_loras) >= self.lora_config.max_loras):
                    # We don't have a space for another LoRA, so
                    # we ignore this request for now.
                    self.remove_seq_from_computed_blocks_tracker(
                        seq_group, SequenceStatus.WAITING)
                    leftover_waiting_sequences.appendleft(seq_group)
                    waiting_queue.popleft()
                    continue

            if (budget.num_batched_tokens
                    >= self.scheduler_config.max_num_batched_tokens):
                # We've reached the budget limit - since there might be
                # continuous prefills in the running queue, we should break
                # to avoid scheduling any new prefills.
                self.remove_seq_from_computed_blocks_tracker(
                    seq_group, SequenceStatus.WAITING)
                break

            num_new_seqs = seq_group.get_max_num_running_seqs()
            if num_new_tokens_uncached == 0 or not budget.can_schedule(
                    num_new_tokens=num_new_tokens_uncached,
                    num_new_seqs=num_new_seqs,
            ):
                self.remove_seq_from_computed_blocks_tracker(
                    seq_group, SequenceStatus.WAITING)
                break

            # Can schedule this request.
            if curr_loras is not None and lora_int_id > 0:
                curr_loras.add(lora_int_id)
            waiting_queue.popleft()
            self._allocate_and_set_running(seq_group)

            if partial_prefill_metadata is not None:
                partial_prefill_metadata.maybe_increment_partial_prefills(
                    seq_group)

            if enable_chunking and self.scheduler_config.is_multi_step:
                blocks_to_copy: List[Tuple[int, int]] = []
                # init_multi_step_from_lookahead_slots happens in append_slots
                self._append_slots(seq_group, blocks_to_copy, enable_chunking)
                # This assert will trip when a copy-on-write happens. This is
                # not a concern as the very first sequence-group block
                # allocation happens above. Still, we have the assert to
                # catch any edge-cases.
                assert not blocks_to_copy
            else:
                seq_group.init_multi_step_from_lookahead_slots(
                    num_lookahead_slots,
                    num_scheduler_steps=self.scheduler_config.
                    num_scheduler_steps,
                    is_multi_step=self.scheduler_config.is_multi_step,
                    enable_chunking=enable_chunking,
                )

            seq_groups.append(
                ScheduledSequenceGroup(seq_group=seq_group,
                                       token_chunk_size=num_new_tokens))
            budget.add_num_batched_tokens(
                seq_group.request_id,
                num_batched_tokens=num_new_tokens_uncached,
                num_cached_tokens=num_new_tokens_cached,
            )
            budget.add_num_seqs(seq_group.request_id, num_new_seqs)

        # Queue requests that couldn't be scheduled.
        waiting_queue.extendleft(leftover_waiting_sequences)
        if len(seq_groups) > 0:
            self.prev_prompt = True

        return SchedulerPrefillOutputs(
            seq_groups=seq_groups,
            ignored_seq_groups=ignored_seq_groups,
            num_lookahead_slots=self._get_num_lookahead_slots(
                is_prefill=True, enable_chunking=enable_chunking),
        )

    def _schedule_default(self) -> SchedulerOutputs:
        """Schedule queued requests.

        The current policy is designed to optimize throughput. First,
        it batches as many prefill requests as possible. Then it schedules
        decodes. If there is pressure on GPU memory, decode requests can
        be swapped or preempted.
        """
        # Include running requests to the budget.
        budget = SchedulingBudget(
            token_budget=self.scheduler_config.max_num_batched_tokens,
            max_num_seqs=self.scheduler_config.max_num_seqs,
        )
        # Make sure we include num running seqs before scheduling prefill,
        # so that we don't schedule beyond max_num_seqs for prefill.
        for seq_group in self.running:
            budget.add_num_seqs(seq_group.request_id,
                                seq_group.get_max_num_running_seqs())
        curr_loras = (set(
            seq_group.lora_int_id for seq_group in self.running
            if seq_group.lora_int_id > 0) if self.lora_enabled else None)

        prefills = SchedulerPrefillOutputs.create_empty()
        running_scheduled = SchedulerRunningOutputs.create_empty()
        swapped_in = SchedulerSwappedInOutputs.create_empty()

        # If any requests are swapped, prioritize swapped requests.
        if not self.swapped:
            prefills = self._schedule_prefills(budget,
                                               curr_loras,
                                               enable_chunking=False)

        if len(prefills.seq_groups
               ) == 0 and self.scheduler_config.policy == "priority":
            self._schedule_priority_preemption(budget)

        # Don't schedule decodes if prefills are scheduled.
        # NOTE: If `_schedule_prefills` doesn't enable chunking, self.running
        # only contains decode requests, not chunked prefills.
        if len(prefills.seq_groups) == 0:
            running_scheduled = self._schedule_running(budget,
                                                       curr_loras,
                                                       enable_chunking=False)

            # If any sequence group is preempted, do not swap in any sequence
            # group, because it means there's no slot for new running requests.
            if (len(running_scheduled.preempted) +
                    len(running_scheduled.swapped_out) == 0):
                swapped_in = \
                    self._schedule_swapped(budget, curr_loras)

        assert (budget.num_batched_tokens
                <= self.scheduler_config.max_num_batched_tokens)
        assert budget.num_curr_seqs <= self.scheduler_config.max_num_seqs

        # Update waiting requests.
        self.waiting.extendleft(running_scheduled.preempted)
        # Update new running requests.
        if len(prefills.seq_groups) > 0:
            self.running.extend([s.seq_group for s in prefills.seq_groups])

        self.running.extend(running_scheduled.decode_seq_groups_list)

        if len(swapped_in.decode_seq_groups) > 0:
            self.running.extend(
                [s.seq_group for s in swapped_in.decode_seq_groups])

        # Update swapped requests.
        self.swapped.extend(running_scheduled.swapped_out)
        preempted = len(running_scheduled.preempted) + len(
            running_scheduled.swapped_out)

        # There should be no prefill from the running queue because this policy
        # doesn't allow chunked prefills.
        assert len(running_scheduled.prefill_seq_groups) == 0
        assert len(swapped_in.prefill_seq_groups) == 0

        # Merge lists
        num_prefill_groups = len(prefills.seq_groups)
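        # Decode groups whose uses_prompt_embeds flag differs from the first
        # scheduled group cannot be batched with it; they are dropped from this
        # step's schedule and reported as ignored.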
        ignored_seq_groups_for_embeds = list[SequenceGroup]()
        if num_prefill_groups > 0:
            scheduled_seq_groups = prefills.seq_groups
            scheduled_seq_groups.extend(running_scheduled.decode_seq_groups)
            ignored_seq_groups_for_embeds.clear()
        else:
            scheduled_seq_groups = running_scheduled.decode_seq_groups
            if len(scheduled_seq_groups) > 0:
                using_prompt_embeds = scheduled_seq_groups[
                    0].seq_group.uses_prompt_embeds()
                ignored_seq_groups_for_embeds.clear()
                indices_ignored = list[int]()
                for i, schedule_seq_group in enumerate(scheduled_seq_groups):
                    if using_prompt_embeds !=\
                        schedule_seq_group.seq_group.uses_prompt_embeds():
                        ignored_seq_groups_for_embeds.append(
                            schedule_seq_group.seq_group)
                        indices_ignored.append(i)
                if len(ignored_seq_groups_for_embeds) > 0:
                    scheduled_seq_groups = [
                        group for i, group in enumerate(scheduled_seq_groups)
                        if i not in indices_ignored
                    ]
            else:
                ignored_seq_groups_for_embeds.clear()

        scheduled_seq_groups.extend(swapped_in.decode_seq_groups)

        blocks_to_copy = running_scheduled.blocks_to_copy
        blocks_to_copy.extend(swapped_in.blocks_to_copy)

        ignored_seq_groups = prefills.ignored_seq_groups
        ignored_seq_groups.extend(ignored_seq_groups_for_embeds)
        ignored_seq_groups.extend(swapped_in.infeasible_seq_groups)

        return SchedulerOutputs(
            scheduled_seq_groups=scheduled_seq_groups,
            num_prefill_groups=num_prefill_groups,
            num_batched_tokens=budget.num_batched_tokens +
            budget.num_cached_tokens,
            blocks_to_swap_in=swapped_in.blocks_to_swap_in,
            blocks_to_swap_out=running_scheduled.blocks_to_swap_out,
            blocks_to_copy=blocks_to_copy,
            ignored_seq_groups=ignored_seq_groups,
            num_lookahead_slots=running_scheduled.num_lookahead_slots,
            running_queue_size=len(self.running),
            preempted=preempted,
        )

    def _schedule_chunked_prefill(self) -> SchedulerOutputs:
        """Schedule queued requests.

        Chunked prefill splits prefill requests into chunks and batches them
        together with decode requests. This policy 1. schedules as many
        decode requests as possible, 2. schedules chunked prefill requests
        that are not finished, 3. schedules swapped requests, and
        4. schedules new prefill requests.

        The policy can sustain high GPU utilization because it can put
        prefill and decode requests in the same batch, and it improves
        inter-token latency because decode requests are not blocked by
        prefill requests.
        """
        budget = SchedulingBudget(
            token_budget=self.scheduler_config.max_num_batched_tokens,
            max_num_seqs=self.scheduler_config.max_num_seqs,
        )
        curr_loras: Set[int] = set()

        prefills = SchedulerPrefillOutputs.create_empty()
        swapped_in = SchedulerSwappedInOutputs.create_empty()

        # Create partial prefill metadata
        partial_prefill_metadata = PartialPrefillMetadata.from_queues(
            running=self.running,
            waiting=self.waiting,
            scheduler_config=self.scheduler_config,
        )

        # Decodes should always be scheduled first, in FCFS order.
        running_scheduled = self._schedule_running(
            budget,
            curr_loras,
            enable_chunking=True,
            partial_prefill_metadata=partial_prefill_metadata,
        )

        # Schedule swapped out requests.
        # If preemption happens, it means we don't have space for swap-in.
        if len(running_scheduled.preempted) + len(
                running_scheduled.swapped_out) == 0:
            swapped_in = self._schedule_swapped(budget, curr_loras)

        prefills = self._schedule_prefills(
            budget,
            curr_loras,
            enable_chunking=True,
            partial_prefill_metadata=partial_prefill_metadata,
        )

        assert (budget.num_batched_tokens
                <= self.scheduler_config.max_num_batched_tokens)
        assert budget.num_curr_seqs <= self.scheduler_config.max_num_seqs

        # Update waiting requests.
        self.waiting.extendleft(running_scheduled.preempted)

        # Update new running requests.
        # By default, vLLM scheduler prioritizes prefills.
        # Once chunked prefill is enabled,
        # the policy is changed to prioritize decode requests.
        self.running.extend(
            [s.seq_group for s in swapped_in.decode_seq_groups])
        self.running.extend(
            [s.seq_group for s in swapped_in.prefill_seq_groups])
        self.running.extend(
            [s.seq_group for s in running_scheduled.decode_seq_groups])
        # Because multiple prefills may be running concurrently, we need to
        # make sure that prefills which are scheduled to finish are listed
        # before those that won't. This is so that on the next scheduling
        # iteration when they have transitioned to the decode stage, they are
        # properly prioritized over sequences that are still in the prefill
        # stage.
        self.running.extend(
            self._order_finishing_prefills_first(
                running_scheduled.prefill_seq_groups))
        self.running.extend([s.seq_group for s in prefills.seq_groups])

        # Update swapped requests.
        self.swapped.extend(running_scheduled.swapped_out)
        # Put prefills first due to Attention backend ordering assumption.
        scheduled_seq_groups = (prefills.seq_groups +
                                running_scheduled.prefill_seq_groups +
                                swapped_in.prefill_seq_groups +
                                running_scheduled.decode_seq_groups +
                                swapped_in.decode_seq_groups)
        num_prefill_groups = (len(prefills.seq_groups) +
                              len(swapped_in.prefill_seq_groups) +
                              len(running_scheduled.prefill_seq_groups))
        # If all scheduled groups are prompts, we set num_lookahead_slots to 0;
        # this allows us to go through the `no_spec` path in
        # `spec_decode_worker.py`.
        all_prefills = len(scheduled_seq_groups) == num_prefill_groups
        num_lookahead_slots = (0 if
                               (all_prefills
                                and not self.scheduler_config.is_multi_step)
                               else running_scheduled.num_lookahead_slots)
        return SchedulerOutputs(
            scheduled_seq_groups=scheduled_seq_groups,
            num_prefill_groups=num_prefill_groups,
            num_batched_tokens=budget.num_batched_tokens +
            budget.num_cached_tokens,
            blocks_to_swap_in=swapped_in.blocks_to_swap_in,
            blocks_to_swap_out=running_scheduled.blocks_to_swap_out,
            blocks_to_copy=running_scheduled.blocks_to_copy +
            swapped_in.blocks_to_copy,
            ignored_seq_groups=prefills.ignored_seq_groups +
            swapped_in.infeasible_seq_groups,
            num_lookahead_slots=num_lookahead_slots,
            running_queue_size=len(self.running),
            preempted=(len(running_scheduled.preempted) +
                       len(running_scheduled.swapped_out)),
        )

    def _order_finishing_prefills_first(
        self, scheduled_prefill_seqs: List[ScheduledSequenceGroup]
    ) -> List[SequenceGroup]:
        """Returns a list of prefilling SequenceGroups where sequences that are
        scheduled to finish prefilling are listed first"""
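        # Example: if group A's remaining uncomputed tokens equal its scheduled
        # chunk (its prefill finishes this step) and group B's do not, the
        # returned order is [A, B]; on the next iteration A, now a decode, is
        # ahead of B's remaining prefill chunks in self.running.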
        finishing = [
            s.seq_group for s in scheduled_prefill_seqs
            if s.seq_group.get_num_uncomputed_tokens() == s.token_chunk_size
        ]
        not_finishing = [
            s.seq_group for s in scheduled_prefill_seqs
            if s.seq_group.get_num_uncomputed_tokens() != s.token_chunk_size
        ]
        return finishing + not_finishing

    def _schedule(self) -> SchedulerOutputs:
        """Schedule queued requests."""
        if self.scheduler_config.chunked_prefill_enabled:
            return self._schedule_chunked_prefill()
        else:
            return self._schedule_default()

    def _can_append_slots(self, seq_group: SequenceGroup,
                          enable_chunking: bool) -> bool:
        """Determine whether or not we have enough space in the KV cache to
        continue generation of the sequence group.
        """
        # It is True only for testing case to trigger artificial preemption.
        if (self.enable_artificial_preemption
                and random.uniform(0, 1) < ARTIFICIAL_PREEMPTION_PROB
                and self.artificial_preempt_cnt > 0):
            self.artificial_preempt_cnt -= 1
            return False

        is_prefill = seq_group.is_prefill()
        num_lookahead_slots = self._get_num_lookahead_slots(
            is_prefill, enable_chunking)

        if is_prefill and num_lookahead_slots > 0:
            # Appending prefill slots only happens when multi-step and
            # chunked-prefill are enabled together.
            assert self.scheduler_config.is_multi_step and enable_chunking

        return self.block_manager.can_append_slots(
            seq_group=seq_group, num_lookahead_slots=num_lookahead_slots)

    def _allow_async_output_proc(self, seq_group: SequenceGroup) -> bool:
        # async_output_proc is allowed only when we have a single sequence
        # in the sequence group
        no_single_seq = seq_group.sampling_params is None or (
            seq_group.sampling_params.n == 1)
        return no_single_seq

    def schedule(
            self
    ) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs, bool]:
        # Schedule sequence groups.
        # This function call changes the internal states of the scheduler
        # such as self.running, self.swapped, and self.waiting.
        scheduler_start_time = time.perf_counter()

        scheduler_outputs: SchedulerOutputs = self._schedule()
        now = time.time()

        if not self.cache_config.enable_prefix_caching:
            common_computed_block_nums = []

        allow_async_output_proc: bool = self.use_async_output_proc

        # Create input data structures.
        seq_group_metadata_list: List[SequenceGroupMetadata] = []
        for i, scheduled_seq_group in enumerate(
                scheduler_outputs.scheduled_seq_groups):
            seq_group = scheduled_seq_group.seq_group
            token_chunk_size = scheduled_seq_group.token_chunk_size
            seq_group.maybe_set_first_scheduled_time(now)

            seq_group_metadata = self._seq_group_metadata_cache[
                self.cache_id].get_object()
            seq_group_metadata.seq_data.clear()
            seq_group_metadata.block_tables.clear()

            # seq_id -> SequenceData
            seq_data: Dict[int, SequenceData] = {}
            # seq_id -> physical block numbers
            block_tables: Dict[int, List[int]] = {}

            if seq_group.is_encoder_decoder():
                # Encoder associated with SequenceGroup
                encoder_seq = seq_group.get_encoder_seq()
                assert encoder_seq is not None
                encoder_seq_data = encoder_seq.data
                # Block table for cross-attention
                # Also managed at SequenceGroup level
                cross_block_table = self.block_manager.get_cross_block_table(
                    seq_group)
            else:
                encoder_seq_data = None
                cross_block_table = None

            for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
                seq_id = seq.seq_id
                seq_data[seq_id] = seq.data
                block_tables[seq_id] = self.block_manager.get_block_table(seq)
                self.block_manager.access_all_blocks_in_seq(seq, now)

            if self.cache_config.enable_prefix_caching:
                common_computed_block_nums = (
                    self.block_manager.get_common_computed_block_ids(
                        seq_group.get_seqs(status=SequenceStatus.RUNNING)))

            do_sample = True
            is_prompt = seq_group.is_prefill()
            # We should send the metadata to workers when the first prefill
            # is sent. Subsequent requests could be chunked prefill or decode.
            is_first_prefill = False
            if is_prompt:
                seqs = seq_group.get_seqs()
                # Prefill has only 1 sequence.
                assert len(seqs) == 1
                num_computed_tokens = seqs[0].data.get_num_computed_tokens()
                is_first_prefill = num_computed_tokens == 0
                # If not all prompt tokens are computed after this step, the
                # prefill is chunked, and we don't need sampling.
                # NOTE: We use get_len instead of get_prompt_len because when
                # a sequence is preempted, prefill includes previous generated
                # output tokens.
                if (token_chunk_size + num_computed_tokens
                        < seqs[0].data.get_len()):
                    do_sample = False

            # This assumes that scheduled_seq_groups is ordered with
            # prefills before decodes.
            if is_first_prefill or not self.scheduler_config.send_delta_data:
                seq_group_metadata = SequenceGroupMetadata(
                    request_id=seq_group.request_id,
                    is_prompt=is_prompt,
                    seq_data=seq_data,
                    sampling_params=seq_group.sampling_params,
                    block_tables=block_tables,
                    do_sample=do_sample,
                    pooling_params=seq_group.pooling_params,
                    token_chunk_size=token_chunk_size,
                    lora_request=seq_group.lora_request,
                    computed_block_nums=common_computed_block_nums,
                    encoder_seq_data=encoder_seq_data,
                    cross_block_table=cross_block_table,
                    state=seq_group.state,
                    token_type_ids=seq_group.token_type_ids,
                    # `multi_modal_data` will only be present for the 1st comm
                    # between engine and worker.
                    # the subsequent comms can still use delta, but
                    # `multi_modal_data` will be None.
                    multi_modal_data=(seq_group.multi_modal_data
                                      if scheduler_outputs.num_prefill_groups
                                      > 0 else None),
                    multi_modal_placeholders=(
                        seq_group.multi_modal_placeholders
                        if scheduler_outputs.num_prefill_groups > 0 else None),
                    prompt_adapter_request=seq_group.prompt_adapter_request,
                )
            else:
                # When SPMD mode is enabled, we only send delta data except for
                # the first request to reduce serialization cost.
                seq_data_delta = {}
                for id, data in seq_data.items():
                    seq_data_delta[id] = data.get_delta_and_reset()
                seq_group_metadata = SequenceGroupMetadataDelta(
                    seq_data_delta,
                    seq_group.request_id,
                    block_tables,
                    is_prompt,
                    do_sample=do_sample,
                    token_chunk_size=token_chunk_size,
                    computed_block_nums=common_computed_block_nums,
                )
            seq_group_metadata_list.append(seq_group_metadata)

            if allow_async_output_proc:
                allow_async_output_proc = self._allow_async_output_proc(
                    seq_group)

        # Now that the batch has been created, we can assume all blocks in the
        # batch will have been computed before the next scheduling invocation.
        # This is because the engine assumes that a failure in model execution
        # will crash the vLLM instance / will not retry.
        for scheduled_seq_group in scheduler_outputs.scheduled_seq_groups:
            self.block_manager.mark_blocks_as_computed(
                scheduled_seq_group.seq_group,
                scheduled_seq_group.token_chunk_size)

        self._seq_group_metadata_cache[self.next_cache_id].reset()

        scheduler_time = time.perf_counter() - scheduler_start_time
        # Add this scheduler time to all the sequences that are currently
        # running. This helps estimate whether the scheduler is a significant
        # component of the e2e latency.
        for seq_group in self.running:
            if seq_group is not None and seq_group.metrics is not None:
                if seq_group.metrics.scheduler_time is not None:
                    seq_group.metrics.scheduler_time += scheduler_time
                else:
                    seq_group.metrics.scheduler_time = scheduler_time

        # Move to next cache (if exists)
        self.cache_id = self.next_cache_id

        # Return results
        return (seq_group_metadata_list, scheduler_outputs,
                allow_async_output_proc)

    def fork_seq(self, parent_seq: Sequence, child_seq: Sequence) -> None:
        self.block_manager.fork(parent_seq, child_seq)

    def free_seq(self, seq: Sequence) -> None:
        """Free a sequence from a block table."""
        self.block_manager.free(seq)

    def remove_seq_from_computed_blocks_tracker(
            self, seq_group: SequenceGroup,
            status: Optional[SequenceStatus]) -> None:
        seqs = seq_group.get_seqs(status=status)
        for seq in seqs:
            self._remove_seq_from_computed_blocks_tracker(seq)

    def _remove_seq_from_computed_blocks_tracker(self, seq: Sequence) -> None:
        """
        Remove a sequence from the computed blocks tracker's
        _seq_id_to_blocks_hashes and _seq_id_to_num_tokens_computed maps.
        """
        self.block_manager.remove_seq_from_computed_blocks_tracker(seq)

    def _free_finished_seqs(self, seq_group: SequenceGroup) -> None:
        """Free finished seqs in a sequence group."""
        for seq in seq_group.get_seqs():
            if seq.is_finished():
                self.free_seq(seq)

    def _free_finished_seq_group(self, seq_group: SequenceGroup) -> None:
        if seq_group.is_finished():
            # Free cross-attention block table, if it exists
            self._free_seq_group_cross_attn_blocks(seq_group)

            # Add the finished requests to the finished requests list.
            # This list will be used to update the Mamba cache in the
            # next step.
            self._finished_requests_ids.append(seq_group.request_id)

        # Free finished seqs
        self._free_finished_seqs(seq_group)

    def free_finished_seq_groups(self) -> None:
        remaining: Deque[SequenceGroup] = deque()
        for seq_group in self.running:
            self._free_finished_seq_group(seq_group)
            if not seq_group.is_finished():
                remaining.append(seq_group)

        self.running = remaining

        # Handle async stopped sequence groups
        # (ones that reached max model len)
        if self._async_stopped:
            for seq_group in self._async_stopped:
                self._free_seq_group_cross_attn_blocks(seq_group)
                self._finished_requests_ids.append(seq_group.request_id)

                # Free finished seqs
                self._free_finished_seqs(seq_group)

            self._async_stopped.clear()

    def _allocate_and_set_running(self, seq_group: SequenceGroup) -> None:
        self.block_manager.allocate(seq_group)
        for seq in seq_group.get_seqs(status=SequenceStatus.WAITING):
            seq.status = SequenceStatus.RUNNING

    def _append_slots(
        self,
        seq_group: SequenceGroup,
        blocks_to_copy: List[Tuple[int, int]],
        enable_chunking: bool = False,
    ) -> None:
        """Appends new slots to the sequences in the given sequence group.

        Args:
            seq_group (SequenceGroup): The sequence group containing the
                sequences to append slots to.
            blocks_to_copy (List[Tuple[int, int]]): A list of tuples of two
                ints; the first int is the source block index, and the second
                int is the destination block index. This list is updated with
                the new source and destination block indices for the appended
                slots.
            enable_chunking (bool): True if chunked prefill is enabled.
        """
        is_prefill: bool = seq_group.is_prefill()
        num_lookahead_slots: int = self._get_num_lookahead_slots(
            is_prefill, enable_chunking)

        seq_group.init_multi_step_from_lookahead_slots(
            num_lookahead_slots,
            num_scheduler_steps=self.scheduler_config.num_scheduler_steps,
            is_multi_step=self.scheduler_config.is_multi_step,
            enable_chunking=enable_chunking,
        )

        seq_status: Optional[SequenceStatus] = SequenceStatus.RUNNING
        if self.scheduler_config.is_multi_step and enable_chunking:
            # In multi-step chunked-prefill any sequence type can have
            # slots appended.
            seq_status = None

        for seq in seq_group.get_seqs(status=seq_status):
            cows = self.block_manager.append_slots(seq, num_lookahead_slots)
            if len(cows) > 0:
                blocks_to_copy.extend(cows)

    def _preempt(self, seq_group: SequenceGroup,
                 blocks_to_swap_out: List[Tuple[int, int]]) -> PreemptionMode:
        # If preemption mode is not specified, we determine the mode as follows:
        # We use recomputation by default since it incurs lower overhead than
        # swapping. However, when the sequence group has multiple sequences
        # (e.g., beam search), recomputation is not currently supported. In
        # such a case, we use swapping instead.
        # FIXME(woosuk): This makes our scheduling policy a bit bizarre.
        # As swapped sequences are prioritized over waiting sequences,
        # sequence groups with multiple sequences are implicitly prioritized
        # over sequence groups with a single sequence.
        # TODO(woosuk): Support recomputation for sequence groups with multiple
        # sequences. This may require a more sophisticated CUDA kernel.
        if self.user_specified_preemption_mode is None:
            if seq_group.get_max_num_running_seqs() == 1:
                preemption_mode = PreemptionMode.RECOMPUTE
            else:
                preemption_mode = PreemptionMode.SWAP

        elif self.user_specified_preemption_mode == "swap":
            preemption_mode = PreemptionMode.SWAP
        else:
            preemption_mode = PreemptionMode.RECOMPUTE

        if self.num_cumulative_preemption % 50 == 0:
            logger.warning(
                "Sequence group %s is preempted by %s mode because there is "
                "not enough KV cache space. This can affect the end-to-end "
                "performance. Increase gpu_memory_utilization or "
                "tensor_parallel_size to provide more KV cache memory. "
                "total_num_cumulative_preemption=%d",
                seq_group.request_id,
                preemption_mode,
                self.num_cumulative_preemption + 1,
            )
        self.num_cumulative_preemption += 1

        if preemption_mode == PreemptionMode.RECOMPUTE:
            self._preempt_by_recompute(seq_group)
        elif preemption_mode == PreemptionMode.SWAP:
            self._preempt_by_swap(seq_group, blocks_to_swap_out)
        else:
            raise AssertionError("Invalid preemption mode.")
        return preemption_mode

    def _preempt_by_recompute(
        self,
        seq_group: SequenceGroup,
    ) -> None:
        seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
        assert len(seqs) == 1
        for seq in seqs:
            seq.status = SequenceStatus.WAITING
            self.free_seq(seq)
            seq.reset_state_for_recompute()
        self._free_seq_group_cross_attn_blocks(seq_group)

    def _preempt_by_swap(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: List[Tuple[int, int]],
    ) -> None:
        self._swap_out(seq_group, blocks_to_swap_out)

    def _swap_in(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_in: List[Tuple[int, int]],
    ) -> None:
        mapping = self.block_manager.swap_in(seq_group)
        blocks_to_swap_in.extend(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED):
            seq.status = SequenceStatus.RUNNING

    def _swap_out(
        self,
        seq_group: SequenceGroup,
        blocks_to_swap_out: List[Tuple[int, int]],
    ) -> None:
        if not self.block_manager.can_swap_out(seq_group):
            # FIXME(woosuk): Abort the sequence group instead of aborting the
            # entire engine.
            raise RuntimeError(
                "Aborted due to the lack of CPU swap space. Please increase "
                "the swap space to avoid this error.")
        mapping = self.block_manager.swap_out(seq_group)
        blocks_to_swap_out.extend(mapping)
        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            seq.status = SequenceStatus.SWAPPED

    def _passed_delay(self, now: float) -> bool:
        if self.prev_prompt:
            self.last_prompt_latency = now - self.prev_time
        self.prev_time, self.prev_prompt = now, False
        # Delay scheduling prompts to let waiting queue fill up
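        # A new prompt is scheduled only once the oldest waiting request has
        # waited at least delay_factor * last_prompt_latency, or when nothing
        # is running.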
        if self.scheduler_config.delay_factor > 0 and self.waiting:
            earliest_arrival_time = min(
                [e.metrics.arrival_time for e in self.waiting])
            passed_delay = ((now - earliest_arrival_time)
                            > (self.scheduler_config.delay_factor *
                               self.last_prompt_latency) or not self.running)
        else:
            passed_delay = True
        return passed_delay

    def _get_num_lookahead_slots(self, is_prefill: bool,
                                 enable_chunking: bool) -> int:
        """The number of slots to allocate per sequence per step, beyond known
        token ids. Speculative decoding uses these slots to store KV activations
        of tokens which may or may not be accepted.

        Speculative decoding does not yet support prefill, so we do not perform
        lookahead allocation for prefill.

        When chunking is enabled with multi-step, we allocate lookahead slots
        for the prefills for when the prefills turn into decodes in the first
        step.
        """
        if is_prefill:
            if self.scheduler_config.is_multi_step and enable_chunking:
                # num_lookahead_slots was introduced in the context of decodes
                # for speculative decoding.
                # When num_scheduler_steps is 8, say, num_lookahead_slots is 7:
                # we are doing one step of decode anyway and we wish to do 7
                # more.
                #
                # "Lookaheads" for prefills were introduced to support
                # chunked prefill in multi-step.
                return self.scheduler_config.num_lookahead_slots + 1
            else:
                return 0

        return self.scheduler_config.num_lookahead_slots

    def _get_num_new_uncached_and_cached_tokens(
        self,
        seq_group: SequenceGroup,
        status: SequenceStatus,
        enable_chunking: bool,
        budget: SchedulingBudget,
        partial_prefill_metadata: Optional[PartialPrefillMetadata] = None,
    ) -> Tuple[int, int]:
        """
        Returns the number of new uncached and cached tokens to schedule for a
        given sequence group that's in a given `status`.

        The API could chunk the number of tokens to compute based on `budget`
        if `enable_chunking` is True. If a sequence group has multiple
        sequences (e.g., running beam search), it means it is in decoding
        phase, so chunking doesn't happen.

        Returns (0, 0) if the new token cannot be computed due to token budget.

        The cached tokens' blocks are already computed, and the attention
        backend will reuse the cached blocks rather than recomputing them. So
        the scheduler could schedule these cached tokens "for free".

        Args:
            seq_group: The sequence group to get the number of new tokens to
                schedule.
            status: The status of the sequences to get the number of new tokens
                to schedule.
            enable_chunking: Whether to chunk the number of tokens to compute.
            budget: The budget to chunk the number of tokens to compute.
            partial_prefill_metadata: information about the partial prefills
                that are currently running


        Returns:
            A tuple of two ints. The first int is the number of new uncached
            tokens to schedule. The second int is the number of cached tokens.
            If no more new tokens can be scheduled, returns (0, 0).
        """
        num_cached_new_tokens = 0
        num_uncached_new_tokens = 0

        seqs = seq_group.get_seqs(status=status)
        # Compute the number of new uncached and cached tokens for
        # each sequence.
        for seq in seqs:
            if not seq.is_prefill():
                # Decode sequences should always just have 1 uncached token
                # TODO(rickyx): Actually is this still correct for multi-step?
                num_uncached_new_tokens += 1
                continue

            num_computed_tokens_seq = seq.get_num_computed_tokens()
            all_num_new_tokens_seq = seq.get_len() - num_computed_tokens_seq
            if not self.cache_config.enable_prefix_caching:
                # If prefix caching is not enabled, all new tokens are uncached.
                num_uncached_new_tokens += all_num_new_tokens_seq
                continue

            # NOTE: a cached token might currently be in a block that sits in
            # the evictor, meaning it's not yet allocated. However, we don't
            # exclude such tokens from the cache count because the block is
            # guaranteed to be allocated later if the sequence can be allocated.
            num_cached_tokens_seq = self.block_manager.get_num_cached_tokens(
                seq)

            # Sanity check.
            if num_cached_tokens_seq < num_computed_tokens_seq:
                # This should only happen with chunked prefill, and
                # the seq is still in prefill. The `num_cached_tokens_seq`
                # is the value we calculated on scheduling the first prefill.
                # For subsequent continuous prefill steps, we keep reusing that
                # cached token count for the sequence, so it can be less than
                # the number of computed tokens.
                # See comments on `ComputedBlocksTracker` for more details.
                assert (
                    seq.is_prefill() and seq.status == SequenceStatus.RUNNING
                    and self.scheduler_config.chunked_prefill_enabled
                ), ("Number of cached tokens should not be less than the "
                    "number of computed tokens for a sequence that's still "
                    f"in prefill. But there are {num_cached_tokens_seq} cached "
                    f"tokens and {num_computed_tokens_seq} computed tokens "
                    f"for sequence {seq.seq_id}.")

            num_cached_new_tokens_seq = max(
                0, num_cached_tokens_seq - num_computed_tokens_seq)
            num_uncached_new_tokens_seq = (all_num_new_tokens_seq -
                                           num_cached_new_tokens_seq)

            num_uncached_new_tokens += num_uncached_new_tokens_seq
            num_cached_new_tokens += num_cached_new_tokens_seq

        if num_uncached_new_tokens == 0 and num_cached_new_tokens > 0:
            # For a fully cached hit sequence, we actually need to recompute the
            # last token. So we need at least 1 uncached token to schedule.
            # See ModelRunner._compute_for_prefix_cache_hit for more details.
            num_uncached_new_tokens = 1
            num_cached_new_tokens -= 1

        if enable_chunking and len(seqs) == 1:
            # Chunk if a running request cannot fit in the given budget.
            # If the number of seqs > 1, the group is doing beam search
            # in the decode phase, so do not chunk.
            num_uncached_new_tokens = self._chunk_new_tokens_to_schedule(
                self.scheduler_config,
                self.cache_config,
                budget,
                self._get_prompt_limit(seq_group),
                num_uncached_new_tokens,
                self.partial_prefill_budget_lookup_list,
                partial_prefill_metadata,
            )

        return num_uncached_new_tokens, num_cached_new_tokens

    @staticmethod
    def _chunk_new_tokens_to_schedule(
        scheduler_config: SchedulerConfig,
        cache_config: CacheConfig,
        budget: SchedulingBudget,
        prompt_limit: int,
        num_new_tokens: int,
        partial_prefill_budget_lookup_list: List[int],
        partial_prefill_metadata: Optional[PartialPrefillMetadata] = None,
    ) -> int:
        """
        Chunks the number of new tokens to schedule based on the budget when
        chunked prefill is enabled.

        Args:
            scheduler_config: The scheduler config.
            cache_config: The cache config.
            budget: The budget to chunk the number of tokens to compute.
            prompt_limit: The maximum number of tokens allowed in a prompt.
            num_new_tokens: The number of new tokens to schedule.

        Returns:
            The number of new tokens to schedule after chunking.
        """
        remaining_token_budget = budget.remaining_token_budget()
        if scheduler_config.is_multi_step:
            # The current multi-step + chunked prefill capability does
            # not actually support chunking prompts.
            #
            # Therefore, `num_new_tokens` is computed in the same fashion
            # for both multi-step+chunked-prefill &
            # multi-step+chunked-prefill+APC
            #
            # Prompts with more tokens than the current remaining budget
            # are postponed to future scheduler steps
            if num_new_tokens > prompt_limit:
                # If the seq_group is in prompt-stage, pass the
                # num_new_tokens as-is so the caller can ignore
                # the sequence.
                return num_new_tokens

            return 0 if num_new_tokens > \
                remaining_token_budget else num_new_tokens

        # Get the number of tokens to allocate to this prefill slot
        prefill_slot_budget = (
            remaining_token_budget if partial_prefill_metadata is None else
            partial_prefill_budget_lookup_list[
                partial_prefill_metadata.schedulable_prefills])

        if cache_config.enable_prefix_caching:
            # When prefix caching is enabled and we're partially prefilling
            # a sequence, we always allocate a number of new tokens that is
            # divisible by the block size to avoid partial block matching.
            block_size = cache_config.block_size
            # Don't exceed either the total budget or slot budget.
            # Take min of those and get the next lowest multiple of the
            # block size:
            remaining_token_budget = (
                min(remaining_token_budget, prefill_slot_budget) //
                block_size) * block_size
            # NB: In the case where num_new_tokens < budget, we are
            # finishing prefill for this sequence, so we do not need to
            # allocate a full block.

        num_new_tokens = min(num_new_tokens, remaining_token_budget,
                             prefill_slot_budget)

        return num_new_tokens
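
To make the uncached/cached token accounting in _get_num_new_uncached_and_cached_tokens concrete, here is a minimal standalone sketch of the same arithmetic for a single prefill sequence with prefix caching enabled. It does not use any vLLM classes; the helper name split_new_tokens and the numbers are purely illustrative.

def split_new_tokens(seq_len: int, num_computed: int, num_cached: int):
    """Sketch of the uncached/cached split for one prefill sequence.

    seq_len: total tokens in the sequence (prompt + any generated tokens)
    num_computed: tokens whose KV entries have already been computed
    num_cached: tokens covered by prefix-cache hits (>= num_computed here)
    """
    all_new = seq_len - num_computed
    cached_new = max(0, num_cached - num_computed)
    uncached_new = all_new - cached_new

    # A fully cached hit still needs its last token recomputed, so at least
    # one token must be scheduled as uncached.
    if uncached_new == 0 and cached_new > 0:
        uncached_new, cached_new = 1, cached_new - 1
    return uncached_new, cached_new


# E.g. a 48-token prompt, nothing computed yet, 32 tokens hit the prefix
# cache: 16 uncached + 32 cached new tokens are scheduled.
print(split_new_tokens(48, 0, 32))   # (16, 32)
# Fully cached 48-token prompt: the last token is still recomputed.
print(split_new_tokens(48, 0, 48))   # (1, 47)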

_async_stopped instance-attribute

_async_stopped: List[SequenceGroup] = []

_finished_requests_ids instance-attribute

_finished_requests_ids: List[str] = list()

_scheduled_seq_group_cache instance-attribute

_scheduled_seq_group_cache: List[PyObjectCache] = []

_scheduler_running_outputs_cache instance-attribute

_scheduler_running_outputs_cache: List[PyObjectCache] = []

_seq_group_metadata_cache instance-attribute

_seq_group_metadata_cache: List[PyObjectCache] = []

artificial_preempt_cnt instance-attribute

artificial_preempt_cnt = (
    ARTIFICIAL_PREEMPTION_MAX_CNT
    if enable_artificial_preemption
    else 0
)

block_manager instance-attribute

block_manager = BlockSpaceManagerImpl(
    block_size=block_size,
    num_gpu_blocks=num_gpu_blocks,
    num_cpu_blocks=num_cpu_blocks,
    sliding_window=sliding_window,
    enable_caching=enable_prefix_caching,
)

cache_config instance-attribute

cache_config = cache_config

cache_id instance-attribute

cache_id = 0

enable_artificial_preemption instance-attribute

enable_artificial_preemption = ENABLE_ARTIFICIAL_PREEMPT

last_prompt_latency instance-attribute

last_prompt_latency = 0.0

lora_config instance-attribute

lora_config = lora_config

lora_enabled property

lora_enabled: bool

next_cache_id property

next_cache_id

num_cache_iters instance-attribute

num_cache_iters = 2 if use_async_output_proc else 1

num_cumulative_preemption instance-attribute

num_cumulative_preemption: int = 0

num_decoding_tokens_per_seq property

num_decoding_tokens_per_seq: int

The number of new tokens.

output_proc_callback instance-attribute

output_proc_callback = output_proc_callback

partial_prefill_budget_lookup_list instance-attribute

partial_prefill_budget_lookup_list = [0] * (
    max_num_partial_prefills + 1
)

prev_prompt instance-attribute

prev_prompt = False

prev_time instance-attribute

prev_time = 0.0

running instance-attribute

running: Deque[SequenceGroup] = deque()

scheduler_config instance-attribute

scheduler_config = scheduler_config

swapped instance-attribute

swapped: Deque[SequenceGroup] = deque()

use_async_output_proc instance-attribute

use_async_output_proc = output_proc_callback is not None

user_specified_preemption_mode instance-attribute

user_specified_preemption_mode = preemption_mode

waiting instance-attribute

waiting: Deque[SequenceGroup] = deque()

__init__

__init__(
    scheduler_config: SchedulerConfig,
    cache_config: CacheConfig,
    lora_config: Optional[LoRAConfig],
    pipeline_parallel_size: int = 1,
    output_proc_callback: Optional[Callable] = None,
) -> None
Source code in vllm/core/scheduler.py
def __init__(
    self,
    scheduler_config: SchedulerConfig,
    cache_config: CacheConfig,
    lora_config: Optional[LoRAConfig],
    pipeline_parallel_size: int = 1,
    output_proc_callback: Optional[Callable] = None,
) -> None:
    self.scheduler_config = scheduler_config
    self.cache_config = cache_config
    # Note for LoRA scheduling: the current policy is extremely
    # simple and NOT fair. It can lead to starvation of some
    # LoRAs. This should be improved in the future.
    self.lora_config = lora_config

    version = "selfattn"
    if (self.scheduler_config.runner_type == "pooling"
            or self.cache_config.is_attention_free):
        version = "placeholder"

    BlockSpaceManagerImpl = BlockSpaceManager.get_block_space_manager_class(
        version)

    num_gpu_blocks = cache_config.num_gpu_blocks
    if num_gpu_blocks:
        num_gpu_blocks //= pipeline_parallel_size

    num_cpu_blocks = cache_config.num_cpu_blocks
    if num_cpu_blocks:
        num_cpu_blocks //= pipeline_parallel_size

    # Create the block space manager.
    self.block_manager = BlockSpaceManagerImpl(
        block_size=self.cache_config.block_size,
        num_gpu_blocks=num_gpu_blocks,
        num_cpu_blocks=num_cpu_blocks,
        sliding_window=self.cache_config.sliding_window,
        enable_caching=self.cache_config.enable_prefix_caching,
    )

    # Sequence groups in the WAITING state.
    # Contain new prefill or preempted requests.
    self.waiting: Deque[SequenceGroup] = deque()
    # Sequence groups in the RUNNING state.
    # Contain decode requests.
    self.running: Deque[SequenceGroup] = deque()
    # Sequence groups in the SWAPPED state.
    # Contain decode requests that are swapped out.
    self.swapped: Deque[SequenceGroup] = deque()
    # Sequence groups finished requests ids since last step iteration.
    # It lets the model know that any state associated with these requests
    # can and must be released after the current step.
    # This is used to evict the finished requests from the Mamba cache.
    self._finished_requests_ids: List[str] = list()
    # Time at previous scheduling step
    self.prev_time = 0.0
    # Did we schedule a prompt at previous step?
    self.prev_prompt = False
    # Latency of the last prompt step
    self.last_prompt_latency = 0.0
    # preemption mode, RECOMPUTE or SWAP
    self.user_specified_preemption_mode = scheduler_config.preemption_mode

    # The following field is test-only. It is used to inject artificial
    # preemption.
    self.enable_artificial_preemption = ENABLE_ARTIFICIAL_PREEMPT
    self.artificial_preempt_cnt = (ARTIFICIAL_PREEMPTION_MAX_CNT
                                   if self.enable_artificial_preemption
                                   else 0)
    self.num_cumulative_preemption: int = 0

    # Used to cache python objects
    self._seq_group_metadata_cache: List[PyObjectCache] = []
    self._scheduler_running_outputs_cache: List[PyObjectCache] = []
    self._scheduled_seq_group_cache: List[PyObjectCache] = []

    # For async output processing, we need to swap cache buffers between
    # iterations. I.e. since the output processing is lagged one step,
    # we cannot reuse the cached objects immediately when the schedule()
    # is called again, but only when schedule() is called the second time.
    self.output_proc_callback = output_proc_callback
    self.use_async_output_proc = self.output_proc_callback is not None
    self.num_cache_iters = 2 if self.use_async_output_proc else 1

    self.cache_id = 0
    for i in range(self.num_cache_iters):
        self._seq_group_metadata_cache.append(
            PyObjectCache(seq_group_metadata_builder))
        self._scheduler_running_outputs_cache.append(
            PyObjectCache(scheduler_running_outputs_builder))
        self._scheduled_seq_group_cache.append(
            PyObjectCache(scheduled_seq_group_builder))

    # For the async postprocessor, the extra decode run cannot be done
    # when the request reaches max_model_len. In this case, the request
    # will be stopped during the schedule() call and added to this stop list
    # for processing and deallocation by free_finished_seq_groups().
    self._async_stopped: List[SequenceGroup] = []

    # List with the chunk sizes to hand out to each sequence depending
    # on how many partial prefills are running. This is slightly faster than
    # running an integer division every time a prefill is scheduled.
    # This splits the budget evenly among all prefills.
    self.partial_prefill_budget_lookup_list = [0] * (
        self.scheduler_config.max_num_partial_prefills + 1)
    self.partial_prefill_budget_lookup_list[0] = (
        scheduler_config.max_num_batched_tokens)
    for i in range(1, self.scheduler_config.max_num_partial_prefills + 1):
        self.partial_prefill_budget_lookup_list[i] = (
            scheduler_config.max_num_batched_tokens // i)
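
As a quick illustration of the lookup list built in __init__, the per-slot prefill budgets for a hypothetical configuration (max_num_batched_tokens=2048, max_num_partial_prefills=4) work out as follows; this snippet simply repeats the loop above outside of the class.

# Hypothetical config values, for illustration only.
max_num_batched_tokens = 2048
max_num_partial_prefills = 4

lookup = [0] * (max_num_partial_prefills + 1)
lookup[0] = max_num_batched_tokens
for i in range(1, max_num_partial_prefills + 1):
    lookup[i] = max_num_batched_tokens // i

print(lookup)  # [2048, 2048, 1024, 682, 512]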

_add_seq_group_to_running

_add_seq_group_to_running(seq_group: SequenceGroup) -> None
Source code in vllm/core/scheduler.py
def _add_seq_group_to_running(self, seq_group: SequenceGroup) -> None:
    # Add sequence groups to the running queue.
    # Only for testing purposes.
    self.running.append(seq_group)

_add_seq_group_to_swapped

_add_seq_group_to_swapped(seq_group: SequenceGroup) -> None
Source code in vllm/core/scheduler.py
def _add_seq_group_to_swapped(self, seq_group: SequenceGroup) -> None:
    # Add sequence groups to the swapped queue.
    # Only for testing purposes.
    self.swapped.append(seq_group)

_allocate_and_set_running

_allocate_and_set_running(seq_group: SequenceGroup) -> None
Source code in vllm/core/scheduler.py
def _allocate_and_set_running(self, seq_group: SequenceGroup) -> None:
    self.block_manager.allocate(seq_group)
    for seq in seq_group.get_seqs(status=SequenceStatus.WAITING):
        seq.status = SequenceStatus.RUNNING

_allow_async_output_proc

_allow_async_output_proc(seq_group: SequenceGroup) -> bool
Source code in vllm/core/scheduler.py
def _allow_async_output_proc(self, seq_group: SequenceGroup) -> bool:
    # async_output_proc is allowed only when we have a single sequence
    # in the sequence group
    no_single_seq = seq_group.sampling_params is None or (
        seq_group.sampling_params.n == 1)
    return no_single_seq

_append_slots

_append_slots(
    seq_group: SequenceGroup,
    blocks_to_copy: List[Tuple[int, int]],
    enable_chunking: bool = False,
) -> None

Appends new slots to the sequences in the given sequence group.

Parameters:

    seq_group (SequenceGroup, required):
        The sequence group containing the sequences to append slots to.
    blocks_to_copy (List[Tuple[int, int]], required):
        A list of tuples of two ints; the first int is the source block index,
        and the second int is the destination block index. This list is
        updated with the new source and destination block indices for the
        appended slots.
    enable_chunking (bool, default False):
        True if chunked prefill is enabled.
Source code in vllm/core/scheduler.py
def _append_slots(
    self,
    seq_group: SequenceGroup,
    blocks_to_copy: List[Tuple[int, int]],
    enable_chunking: bool = False,
) -> None:
    """Appends new slots to the sequences in the given sequence group.

    Args:
        seq_group (SequenceGroup): The sequence group containing the
            sequences to append slots to.
        blocks_to_copy (List[Tuple[int, int]]): A list of tuples of two
            ints; the first int is the source block index, and the second
            int is the destination block index. This list is updated with
            the new source and destination block indices for the appended
            slots.
        enable_chunking (bool): True if chunked prefill is enabled.
    """
    is_prefill: bool = seq_group.is_prefill()
    num_lookahead_slots: int = self._get_num_lookahead_slots(
        is_prefill, enable_chunking)

    seq_group.init_multi_step_from_lookahead_slots(
        num_lookahead_slots,
        num_scheduler_steps=self.scheduler_config.num_scheduler_steps,
        is_multi_step=self.scheduler_config.is_multi_step,
        enable_chunking=enable_chunking,
    )

    seq_status: Optional[SequenceStatus] = SequenceStatus.RUNNING
    if self.scheduler_config.is_multi_step and enable_chunking:
        # In multi-step chunked-prefill any sequence type can have
        # slots appended.
        seq_status = None

    for seq in seq_group.get_seqs(status=seq_status):
        cows = self.block_manager.append_slots(seq, num_lookahead_slots)
        if len(cows) > 0:
            blocks_to_copy.extend(cows)

_can_append_slots

_can_append_slots(
    seq_group: SequenceGroup, enable_chunking: bool
) -> bool

Determine whether or not we have enough space in the KV cache to continue generation of the sequence group.

Source code in vllm/core/scheduler.py
def _can_append_slots(self, seq_group: SequenceGroup,
                      enable_chunking: bool) -> bool:
    """Determine whether or not we have enough space in the KV cache to
    continue generation of the sequence group.
    """
    # It is True only for testing case to trigger artificial preemption.
    if (self.enable_artificial_preemption
            and random.uniform(0, 1) < ARTIFICIAL_PREEMPTION_PROB
            and self.artificial_preempt_cnt > 0):
        self.artificial_preempt_cnt -= 1
        return False

    is_prefill = seq_group.is_prefill()
    num_lookahead_slots = self._get_num_lookahead_slots(
        is_prefill, enable_chunking)

    if is_prefill and num_lookahead_slots > 0:
        # Appending prefill slots only happens when multi-step and
        # chunked-prefill are enabled together.
        assert self.scheduler_config.is_multi_step and enable_chunking

    return self.block_manager.can_append_slots(
        seq_group=seq_group, num_lookahead_slots=num_lookahead_slots)

_chunk_new_tokens_to_schedule staticmethod

_chunk_new_tokens_to_schedule(
    scheduler_config: SchedulerConfig,
    cache_config: CacheConfig,
    budget: SchedulingBudget,
    prompt_limit: int,
    num_new_tokens: int,
    partial_prefill_budget_lookup_list: List[int],
    partial_prefill_metadata: Optional[
        PartialPrefillMetadata
    ] = None,
) -> int

Chunks the number of new tokens to schedule based on the budget when chunked prefill is enabled.

Parameters:

    scheduler_config (SchedulerConfig, required):
        The scheduler config.
    cache_config (CacheConfig, required):
        The cache config.
    budget (SchedulingBudget, required):
        The budget to chunk the number of tokens to compute.
    prompt_limit (int, required):
        The maximum number of tokens allowed in a prompt.
    num_new_tokens (int, required):
        The number of new tokens to schedule.

Returns:

    int: The number of new tokens to schedule after chunking.

Source code in vllm/core/scheduler.py
@staticmethod
def _chunk_new_tokens_to_schedule(
    scheduler_config: SchedulerConfig,
    cache_config: CacheConfig,
    budget: SchedulingBudget,
    prompt_limit: int,
    num_new_tokens: int,
    partial_prefill_budget_lookup_list: List[int],
    partial_prefill_metadata: Optional[PartialPrefillMetadata] = None,
) -> int:
    """
    Chunks the number of new tokens to schedule based on the budget when
    chunked prefill is enabled.

    Args:
        scheduler_config: The scheduler config.
        cache_config: The cache config.
        budget: The budget to chunk the number of tokens to compute.
        prompt_limit: The maximum number of tokens allowed in a prompt.
        num_new_tokens: The number of new tokens to schedule.

    Returns:
        The number of new tokens to schedule after chunking.
    """
    remaining_token_budget = budget.remaining_token_budget()
    if scheduler_config.is_multi_step:
        # The current multi-step + chunked prefill capability does
        # not actually support chunking prompts.
        #
        # Therefore, `num_new_tokens` is computed in the same fashion
        # for both multi-step+chunked-prefill &
        # multi-step+chunked-prefill+APC
        #
        # Prompts with more tokens than the current remaining budget
        # are postponed to future scheduler steps
        if num_new_tokens > prompt_limit:
            # If the seq_group is in prompt-stage, pass the
            # num_new_tokens as-is so the caller can ignore
            # the sequence.
            return num_new_tokens

        return 0 if num_new_tokens > \
            remaining_token_budget else num_new_tokens

    # Get the number of tokens to allocate to this prefill slot
    prefill_slot_budget = (
        remaining_token_budget if partial_prefill_metadata is None else
        partial_prefill_budget_lookup_list[
            partial_prefill_metadata.schedulable_prefills])

    if cache_config.enable_prefix_caching:
        # When prefix caching is enabled and we're partially prefilling
        # a sequence, we always allocate a number of new tokens that is
        # divisible by the block size to avoid partial block matching.
        block_size = cache_config.block_size
        # Don't exceed either the total budget or slot budget.
        # Take min of those and get the next lowest multiple of the
        # block size:
        remaining_token_budget = (
            min(remaining_token_budget, prefill_slot_budget) //
            block_size) * block_size
        # NB: In the case where num_new_tokens < budget, we are
        # finishing prefill for this sequence, so we do not need to
        # allocate a full block.

    num_new_tokens = min(num_new_tokens, remaining_token_budget,
                         prefill_slot_budget)

    return num_new_tokens
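
To see the effect of the block-size alignment above, here is a small standalone sketch of the non-multi-step chunking path. The helper chunk_tokens and the numbers are illustrative only and do not involve any vLLM objects.

def chunk_tokens(num_new_tokens: int,
                 remaining_token_budget: int,
                 prefill_slot_budget: int,
                 block_size: int,
                 enable_prefix_caching: bool) -> int:
    """Mirror of the non-multi-step chunking arithmetic."""
    if enable_prefix_caching:
        # Round the effective budget down to a multiple of the block size so
        # a partially prefilled sequence never ends mid-block.
        remaining_token_budget = (
            min(remaining_token_budget, prefill_slot_budget) //
            block_size) * block_size
    return min(num_new_tokens, remaining_token_budget, prefill_slot_budget)


# 1000 prompt tokens left, 768 tokens of global budget, a 512-token prefill
# slot, 16-token blocks: the chunk is capped at 512 (already block-aligned).
print(chunk_tokens(1000, 768, 512, 16, True))   # 512
# With a 700-token slot budget the cap becomes 688 = (700 // 16) * 16.
print(chunk_tokens(1000, 768, 700, 16, True))   # 688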

_free_finished_seq_group

_free_finished_seq_group(seq_group: SequenceGroup) -> None
Source code in vllm/core/scheduler.py
def _free_finished_seq_group(self, seq_group: SequenceGroup) -> None:
    if seq_group.is_finished():
        # Free cross-attention block table, if it exists
        self._free_seq_group_cross_attn_blocks(seq_group)

        # Add the finished requests to the finished requests list.
        # This list will be used to update the Mamba cache in the
        # next step.
        self._finished_requests_ids.append(seq_group.request_id)

    # Free finished seqs
    self._free_finished_seqs(seq_group)

_free_finished_seqs

_free_finished_seqs(seq_group: SequenceGroup) -> None

Free finished seqs in a sequence group.

Source code in vllm/core/scheduler.py
def _free_finished_seqs(self, seq_group: SequenceGroup) -> None:
    """Free finished seqs in a sequence group."""
    for seq in seq_group.get_seqs():
        if seq.is_finished():
            self.free_seq(seq)

_free_seq_group_cross_attn_blocks

_free_seq_group_cross_attn_blocks(
    seq_group: SequenceGroup,
) -> None

Free a sequence group from a cross-attention block table. Has no effect on decoder-only models.

Source code in vllm/core/scheduler.py
def _free_seq_group_cross_attn_blocks(
    self,
    seq_group: SequenceGroup,
) -> None:
    """
    Free a sequence group from a cross-attention block table.
    Has no effect on decoder-only models.
    """
    if seq_group.is_encoder_decoder():
        self.block_manager.free_cross(seq_group)

_get_num_lookahead_slots

_get_num_lookahead_slots(
    is_prefill: bool, enable_chunking: bool
) -> int

The number of slots to allocate per sequence per step, beyond known token ids. Speculative decoding uses these slots to store KV activations of tokens which may or may not be accepted.

Speculative decoding does not yet support prefill, so we do not perform lookahead allocation for prefill.

When chunking is enabled with multi-step, we allocate lookahead slots for the prefills for when the prefills turn into decodes in the first step.

Source code in vllm/core/scheduler.py
def _get_num_lookahead_slots(self, is_prefill: bool,
                             enable_chunking: bool) -> int:
    """The number of slots to allocate per sequence per step, beyond known
    token ids. Speculative decoding uses these slots to store KV activations
    of tokens which may or may not be accepted.

    Speculative decoding does not yet support prefill, so we do not perform
    lookahead allocation for prefill.

    When chunking is enabled with multi-step, we allocate lookahead slots
    for the prefills for when the prefills turn into decodes in the first
    step.
    """
    if is_prefill:
        if self.scheduler_config.is_multi_step and enable_chunking:
            # num_lookahead_slots was introduced in the context of decodes,
            # in Speculative Decoding.
            # When the num_scheduler_steps is 8, say, then the
            # num_lookahead_slots is 7. Meaning, we are doing a 1-step of
            # decode anyways and we wish to do 7 more.
            #
            # "lookaheads" for prefills, is introduced in support for
            # Chunked-Prefill in Multi-Step.
            return self.scheduler_config.num_lookahead_slots + 1
        else:
            return 0

    return self.scheduler_config.num_lookahead_slots
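
A minimal sketch of the branch above with assumed values: if num_scheduler_steps were 8, the config would expose num_lookahead_slots = 7, so a decode gets 7 lookahead slots, a chunked prefill under multi-step gets 7 + 1 = 8, and any other prefill gets 0.

def lookahead_slots(num_lookahead_slots: int, is_prefill: bool,
                    is_multi_step: bool, enable_chunking: bool) -> int:
    # Same branch structure as the method above, reproduced for illustration.
    if is_prefill:
        return (num_lookahead_slots + 1
                if is_multi_step and enable_chunking else 0)
    return num_lookahead_slots

assert lookahead_slots(7, is_prefill=False, is_multi_step=True, enable_chunking=True) == 7
assert lookahead_slots(7, is_prefill=True, is_multi_step=True, enable_chunking=True) == 8
assert lookahead_slots(7, is_prefill=True, is_multi_step=False, enable_chunking=True) == 0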

_get_num_new_uncached_and_cached_tokens

_get_num_new_uncached_and_cached_tokens(
    seq_group: SequenceGroup,
    status: SequenceStatus,
    enable_chunking: bool,
    budget: SchedulingBudget,
    partial_prefill_metadata: Optional[
        PartialPrefillMetadata
    ] = None,
) -> Tuple[int, int]

Returns the number of new uncached and cached tokens to schedule for a given sequence group that's in a given status.

The API could chunk the number of tokens to compute based on budget if enable_chunking is True. If a sequence group has multiple sequences (e.g., when running beam search), it is in the decoding phase, so chunking doesn't happen.

Returns (0, 0) if the new tokens cannot be computed due to the token budget.

The cached tokens' blocks are already computed, and the attention backend will reuse the cached blocks rather than recomputing them. So the scheduler can schedule these cached tokens "for free".

Parameters:

- seq_group (SequenceGroup): The sequence group to get the number of new tokens to schedule. Required.
- status (SequenceStatus): The status of the sequences to get the number of new tokens to schedule. Required.
- enable_chunking (bool): Whether to chunk the number of tokens to compute. Required.
- budget (SchedulingBudget): The budget to chunk the number of tokens to compute. Required.
- partial_prefill_metadata (Optional[PartialPrefillMetadata]): Information about the partial prefills that are currently running. Default: None.

Returns:

Tuple[int, int]: A tuple of two ints. The first int is the number of new uncached tokens to schedule, and the second is the number of cached tokens. If no more new tokens can be scheduled, returns (0, 0).

Source code in vllm/core/scheduler.py
def _get_num_new_uncached_and_cached_tokens(
    self,
    seq_group: SequenceGroup,
    status: SequenceStatus,
    enable_chunking: bool,
    budget: SchedulingBudget,
    partial_prefill_metadata: Optional[PartialPrefillMetadata] = None,
) -> Tuple[int, int]:
    """
    Returns the number of new uncached and cached tokens to schedule for a
    given sequence group that's in a given `status`.

    The API could chunk the number of tokens to compute based on `budget`
    if `enable_chunking` is True. If a sequence group has multiple
    sequences (e.g., running beam search), it means it is in decoding
    phase, so chunking doesn't happen.

    Returns (0, 0) if the new token cannot be computed due to token budget.

    The cached tokens's blocks are already computed, and the attention
    backend will reuse the cached blocks rather than recomputing them. So
    the scheduler could schedule these cached tokens "for free".

    Args:
        seq_group: The sequence group to get the number of new tokens to
            schedule.
        status: The status of the sequences to get the number of new tokens
            to schedule.
        enable_chunking: Whether to chunk the number of tokens to compute.
        budget: The budget to chunk the number of tokens to compute.
        partial_prefill_metadata: information about the partial prefills
            that are currently running


    Returns:
        A tuple of two ints. The first int is the number of new uncached
        tokens to schedule. The second int is the number of cached tokens.
        If no more new tokens can be scheduled, returns (0, 0).
    """
    num_cached_new_tokens = 0
    num_uncached_new_tokens = 0

    seqs = seq_group.get_seqs(status=status)
    # Compute the number of new uncached and cached tokens for
    # each sequence.
    for seq in seqs:
        if not seq.is_prefill():
            # Decode sequences should always just have 1 uncached token
            # TODO(rickyx): Actually is this still correct for multi-step?
            num_uncached_new_tokens += 1
            continue

        num_computed_tokens_seq = seq.get_num_computed_tokens()
        all_num_new_tokens_seq = seq.get_len() - num_computed_tokens_seq
        if not self.cache_config.enable_prefix_caching:
            # If prefix caching is not enabled, all new tokens are uncached.
            num_uncached_new_tokens += all_num_new_tokens_seq
            continue

        # NOTE: the cache token might be currently in a block that's in an
        # evictor meaning that it's not yet allocated. However, we don't
        # exclude such tokens in the cache count because it will be
        # guaranteed to be allocated later if the sequence can be allocated.
        num_cached_tokens_seq = self.block_manager.get_num_cached_tokens(
            seq)

        # Sanity check.
        if num_cached_tokens_seq < num_computed_tokens_seq:
            # This should only happen with chunked prefill, and
            # the seq is still in prefill. The `num_cached_tokens_seq`
            # is the value we calculated on scheduling the first prefill.
            # For subsequent continuous prefill steps, we cached the
            # number of cache tokens for the sequence so the cached token
            # count could be less than the number of computed tokens.
            # See comments on `ComputedBlocksTracker` for more details.
            assert (
                seq.is_prefill() and seq.status == SequenceStatus.RUNNING
                and self.scheduler_config.chunked_prefill_enabled
            ), ("Number of cached tokens should not be less than the "
                "number of computed tokens for a sequence that's still "
                f"in prefill. But there are {num_cached_tokens_seq} cached "
                f"tokens and {num_computed_tokens_seq} computed tokens "
                f"for sequence {seq.seq_id}.")

        num_cached_new_tokens_seq = max(
            0, num_cached_tokens_seq - num_computed_tokens_seq)
        num_uncached_new_tokens_seq = (all_num_new_tokens_seq -
                                       num_cached_new_tokens_seq)

        num_uncached_new_tokens += num_uncached_new_tokens_seq
        num_cached_new_tokens += num_cached_new_tokens_seq

    if num_uncached_new_tokens == 0 and num_cached_new_tokens > 0:
        # For a fully cached hit sequence, we actually need to recompute the
        # last token. So we need at least 1 uncached token to schedule.
        # See ModelRunner._compute_for_prefix_cache_hit for more details.
        num_uncached_new_tokens = 1
        num_cached_new_tokens -= 1

    if enable_chunking and len(seqs) == 1:
        # Chunk if a running request cannot fit in the given budget.
        # If number of seq > 1, it means it is doing beam search
        # in a decode phase. Do not chunk.
        num_uncached_new_tokens = self._chunk_new_tokens_to_schedule(
            self.scheduler_config,
            self.cache_config,
            budget,
            self._get_prompt_limit(seq_group),
            num_uncached_new_tokens,
            self.partial_prefill_budget_lookup_list,
            partial_prefill_metadata,
        )

    return num_uncached_new_tokens, num_cached_new_tokens
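
A minimal numeric sketch of the accounting above (illustrative values, not real sequences): the not-yet-computed tokens of a prefill are split into a cached part (blocks already present in the prefix cache) and an uncached part, and a fully cached prompt still reserves one uncached token so the last token is recomputed.

def split_new_tokens(seq_len: int, num_computed: int, num_cached: int):
    # Split the remaining tokens of one prefill sequence into cached/uncached.
    all_new = seq_len - num_computed
    cached_new = max(0, num_cached - num_computed)
    uncached_new = all_new - cached_new
    if uncached_new == 0 and cached_new > 0:
        # A full prefix-cache hit still recomputes the last token.
        uncached_new, cached_new = 1, cached_new - 1
    return uncached_new, cached_new

# 100-token prompt, nothing computed yet, 48 tokens found in the prefix cache:
assert split_new_tokens(100, 0, 48) == (52, 48)
# Fully cached 64-token prompt: schedule 1 uncached + 63 cached tokens.
assert split_new_tokens(64, 0, 64) == (1, 63)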

_get_priority

_get_priority(
    seq_group: SequenceGroup,
) -> Tuple[Optional[int], float]

Get the priority of the sequence group. Highest preference is given to the user-defined priority, followed by arrival time.

Parameters:

- seq_group (SequenceGroup): The sequence group input. Required.

Returns:

Tuple[Optional[int], float]: The priority of the sequence group.

Source code in vllm/core/scheduler.py
def _get_priority(self,
                  seq_group: SequenceGroup) -> Tuple[Optional[int], float]:
    """Get the priority of the sequence group.
    Highest preference to user-defined priority, followed by arrival time.
    Args:
        seq_group: The sequence group input.
    Returns:
        The priority of the sequence group.
    """
    return seq_group.priority, seq_group.arrival_time
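
Because the returned key is the tuple (priority, arrival_time), ordinary tuple comparison gives user-defined priority precedence and breaks ties by arrival time. A tiny sketch with hypothetical values (lower sorts first):

# Hypothetical (priority, arrival_time) keys as returned by _get_priority.
keys = [(1, 10.0), (0, 12.5), (0, 11.0)]
# Same-priority groups are ordered by arrival time; a lower priority value wins.
assert sorted(keys) == [(0, 11.0), (0, 12.5), (1, 10.0)]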

_get_prompt_limit

_get_prompt_limit(seq_group: SequenceGroup) -> int
Source code in vllm/core/scheduler.py
def _get_prompt_limit(self, seq_group: SequenceGroup) -> int:
    if (self.scheduler_config.chunked_prefill_enabled
            and not self.scheduler_config.is_multi_step):
        prompt_limit = self.scheduler_config.max_model_len
    else:
        prompt_limit = min(
            self.scheduler_config.max_model_len,
            self.scheduler_config.max_num_batched_tokens,
        )

    # Model is fine tuned with long context. Return the fine tuned max_len.
    if seq_group.lora_request and seq_group.lora_request.long_lora_max_len:
        assert prompt_limit <= seq_group.lora_request.long_lora_max_len
        return seq_group.lora_request.long_lora_max_len
    else:
        return prompt_limit
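
A hedged standalone rendering of the limit selection above (parameter names and numbers are illustrative): chunked prefill without multi-step allows prompts up to max_model_len, otherwise the prompt is also capped by max_num_batched_tokens, and a long-context LoRA overrides the limit with its fine-tuned length.

from typing import Optional

def prompt_limit(max_model_len: int, max_num_batched_tokens: int,
                 chunked_prefill: bool, is_multi_step: bool,
                 long_lora_max_len: Optional[int] = None) -> int:
    limit = (max_model_len if chunked_prefill and not is_multi_step
             else min(max_model_len, max_num_batched_tokens))
    # A long-context LoRA raises the limit to its fine-tuned length.
    return long_lora_max_len if long_lora_max_len else limit

# Assumed config: 32k context window, 8k batched-token budget.
assert prompt_limit(32768, 8192, chunked_prefill=True, is_multi_step=False) == 32768
assert prompt_limit(32768, 8192, chunked_prefill=False, is_multi_step=False) == 8192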

_order_finishing_prefills_first

_order_finishing_prefills_first(
    scheduled_prefill_seqs: List[ScheduledSequenceGroup],
) -> List[SequenceGroup]

Returns a list of prefilling SequenceGroups in which the groups scheduled to finish prefilling this step are listed first.

Source code in vllm/core/scheduler.py
def _order_finishing_prefills_first(
    self, scheduled_prefill_seqs: List[ScheduledSequenceGroup]
) -> List[SequenceGroup]:
    """Returns a list of prefilling SequenceGroups where sequences that are
    scheduled to finish prefilling are listed first"""
    finishing = [
        s.seq_group for s in scheduled_prefill_seqs
        if s.seq_group.get_num_uncomputed_tokens() == s.token_chunk_size
    ]
    not_finishing = [
        s.seq_group for s in scheduled_prefill_seqs
        if s.seq_group.get_num_uncomputed_tokens() != s.token_chunk_size
    ]
    return finishing + not_finishing
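
For example (hypothetical token counts): a group whose remaining uncomputed tokens equal its scheduled chunk finishes prefill this step, so it is moved ahead of a group that still has tokens left.

# Hypothetical (name, uncomputed_tokens, token_chunk_size) triples.
groups = [("still_prefilling", 512, 128), ("finishing", 128, 128)]
finishing = [name for name, left, chunk in groups if left == chunk]
not_finishing = [name for name, left, chunk in groups if left != chunk]
assert finishing + not_finishing == ["finishing", "still_prefilling"]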

_passed_delay

_passed_delay(now: float) -> bool
Source code in vllm/core/scheduler.py
def _passed_delay(self, now: float) -> bool:
    if self.prev_prompt:
        self.last_prompt_latency = now - self.prev_time
    self.prev_time, self.prev_prompt = now, False
    # Delay scheduling prompts to let waiting queue fill up
    if self.scheduler_config.delay_factor > 0 and self.waiting:
        earliest_arrival_time = min(
            [e.metrics.arrival_time for e in self.waiting])
        passed_delay = ((now - earliest_arrival_time)
                        > (self.scheduler_config.delay_factor *
                           self.last_prompt_latency) or not self.running)
    else:
        passed_delay = True
    return passed_delay
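
The delay heuristic above can be read as: once the oldest waiting request has been waiting longer than delay_factor times the latency of the previous prompt step (or nothing is running), new prompts are allowed through. A standalone sketch with assumed numbers:

def passed_delay(now: float, earliest_arrival: float, delay_factor: float,
                 last_prompt_latency: float, has_running: bool) -> bool:
    # With no delay factor (or an empty waiting queue in the scheduler),
    # prompts are always allowed.
    if delay_factor <= 0:
        return True
    waited = now - earliest_arrival
    return waited > delay_factor * last_prompt_latency or not has_running

# Oldest request has waited 0.3 s, the last prompt step took 0.2 s,
# delay_factor = 1.0 -> 0.3 > 0.2, so prompts may be scheduled.
assert passed_delay(10.3, 10.0, 1.0, 0.2, has_running=True)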

_preempt

_preempt(
    seq_group: SequenceGroup,
    blocks_to_swap_out: List[Tuple[int, int]],
) -> PreemptionMode
Source code in vllm/core/scheduler.py
def _preempt(self, seq_group: SequenceGroup,
             blocks_to_swap_out: List[Tuple[int, int]]) -> PreemptionMode:
    # If preemption mode is not specified, we determine the mode as follows:
    # We use recomputation by default since it incurs lower overhead than
    # swapping. However, when the sequence group has multiple sequences
    # (e.g., beam search), recomputation is not currently supported. In
    # such a case, we use swapping instead.
    # FIXME(woosuk): This makes our scheduling policy a bit bizarre.
    # As swapped sequences are prioritized over waiting sequences,
    # sequence groups with multiple sequences are implicitly prioritized
    # over sequence groups with a single sequence.
    # TODO(woosuk): Support recomputation for sequence groups with multiple
    # sequences. This may require a more sophisticated CUDA kernel.
    if self.user_specified_preemption_mode is None:
        if seq_group.get_max_num_running_seqs() == 1:
            preemption_mode = PreemptionMode.RECOMPUTE
        else:
            preemption_mode = PreemptionMode.SWAP

    elif self.user_specified_preemption_mode == "swap":
        preemption_mode = PreemptionMode.SWAP
    else:
        preemption_mode = PreemptionMode.RECOMPUTE

    if self.num_cumulative_preemption % 50 == 0:
        logger.warning(
            "Sequence group %s is preempted by %s mode because there is "
            "not enough KV cache space. This can affect the end-to-end "
            "performance. Increase gpu_memory_utilization or "
            "tensor_parallel_size to provide more KV cache memory. "
            "total_num_cumulative_preemption=%d",
            seq_group.request_id,
            preemption_mode,
            self.num_cumulative_preemption + 1,
        )
    self.num_cumulative_preemption += 1

    if preemption_mode == PreemptionMode.RECOMPUTE:
        self._preempt_by_recompute(seq_group)
    elif preemption_mode == PreemptionMode.SWAP:
        self._preempt_by_swap(seq_group, blocks_to_swap_out)
    else:
        raise AssertionError("Invalid preemption mode.")
    return preemption_mode
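
A condensed sketch of the mode selection above (names are illustrative): recomputation by default for single-sequence groups, swapping for groups with multiple sequences such as beam search, with an explicit user setting taking precedence.

from typing import Optional

def pick_preemption_mode(user_mode: Optional[str],
                         max_running_seqs: int) -> str:
    # Recompute is cheaper, but multi-sequence groups (e.g. beam search)
    # currently have to be swapped out instead.
    if user_mode is None:
        return "recompute" if max_running_seqs == 1 else "swap"
    return "swap" if user_mode == "swap" else "recompute"

assert pick_preemption_mode(None, 1) == "recompute"
assert pick_preemption_mode(None, 4) == "swap"
assert pick_preemption_mode("swap", 1) == "swap"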

_preempt_by_recompute

_preempt_by_recompute(seq_group: SequenceGroup) -> None
Source code in vllm/core/scheduler.py
def _preempt_by_recompute(
    self,
    seq_group: SequenceGroup,
) -> None:
    seqs = seq_group.get_seqs(status=SequenceStatus.RUNNING)
    assert len(seqs) == 1
    for seq in seqs:
        seq.status = SequenceStatus.WAITING
        self.free_seq(seq)
        seq.reset_state_for_recompute()
    self._free_seq_group_cross_attn_blocks(seq_group)

_preempt_by_swap

_preempt_by_swap(
    seq_group: SequenceGroup,
    blocks_to_swap_out: List[Tuple[int, int]],
) -> None
Source code in vllm/core/scheduler.py
def _preempt_by_swap(
    self,
    seq_group: SequenceGroup,
    blocks_to_swap_out: List[Tuple[int, int]],
) -> None:
    self._swap_out(seq_group, blocks_to_swap_out)

_remove_seq_from_computed_blocks_tracker

_remove_seq_from_computed_blocks_tracker(
    seq: Sequence,
) -> None

Frees a sequence's computed-blocks tracker state (_seq_id_to_blocks_hashes and _seq_id_to_num_tokens_computed).

Source code in vllm/core/scheduler.py
def _remove_seq_from_computed_blocks_tracker(self, seq: Sequence) -> None:
    """
    Free a sequence computed blocks tracker _seq_id_to_blocks_hashes
    and _seq_id_to_num_tokens_computed.
    """
    self.block_manager.remove_seq_from_computed_blocks_tracker(seq)

_schedule

_schedule() -> SchedulerOutputs

Schedule queued requests.

Source code in vllm/core/scheduler.py
def _schedule(self) -> SchedulerOutputs:
    """Schedule queued requests."""
    if self.scheduler_config.chunked_prefill_enabled:
        return self._schedule_chunked_prefill()
    else:
        return self._schedule_default()

_schedule_chunked_prefill

_schedule_chunked_prefill() -> SchedulerOutputs

Schedule queued requests.

Chunked prefill allows prefill requests to be chunked and batched together with decode requests. This policy 1. schedules as many decode requests as possible, 2. schedules chunked prefill requests that are not yet finished, 3. schedules swapped-out requests, and 4. schedules new prefill requests.

The policy sustains high GPU utilization because it can put prefill and decode requests into the same batch, while improving inter-token latency because decode requests are not blocked by prefill requests.

Source code in vllm/core/scheduler.py
def _schedule_chunked_prefill(self) -> SchedulerOutputs:
    """Schedule queued requests.

    Chunked prefill allows to chunk prefill requests, batch them together
    with decode requests. This policy 1. schedule as many decoding requests
    as possible. 2. schedule chunked prefill requests that are not
    finished. 3. schedule swapped request. 4. schedule new prefill
    requests.

    The policy can sustain the high GPU utilization because it can put
    prefill and decodes requests to the same batch, while it improves
    inter token latency because decodes requests don't need to be blocked
    by prefill requests.
    """
    budget = SchedulingBudget(
        token_budget=self.scheduler_config.max_num_batched_tokens,
        max_num_seqs=self.scheduler_config.max_num_seqs,
    )
    curr_loras: Set[int] = set()

    prefills = SchedulerPrefillOutputs.create_empty()
    swapped_in = SchedulerSwappedInOutputs.create_empty()

    # Create partial prefill metadata
    partial_prefill_metadata = PartialPrefillMetadata.from_queues(
        running=self.running,
        waiting=self.waiting,
        scheduler_config=self.scheduler_config,
    )

    # Decoding should be always scheduled first by fcfs.
    running_scheduled = self._schedule_running(
        budget,
        curr_loras,
        enable_chunking=True,
        partial_prefill_metadata=partial_prefill_metadata,
    )

    # Schedule swapped out requests.
    # If preemption happens, it means we don't have space for swap-in.
    if len(running_scheduled.preempted) + len(
            running_scheduled.swapped_out) == 0:
        swapped_in = self._schedule_swapped(budget, curr_loras)

    prefills = self._schedule_prefills(
        budget,
        curr_loras,
        enable_chunking=True,
        partial_prefill_metadata=partial_prefill_metadata,
    )

    assert (budget.num_batched_tokens
            <= self.scheduler_config.max_num_batched_tokens)
    assert budget.num_curr_seqs <= self.scheduler_config.max_num_seqs

    # Update waiting requests.
    self.waiting.extendleft(running_scheduled.preempted)

    # Update new running requests.
    # By default, vLLM scheduler prioritizes prefills.
    # Once chunked prefill is enabled,
    # the policy is changed to prioritize decode requests.
    self.running.extend(
        [s.seq_group for s in swapped_in.decode_seq_groups])
    self.running.extend(
        [s.seq_group for s in swapped_in.prefill_seq_groups])
    self.running.extend(
        [s.seq_group for s in running_scheduled.decode_seq_groups])
    # Because multiple prefills may be running concurrently, we need to
    # make sure that prefills which are scheduled to finish are listed
    # before those that won't. This is so that on the next scheduling
    # iteration when they have transitioned to the decode stage, they are
    # properly prioritized over sequences that are still in the prefill
    # stage.
    self.running.extend(
        self._order_finishing_prefills_first(
            running_scheduled.prefill_seq_groups))
    self.running.extend([s.seq_group for s in prefills.seq_groups])

    # Update swapped requests.
    self.swapped.extend(running_scheduled.swapped_out)
    # Put prefills first due to Attention backend ordering assumption.
    scheduled_seq_groups = (prefills.seq_groups +
                            running_scheduled.prefill_seq_groups +
                            swapped_in.prefill_seq_groups +
                            running_scheduled.decode_seq_groups +
                            swapped_in.decode_seq_groups)
    num_prefill_groups = (len(prefills.seq_groups) +
                          len(swapped_in.prefill_seq_groups) +
                          len(running_scheduled.prefill_seq_groups))
    # If all prompts, then we set num_lookahead_slots to 0
    # this allows us to go through the `no_spec` path in
    # `spec_decode_worker.py`
    all_prefills = len(scheduled_seq_groups) == num_prefill_groups
    num_lookahead_slots = (0 if
                           (all_prefills
                            and not self.scheduler_config.is_multi_step)
                           else running_scheduled.num_lookahead_slots)
    return SchedulerOutputs(
        scheduled_seq_groups=scheduled_seq_groups,
        num_prefill_groups=num_prefill_groups,
        num_batched_tokens=budget.num_batched_tokens +
        budget.num_cached_tokens,
        blocks_to_swap_in=swapped_in.blocks_to_swap_in,
        blocks_to_swap_out=running_scheduled.blocks_to_swap_out,
        blocks_to_copy=running_scheduled.blocks_to_copy +
        swapped_in.blocks_to_copy,
        ignored_seq_groups=prefills.ignored_seq_groups +
        swapped_in.infeasible_seq_groups,
        num_lookahead_slots=num_lookahead_slots,
        running_queue_size=len(self.running),
        preempted=(len(running_scheduled.preempted) +
                   len(running_scheduled.swapped_out)),
    )
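
To make the two ordering rules above concrete, here is a schematic sketch with made-up group names: running prefills that finish this step go ahead of those that do not, and all prefill groups precede decode groups in scheduled_seq_groups because the attention backend assumes that ordering.

# Placeholders standing in for scheduled sequence groups (illustrative only).
new_prefills = ["new_prefill"]
running_prefills = ["chunked_prefill_finishing", "chunked_prefill_continuing"]
swapped_in_prefills: list = []
running_decodes = ["decode_a", "decode_b"]
swapped_in_decodes = ["decode_swapped_in"]

# Prefill groups first, then decode groups, matching the backend assumption.
scheduled = (new_prefills + running_prefills + swapped_in_prefills
             + running_decodes + swapped_in_decodes)
num_prefill_groups = (len(new_prefills) + len(swapped_in_prefills)
                      + len(running_prefills))
assert scheduled[:num_prefill_groups] == [
    "new_prefill", "chunked_prefill_finishing", "chunked_prefill_continuing"]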

_schedule_default

_schedule_default() -> SchedulerOutputs

Schedule queued requests.

The current policy is designed to optimize throughput. First, it batches as many prefill requests as possible, then it schedules decodes. If there is pressure on GPU memory, decode requests can be swapped out or preempted.

Source code in vllm/core/scheduler.py
def _schedule_default(self) -> SchedulerOutputs:
    """Schedule queued requests.

    The current policy is designed to optimize the throughput. First,
    it batches as many prefill requests as possible. And it schedules
    decodes. If there's a pressure on GPU memory, decode requests can
    be swapped or preempted.
    """
    # Include running requests to the budget.
    budget = SchedulingBudget(
        token_budget=self.scheduler_config.max_num_batched_tokens,
        max_num_seqs=self.scheduler_config.max_num_seqs,
    )
    # Make sure we include num running seqs before scheduling prefill,
    # so that we don't schedule beyond max_num_seqs for prefill.
    for seq_group in self.running:
        budget.add_num_seqs(seq_group.request_id,
                            seq_group.get_max_num_running_seqs())
    curr_loras = (set(
        seq_group.lora_int_id for seq_group in self.running
        if seq_group.lora_int_id > 0) if self.lora_enabled else None)

    prefills = SchedulerPrefillOutputs.create_empty()
    running_scheduled = SchedulerRunningOutputs.create_empty()
    swapped_in = SchedulerSwappedInOutputs.create_empty()

    # If any requests are swapped, prioritized swapped requests.
    if not self.swapped:
        prefills = self._schedule_prefills(budget,
                                           curr_loras,
                                           enable_chunking=False)

    if len(prefills.seq_groups
           ) == 0 and self.scheduler_config.policy == "priority":
        self._schedule_priority_preemption(budget)

    # Don't schedule decodes if prefills are scheduled.
    # NOTE: If `_schedule_prefills` doesn't enable chunking, self.running
    # only contains decode requests, not chunked prefills.
    if len(prefills.seq_groups) == 0:
        running_scheduled = self._schedule_running(budget,
                                                   curr_loras,
                                                   enable_chunking=False)

        # If any sequence group is preempted, do not swap in any sequence
        # group. because it means there's no slot for new running requests.
        if (len(running_scheduled.preempted) +
                len(running_scheduled.swapped_out) == 0):
            swapped_in = \
                self._schedule_swapped(budget, curr_loras)

    assert (budget.num_batched_tokens
            <= self.scheduler_config.max_num_batched_tokens)
    assert budget.num_curr_seqs <= self.scheduler_config.max_num_seqs

    # Update waiting requests.
    self.waiting.extendleft(running_scheduled.preempted)
    # Update new running requests.
    if len(prefills.seq_groups) > 0:
        self.running.extend([s.seq_group for s in prefills.seq_groups])

    self.running.extend(running_scheduled.decode_seq_groups_list)

    if len(swapped_in.decode_seq_groups) > 0:
        self.running.extend(
            [s.seq_group for s in swapped_in.decode_seq_groups])

    # Update swapped requests.
    self.swapped.extend(running_scheduled.swapped_out)
    preempted = len(running_scheduled.preempted) + len(
        running_scheduled.swapped_out)

    # There should be no prefill from running queue because this policy
    # doesn't allow chunked prefills.
    assert len(running_scheduled.prefill_seq_groups) == 0
    assert len(swapped_in.prefill_seq_groups) == 0

    # Merge lists
    num_prefill_groups = len(prefills.seq_groups)
    ignored_seq_groups_for_embeds = list[SequenceGroup]()
    if num_prefill_groups > 0:
        scheduled_seq_groups = prefills.seq_groups
        scheduled_seq_groups.extend(running_scheduled.decode_seq_groups)
        ignored_seq_groups_for_embeds.clear()
    else:
        scheduled_seq_groups = running_scheduled.decode_seq_groups
        if len(scheduled_seq_groups) > 0:
            using_prompt_embeds = scheduled_seq_groups[
                0].seq_group.uses_prompt_embeds()
            ignored_seq_groups_for_embeds.clear()
            indices_ignored = list[int]()
            for i, schedule_seq_group in enumerate(scheduled_seq_groups):
                if using_prompt_embeds !=\
                    schedule_seq_group.seq_group.uses_prompt_embeds():
                    ignored_seq_groups_for_embeds.append(
                        schedule_seq_group.seq_group)
                    indices_ignored.append(i)
            if len(ignored_seq_groups_for_embeds) > 0:
                scheduled_seq_groups = [
                    group for i, group in enumerate(scheduled_seq_groups)
                    if i not in indices_ignored
                ]
        else:
            ignored_seq_groups_for_embeds.clear()

    scheduled_seq_groups.extend(swapped_in.decode_seq_groups)

    blocks_to_copy = running_scheduled.blocks_to_copy
    blocks_to_copy.extend(swapped_in.blocks_to_copy)

    ignored_seq_groups = prefills.ignored_seq_groups
    ignored_seq_groups.extend(ignored_seq_groups_for_embeds)
    ignored_seq_groups.extend(swapped_in.infeasible_seq_groups)

    return SchedulerOutputs(
        scheduled_seq_groups=scheduled_seq_groups,
        num_prefill_groups=num_prefill_groups,
        num_batched_tokens=budget.num_batched_tokens +
        budget.num_cached_tokens,
        blocks_to_swap_in=swapped_in.blocks_to_swap_in,
        blocks_to_swap_out=running_scheduled.blocks_to_swap_out,
        blocks_to_copy=blocks_to_copy,
        ignored_seq_groups=ignored_seq_groups,
        num_lookahead_slots=running_scheduled.num_lookahead_slots,
        running_queue_size=len(self.running),
        preempted=preempted,
    )
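
The prompt-embeds handling above keeps a batch homogeneous: when only decodes are scheduled, any group whose uses_prompt_embeds() differs from the first scheduled group is filtered out and reported through ignored_seq_groups for this step. A small filtering sketch with hypothetical flags:

# Hypothetical (request_id, uses_prompt_embeds) pairs for scheduled decodes.
decodes = [("d1", True), ("d2", False), ("d3", True)]
using_prompt_embeds = decodes[0][1]

kept = [g for g in decodes if g[1] == using_prompt_embeds]
ignored = [g for g in decodes if g[1] != using_prompt_embeds]
assert [rid for rid, _ in kept] == ["d1", "d3"]
assert [rid for rid, _ in ignored] == ["d2"]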

_schedule_prefills

_schedule_prefills(
    budget: SchedulingBudget,
    curr_loras: Optional[Set[int]],
    enable_chunking: bool = False,
    partial_prefill_metadata: Optional[
        PartialPrefillMetadata
    ] = None,
) -> SchedulerPrefillOutputs

Schedule sequence groups that are in prefill stage.

Note that the current scheduler treats PREEMPTED_FOR_RECOMPUTE as a new prefill (one that starts from the beginning and runs through the most recently generated tokens).

It schedules waiting requests as long as they fit the budget and len(curr_loras) <= max_loras from the scheduling config. The input arguments budget and curr_loras are updated in place based on the scheduled seq_groups.

Parameters:

- budget (SchedulingBudget): The scheduling budget. The argument is in-place updated when any requests are scheduled. Required.
- curr_loras (Optional[Set[int]]): Currently batched lora request ids. The argument is in-place updated when any requests are scheduled. Required.
- enable_chunking (bool): If True, the seq group can be chunked and only a chunked number of tokens is scheduled if budget.num_batched_tokens does not have enough capacity to schedule all tokens. Default: False.
- partial_prefill_metadata (Optional[PartialPrefillMetadata]): Information about the partial prefills that are currently running. Default: None.

Returns:

SchedulerPrefillOutputs.

Source code in vllm/core/scheduler.py
def _schedule_prefills(
    self,
    budget: SchedulingBudget,
    curr_loras: Optional[Set[int]],
    enable_chunking: bool = False,
    partial_prefill_metadata: Optional[PartialPrefillMetadata] = None,
) -> SchedulerPrefillOutputs:
    """Schedule sequence groups that are in prefill stage.

    Note that the current scheduler treats PREEMPTED_FOR_RECOMPUTE
    as a new prefill (that starts from beginning -> most recently generated
    tokens).

    It schedules waiting requests as long as it fits `budget` and
    curr_loras <= max_lora from the scheduling config. The input arguments
    `budget` and `curr_loras` are updated based on scheduled seq_groups.

    Args:
        budget: The scheduling budget. The argument is in-place updated
            when any requests are scheduled.
        curr_loras: Currently batched lora request ids. The argument is
            in-place updated when any requests are scheduled.
        enable_chunking: If True, seq group can be chunked and only a
            chunked number of tokens are scheduled  if
            `budget.num_batched_tokens` has not enough capacity to schedule
            all tokens.
        partial_prefill_metadata: information about the partial prefills
            that are currently running

    Returns:
        SchedulerPrefillOutputs.
    """
    if budget.remaining_token_budget() == 0:
        # Do nothing: Can't add any more prefill anyway
        return SchedulerPrefillOutputs(
            seq_groups=[],
            ignored_seq_groups=[],
            num_lookahead_slots=self._get_num_lookahead_slots(
                is_prefill=True, enable_chunking=enable_chunking),
        )
    ignored_seq_groups: List[SequenceGroup] = []
    seq_groups: List[ScheduledSequenceGroup] = []
    using_prompt_embeds: bool = False

    waiting_queue = self.waiting

    leftover_waiting_sequences: Deque[SequenceGroup] = deque()
    while self._passed_delay(time.time()) and waiting_queue:
        seq_group = waiting_queue[0]

        waiting_seqs = seq_group.get_seqs(status=SequenceStatus.WAITING)
        assert len(waiting_seqs) == 1, (
            "Waiting sequence group should have only one prompt "
            "sequence.")
        if (partial_prefill_metadata is not None
                and not partial_prefill_metadata.can_schedule(seq_group)):
            leftover_waiting_sequences.appendleft(seq_group)
            waiting_queue.popleft()
            continue
        num_new_tokens_uncached, num_new_tokens_cached = (
            self._get_num_new_uncached_and_cached_tokens(
                seq_group,
                SequenceStatus.WAITING,
                enable_chunking,
                budget,
                partial_prefill_metadata=partial_prefill_metadata,
            ))
        num_new_tokens = num_new_tokens_uncached + num_new_tokens_cached

        if not enable_chunking:
            num_prompt_tokens = waiting_seqs[0].get_len()
            assert num_new_tokens == num_prompt_tokens

        prompt_limit = self._get_prompt_limit(seq_group)
        if num_new_tokens > prompt_limit:
            logger.warning(
                "Input prompt (%d tokens) is too long"
                " and exceeds limit of %d",
                num_new_tokens,
                prompt_limit,
            )
            for seq in waiting_seqs:
                seq.status = SequenceStatus.FINISHED_IGNORED
            self.remove_seq_from_computed_blocks_tracker(
                seq_group, SequenceStatus.FINISHED_IGNORED)
            ignored_seq_groups.append(seq_group)
            waiting_queue.popleft()
            continue

        num_lookahead_slots: int = 0
        if self.scheduler_config.is_multi_step and enable_chunking:
            num_lookahead_slots = self._get_num_lookahead_slots(
                True, enable_chunking)

        # If the sequence group cannot be allocated, stop.
        can_allocate = self.block_manager.can_allocate(
            seq_group, num_lookahead_slots=num_lookahead_slots)
        if can_allocate == AllocStatus.LATER:
            self.remove_seq_from_computed_blocks_tracker(
                seq_group, SequenceStatus.WAITING)
            break
        elif can_allocate == AllocStatus.NEVER:
            logger.warning(
                "Input prompt (%d tokens) + lookahead slots (%d) is "
                "too long and exceeds the capacity of block_manager",
                num_new_tokens,
                num_lookahead_slots,
            )
            for seq in waiting_seqs:
                seq.status = SequenceStatus.FINISHED_IGNORED
            self.remove_seq_from_computed_blocks_tracker(
                seq_group, SequenceStatus.FINISHED_IGNORED)
            ignored_seq_groups.append(seq_group)
            waiting_queue.popleft()
            continue

        # We cannot mix sequence groups that use prompt embeds and
        # those that do not.
        if len(seq_groups) == 0:
            using_prompt_embeds = seq_group.uses_prompt_embeds()
        if using_prompt_embeds != seq_group.uses_prompt_embeds():
            self.remove_seq_from_computed_blocks_tracker(
                seq_group, SequenceStatus.WAITING)
            leftover_waiting_sequences.appendleft(seq_group)
            waiting_queue.popleft()
            continue

        lora_int_id = 0
        if self.lora_enabled:
            lora_int_id = seq_group.lora_int_id
            assert curr_loras is not None
            assert self.lora_config is not None
            if (self.lora_enabled and lora_int_id > 0
                    and lora_int_id not in curr_loras
                    and len(curr_loras) >= self.lora_config.max_loras):
                # We don't have a space for another LoRA, so
                # we ignore this request for now.
                self.remove_seq_from_computed_blocks_tracker(
                    seq_group, SequenceStatus.WAITING)
                leftover_waiting_sequences.appendleft(seq_group)
                waiting_queue.popleft()
                continue

        if (budget.num_batched_tokens
                >= self.scheduler_config.max_num_batched_tokens):
            # We've reached the budget limit - since there might be
            # continuous prefills in the running queue, we should break
            # to avoid scheduling any new prefills.
            self.remove_seq_from_computed_blocks_tracker(
                seq_group, SequenceStatus.WAITING)
            break

        num_new_seqs = seq_group.get_max_num_running_seqs()
        if num_new_tokens_uncached == 0 or not budget.can_schedule(
                num_new_tokens=num_new_tokens_uncached,
                num_new_seqs=num_new_seqs,
        ):
            self.remove_seq_from_computed_blocks_tracker(
                seq_group, SequenceStatus.WAITING)
            break

        # Can schedule this request.
        if curr_loras is not None and lora_int_id > 0:
            curr_loras.add(lora_int_id)
        waiting_queue.popleft()
        self._allocate_and_set_running(seq_group)

        if partial_prefill_metadata is not None:
            partial_prefill_metadata.maybe_increment_partial_prefills(
                seq_group)

        if enable_chunking and self.scheduler_config.is_multi_step:
            blocks_to_copy: List[Tuple[int, int]] = []
            # init_multi_step_from_lookahead_slots happens in append_slots
            self._append_slots(seq_group, blocks_to_copy, enable_chunking)
            # This assert will trip when a copy-on-write happens. This is
            # not a concern as the very first sequence-group block
            # allocation happens above. Still, we have the assert to
            # catch any edge-cases.
            assert not blocks_to_copy
        else:
            seq_group.init_multi_step_from_lookahead_slots(
                num_lookahead_slots,
                num_scheduler_steps=self.scheduler_config.
                num_scheduler_steps,
                is_multi_step=self.scheduler_config.is_multi_step,
                enable_chunking=enable_chunking,
            )

        seq_groups.append(
            ScheduledSequenceGroup(seq_group=seq_group,
                                   token_chunk_size=num_new_tokens))
        budget.add_num_batched_tokens(
            seq_group.request_id,
            num_batched_tokens=num_new_tokens_uncached,
            num_cached_tokens=num_new_tokens_cached,
        )
        budget.add_num_seqs(seq_group.request_id, num_new_seqs)

    # Queue requests that couldn't be scheduled.
    waiting_queue.extendleft(leftover_waiting_sequences)
    if len(seq_groups) > 0:
        self.prev_prompt = True

    return SchedulerPrefillOutputs(
        seq_groups=seq_groups,
        ignored_seq_groups=ignored_seq_groups,
        num_lookahead_slots=self._get_num_lookahead_slots(
            is_prefill=True, enable_chunking=enable_chunking),
    )

_schedule_priority_preemption

_schedule_priority_preemption(
    budget: SchedulingBudget,
) -> int

Sorts the waiting and running queues and force-preempts requests from the running queue if their priority is lower. Priority-based preemption is used with the priority policy.

Parameters:

- budget (SchedulingBudget): The scheduling budget. The argument is in-place updated when any requests are scheduled. Required.

Returns:

A count of priority-based preemptions.

Source code in vllm/core/scheduler.py
def _schedule_priority_preemption(
    self,
    budget: SchedulingBudget,
) -> int:
    """Sorts waiting and running queue. Also, force preempt requests
    from the running queue if their priority is lower.
    Priority-based preemption is used with the priority policy.
    Args:
        budget: The scheduling budget. The argument is in-place updated
            when any requests are scheduled.
    Returns:
        A count of priority-based preemptions.
    """

    waiting_queue = self.waiting

    running_queue = deque(sorted(self.running, key=self._get_priority))

    blocks_to_swap_out: List[Tuple[int, int]] = []
    force_preemption_count = 0

    if waiting_queue:
        seq_group = waiting_queue.popleft()
        num_new_seqs = seq_group.get_max_num_running_seqs()
        num_new_tokens_uncached, _ = \
            self._get_num_new_uncached_and_cached_tokens(
            seq_group, SequenceStatus.WAITING, False, budget)

        # Only preempt if priority inversion exists
        while running_queue and self._get_priority(
                running_queue[-1]) > self._get_priority(seq_group):
            # Only preempt if waiting sequence cannot be allocated
            can_allocate = self.block_manager.can_allocate(seq_group)
            if (num_new_tokens_uncached > 0
                    and can_allocate == AllocStatus.OK
                    and budget.can_schedule(
                        num_new_tokens=num_new_tokens_uncached,
                        num_new_seqs=num_new_seqs,
                    )):
                break

            # Adjust budget to remove the victim sequence group
            vseq_group = running_queue.pop()
            num_running_tokens_uncached, _ = (
                self._get_num_new_uncached_and_cached_tokens(
                    vseq_group, SequenceStatus.RUNNING, False, budget))
            budget.subtract_num_batched_tokens(
                vseq_group.request_id, num_running_tokens_uncached)
            num_running_seqs = vseq_group.get_max_num_running_seqs()
            budget.subtract_num_seqs(vseq_group.request_id,
                                     num_running_seqs)

            # Preempt out the victim sequence group
            self._preempt(vseq_group, blocks_to_swap_out)
            waiting_queue.appendleft(vseq_group)
            force_preemption_count += 1
        # Put the sequence back into the waiting queue
        waiting_queue.appendleft(seq_group)

        self.remove_seq_from_computed_blocks_tracker(
            seq_group, SequenceStatus.WAITING)

    waiting_queue = deque(sorted(waiting_queue, key=self._get_priority))

    self.waiting = waiting_queue
    self.running = running_queue
    return force_preemption_count
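
Illustration of the priority-inversion check above with hypothetical priorities (a lower value is more urgent): victims are popped from the low-priority end of the sorted running queue while they are less urgent than the waiting head, assuming the head still cannot be allocated.

from collections import deque

# Hypothetical (request_id, priority) pairs; a lower value is more urgent.
running = deque(sorted([("r1", 2), ("r2", 0), ("r3", 5)], key=lambda g: g[1]))
waiting_head_priority = 1

preempted = []
# Preempt only while a priority inversion exists (and, in the scheduler,
# only while the waiting head still cannot be allocated or budgeted).
while running and running[-1][1] > waiting_head_priority:
    preempted.append(running.pop())

assert [rid for rid, _ in preempted] == ["r3", "r1"]
assert list(running) == [("r2", 0)]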

_schedule_running

_schedule_running(
    budget: SchedulingBudget,
    curr_loras: Optional[Set[int]],
    enable_chunking: bool = False,
    partial_prefill_metadata: Optional[
        PartialPrefillMetadata
    ] = None,
) -> SchedulerRunningOutputs

Schedule sequence groups that are running.

Running queue should include decode and chunked prefill requests.

Parameters:

- budget (SchedulingBudget): The scheduling budget. The argument is in-place updated when any decodes are preempted. Required.
- curr_loras (Optional[Set[int]]): Currently batched lora request ids. The argument is in-place updated when any decodes are preempted. Required.
- enable_chunking (bool): If True, the seq group can be chunked and only a chunked number of tokens is scheduled if budget.num_batched_tokens does not have enough capacity to schedule all tokens. Default: False.
- partial_prefill_metadata (Optional[PartialPrefillMetadata]): Information about the partial prefills that are currently running. Default: None.

Returns:

SchedulerRunningOutputs.

Source code in vllm/core/scheduler.py
def _schedule_running(
    self,
    budget: SchedulingBudget,
    curr_loras: Optional[Set[int]],
    enable_chunking: bool = False,
    partial_prefill_metadata: Optional[PartialPrefillMetadata] = None,
) -> SchedulerRunningOutputs:
    """Schedule sequence groups that are running.

    Running queue should include decode and chunked prefill requests.

    Args:
        budget: The scheduling budget. The argument is in-place updated
            when any decodes are preempted.
        curr_loras: Currently batched lora request ids. The argument is
            in-place updated when any decodes are preempted.
        enable_chunking: If True, seq group can be chunked and only a
            chunked number of tokens are scheduled  if
            `budget.num_batched_tokens` has not enough capacity to schedule
            all tokens.
        partial_prefill_metadata: information about the partial prefills
        that are currently running

    Returns:
        SchedulerRunningOutputs.
    """
    ret: SchedulerRunningOutputs = self._scheduler_running_outputs_cache[
        self.cache_id].get_object()
    ret.blocks_to_swap_out.clear()
    ret.blocks_to_copy.clear()
    ret.decode_seq_groups.clear()
    ret.prefill_seq_groups.clear()
    ret.preempted.clear()
    ret.swapped_out.clear()

    ret.num_lookahead_slots = self._get_num_lookahead_slots(
        is_prefill=False, enable_chunking=enable_chunking)

    ret.decode_seq_groups_list.clear()
    ret.prefill_seq_groups_list.clear()

    # Blocks that need to be swapped or copied before model execution.
    blocks_to_swap_out: List[Tuple[int, int]] = ret.blocks_to_swap_out
    blocks_to_copy: List[Tuple[int, int]] = ret.blocks_to_copy

    decode_seq_groups: List[ScheduledSequenceGroup] = ret.decode_seq_groups
    prefill_seq_groups: List[
        ScheduledSequenceGroup] = ret.prefill_seq_groups
    preempted: List[SequenceGroup] = ret.preempted
    swapped_out: List[SequenceGroup] = ret.swapped_out

    running_queue = self.running
    assert len(self._async_stopped) == 0
    while running_queue:
        seq_group = running_queue[0]
        # We discard the cached tokens info here because we don't need it
        # for running sequence:
        #   1. If a sequence is running with chunked prefill, the cached
        #      tokens info was already used for the first prefill.
        #   2. If a sequence is running with non-chunked prefill, then
        #      it's a decoding sequence, and the cached tokens info is
        #      irrelevant.
        num_uncached_new_tokens, _ = \
            self._get_num_new_uncached_and_cached_tokens(
            seq_group,
            SequenceStatus.RUNNING,
            enable_chunking,
            budget,
            partial_prefill_metadata,
        )

        num_running_tokens = num_uncached_new_tokens
        if num_running_tokens == 0:
            # No budget => Stop
            break

        running_queue.popleft()

        # With async postprocessor, an extra decode run is done
        # to process the final tokens. The check below avoids this extra
        # decode run when the model max len is reached, in order to avoid
        # a memory overflow.
        if (self.use_async_output_proc and seq_group.seqs[0].get_len()
                > self.scheduler_config.max_model_len):
            self._async_stopped.append(seq_group)
            continue

        # NOTE(woosuk): Preemption happens only when there is no available
        # slot to keep all the sequence groups in the RUNNING state.
        while not self._can_append_slots(seq_group, enable_chunking):
            budget.subtract_num_batched_tokens(seq_group.request_id,
                                               num_running_tokens)
            num_running_seqs = seq_group.get_max_num_running_seqs()
            budget.subtract_num_seqs(seq_group.request_id,
                                     num_running_seqs)

            if (curr_loras is not None and seq_group.lora_int_id > 0
                    and seq_group.lora_int_id in curr_loras):
                curr_loras.remove(seq_group.lora_int_id)

            # Determine victim sequence
            cont_loop = True
            if running_queue:
                # Preempt the lowest-priority sequence group.
                victim_seq_group = running_queue.pop()
            else:
                # No other sequence group can be preempted.
                # Preempt the current sequence group.
                # Note: This is also where we stop this loop
                # (since there is nothing else to preempt)
                victim_seq_group = seq_group
                cont_loop = False

            # With async postprocessor, before preempting a sequence
            # we need to ensure it has no pending async postprocessor
            do_preempt = True
            if self.use_async_output_proc:
                assert self.output_proc_callback is not None
                self.output_proc_callback(
                    request_id=victim_seq_group.request_id)

                # It may be that the async pending "victim_seq_group"
                # becomes finished, in which case we simply free it.
                if victim_seq_group.is_finished():
                    self._free_finished_seq_group(victim_seq_group)
                    do_preempt = False

            # Do preemption
            if do_preempt:
                preempted_mode = self._preempt(victim_seq_group,
                                               blocks_to_swap_out)
                if preempted_mode == PreemptionMode.RECOMPUTE:
                    preempted.append(victim_seq_group)
                else:
                    swapped_out.append(victim_seq_group)

            if not cont_loop:
                break
        else:
            self._append_slots(seq_group, blocks_to_copy, enable_chunking)
            is_prefill = seq_group.is_prefill()

            scheduled_seq_group: ScheduledSequenceGroup = (
                self._scheduled_seq_group_cache[
                    self.cache_id].get_object())
            scheduled_seq_group.seq_group = seq_group
            if is_prefill:
                scheduled_seq_group.token_chunk_size = num_running_tokens
                prefill_seq_groups.append(scheduled_seq_group)
                ret.prefill_seq_groups_list.append(seq_group)
            else:
                scheduled_seq_group.token_chunk_size = 1
                decode_seq_groups.append(scheduled_seq_group)
                ret.decode_seq_groups_list.append(seq_group)

            budget.add_num_batched_tokens(seq_group.request_id,
                                          num_running_tokens)
            # OPTIMIZATION:  Note that get_max_num_running_seqs is
            # expensive. For the default scheduling case where
            # enable_chunking is False, num_seqs are updated before running
            # this method, so we don't have to update it again here.
            if enable_chunking:
                num_running_seqs = seq_group.get_max_num_running_seqs()
                budget.add_num_seqs(seq_group.request_id, num_running_seqs)
            if curr_loras is not None and seq_group.lora_int_id > 0:
                curr_loras.add(seq_group.lora_int_id)

    self._scheduler_running_outputs_cache[self.next_cache_id].reset()
    self._scheduled_seq_group_cache[self.next_cache_id].reset()

    return ret

_schedule_swapped

_schedule_swapped(
    budget: SchedulingBudget,
    curr_loras: Optional[Set[int]],
    enable_chunking: bool = False,
) -> SchedulerSwappedInOutputs

Schedule sequence groups that are swapped out.

It schedules swapped requests as long as they fit the budget and len(curr_loras) <= max_loras from the scheduling config. The input arguments budget and curr_loras are updated in place based on the scheduled seq_groups.

Parameters:

- budget (SchedulingBudget): The scheduling budget. The argument is in-place updated when any requests are swapped in. Required.
- curr_loras (Optional[Set[int]]): Currently batched lora request ids. The argument is in-place updated when any requests are swapped in. Required.
- enable_chunking (bool): If True, the seq group can be chunked and only a chunked number of tokens is scheduled if budget.num_batched_tokens does not have enough capacity to schedule all tokens. Default: False.

Returns:

SchedulerSwappedInOutputs.

Source code in vllm/core/scheduler.py
def _schedule_swapped(
    self,
    budget: SchedulingBudget,
    curr_loras: Optional[Set[int]],
    enable_chunking: bool = False,
) -> SchedulerSwappedInOutputs:
    """Schedule sequence groups that are swapped out.

    It schedules swapped requests as long as it fits `budget` and
    curr_loras <= max_lora from the scheduling config. The input arguments
    `budget` and `curr_loras` are updated based on scheduled seq_groups.

    Args:
        budget: The scheduling budget. The argument is in-place updated
            when any requests are swapped in.
        curr_loras: Currently batched lora request ids. The argument is
            in-place updated when any requests are swapped in.
        enable_chunking: If True, seq group can be chunked and only a
            chunked number of tokens are scheduled  if
            `budget.num_batched_tokens` has not enough capacity to schedule
            all tokens.

    Returns:
        SchedulerSwappedInOutputs.
    """
    # Blocks that need to be swapped or copied before model execution.
    blocks_to_swap_in: List[Tuple[int, int]] = []
    blocks_to_copy: List[Tuple[int, int]] = []
    decode_seq_groups: List[ScheduledSequenceGroup] = []
    prefill_seq_groups: List[ScheduledSequenceGroup] = []
    infeasible_seq_groups: List[SequenceGroup] = []

    swapped_queue = self.swapped

    leftover_swapped: Deque[SequenceGroup] = deque()
    while swapped_queue:
        seq_group = swapped_queue[0]

        # If the sequence group cannot be swapped in, stop.
        is_prefill = seq_group.is_prefill()
        alloc_status = self.block_manager.can_swap_in(
            seq_group,
            self._get_num_lookahead_slots(is_prefill, enable_chunking))
        if alloc_status == AllocStatus.LATER:
            break
        elif alloc_status == AllocStatus.NEVER:
            logger.warning(
                "Failing the request %s because there's not enough kv "
                "cache blocks to run the entire sequence.",
                seq_group.request_id,
            )
            for seq in seq_group.get_seqs():
                seq.status = SequenceStatus.FINISHED_IGNORED
            infeasible_seq_groups.append(seq_group)
            swapped_queue.popleft()
            continue

        lora_int_id = 0
        if self.lora_enabled:
            lora_int_id = seq_group.lora_int_id
            assert curr_loras is not None
            assert self.lora_config is not None
            if (lora_int_id > 0 and (lora_int_id not in curr_loras)
                    and len(curr_loras) >= self.lora_config.max_loras):
                # We don't have a space for another LoRA, so
                # we ignore this request for now.
                leftover_swapped.appendleft(seq_group)
                swapped_queue.popleft()
                continue

        # The total number of sequences in the RUNNING state should not
        # exceed the maximum number of sequences.
        num_new_seqs = seq_group.get_max_num_running_seqs()
        num_new_tokens_uncached, num_new_tokens_cached = (
            self._get_num_new_uncached_and_cached_tokens(
                seq_group, SequenceStatus.SWAPPED, enable_chunking,
                budget))

        if num_new_tokens_uncached == 0 or not budget.can_schedule(
                num_new_tokens=num_new_tokens_uncached,
                num_new_seqs=num_new_seqs,
        ):
            self.remove_seq_from_computed_blocks_tracker(
                seq_group, SequenceStatus.SWAPPED)
            break

        if lora_int_id > 0 and curr_loras is not None:
            curr_loras.add(lora_int_id)
        swapped_queue.popleft()
        self._swap_in(seq_group, blocks_to_swap_in)
        self._append_slots(seq_group, blocks_to_copy, enable_chunking)
        if is_prefill:
            prefill_seq_groups.append(
                ScheduledSequenceGroup(
                    seq_group,
                    token_chunk_size=num_new_tokens_uncached +
                    num_new_tokens_cached,
                ))
        else:
            decode_seq_groups.append(
                ScheduledSequenceGroup(seq_group, token_chunk_size=1))
        budget.add_num_batched_tokens(
            seq_group.request_id,
            num_batched_tokens=num_new_tokens_uncached,
            num_cached_tokens=num_new_tokens_cached,
        )
        budget.add_num_seqs(seq_group.request_id, num_new_seqs)

    swapped_queue.extendleft(leftover_swapped)

    return SchedulerSwappedInOutputs(
        decode_seq_groups=decode_seq_groups,
        prefill_seq_groups=prefill_seq_groups,
        blocks_to_swap_in=blocks_to_swap_in,
        blocks_to_copy=blocks_to_copy,
        num_lookahead_slots=self._get_num_lookahead_slots(
            is_prefill=False, enable_chunking=enable_chunking),
        infeasible_seq_groups=infeasible_seq_groups,
    )

_swap_in

_swap_in(
    seq_group: SequenceGroup,
    blocks_to_swap_in: List[Tuple[int, int]],
) -> None
Source code in vllm/core/scheduler.py
def _swap_in(
    self,
    seq_group: SequenceGroup,
    blocks_to_swap_in: List[Tuple[int, int]],
) -> None:
    mapping = self.block_manager.swap_in(seq_group)
    blocks_to_swap_in.extend(mapping)
    for seq in seq_group.get_seqs(status=SequenceStatus.SWAPPED):
        seq.status = SequenceStatus.RUNNING

_swap_out

_swap_out(
    seq_group: SequenceGroup,
    blocks_to_swap_out: List[Tuple[int, int]],
) -> None
Source code in vllm/core/scheduler.py
def _swap_out(
    self,
    seq_group: SequenceGroup,
    blocks_to_swap_out: List[Tuple[int, int]],
) -> None:
    if not self.block_manager.can_swap_out(seq_group):
        # FIXME(woosuk): Abort the sequence group instead of aborting the
        # entire engine.
        raise RuntimeError(
            "Aborted due to the lack of CPU swap space. Please increase "
            "the swap space to avoid this error.")
    mapping = self.block_manager.swap_out(seq_group)
    blocks_to_swap_out.extend(mapping)
    for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
        seq.status = SequenceStatus.SWAPPED
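
Both helpers only record block copies: the mappings are (source, destination) physical block-number pairs, CPU -> GPU for swap-in and GPU -> CPU for swap-out (see the SchedulerOutputs fields below), and the actual data movement is carried out later by the worker. A toy illustration with made-up block numbers (not vLLM code):

from typing import List, Tuple

blocks_to_swap_in: List[Tuple[int, int]] = []   # CPU block -> GPU block
blocks_to_swap_out: List[Tuple[int, int]] = []  # GPU block -> CPU block

# Pretend the block manager mapped CPU blocks 7, 8 onto GPU blocks 2, 3
# while swapping one sequence group back in:
blocks_to_swap_in.extend([(7, 2), (8, 3)])

# ...and evicted GPU blocks 4, 5 to CPU blocks 11, 12 for another group:
blocks_to_swap_out.extend([(4, 11), (5, 12)])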

abort_seq_group

abort_seq_group(
    request_id: Union[str, Iterable[str]],
    seq_id_to_seq_group: Optional[
        Dict[str, SequenceGroupBase]
    ] = None,
) -> None

Aborts a sequence group with the given ID.

Check if the sequence group with the given ID is present in any of the state queues. If present, remove the sequence group from that queue. Also, if any sequence in the group is not yet finished, free it with status FINISHED_ABORTED. Otherwise, do nothing.

Parameters:

request_id (Union[str, Iterable[str]], required): The ID(s) of the sequence group to abort.
seq_id_to_seq_group (Optional[Dict[str, SequenceGroupBase]], default None): Helper for groups with n>1.
Source code in vllm/core/scheduler.py
def abort_seq_group(
    self,
    request_id: Union[str, Iterable[str]],
    seq_id_to_seq_group: Optional[Dict[str, SequenceGroupBase]] = None,
) -> None:
    """Aborts a sequence group with the given ID.

    Check if the sequence group with the given ID
        is present in any of the state queue.
    If present, remove the sequence group from the state queue.
        Also, if any of the sequences in the sequence group is not finished,
            free the sequence with status `FINISHED_ABORTED`.
    Otherwise, do nothing.

    Args:
        request_id: The ID(s) of the sequence group to abort.
        seq_id_to_seq_group: helper for groups with n>1
    """
    if isinstance(request_id, str):
        request_id = (request_id, )
    request_ids = set(request_id)
    seq_id_to_seq_group = seq_id_to_seq_group or {}
    for state_queue in [self.waiting, self.running, self.swapped]:
        aborted_groups: List[SequenceGroup] = []
        for seq_group in state_queue:
            # When n>1, seq_group.request_id looks like
            # foo_parallel_sample_0, while request_ids is just foo, and we
            # should resolve it as real_request_id to match.
            if seq_group.request_id in seq_id_to_seq_group:
                real_request_id = seq_id_to_seq_group[
                    seq_group.request_id].group_id
            else:
                real_request_id = seq_group.request_id
            if real_request_id in request_ids:
                # Appending aborted group into pending list.
                aborted_groups.append(seq_group)
                # We can't remove real_request_id in request_ids here,
                # because there may be other seq groups sharing the same
                # real_request_id
        for aborted_group in aborted_groups:
            # Remove the sequence group from the state queue.
            state_queue.remove(aborted_group)
            # Remove the aborted request from the Mamba cache.
            self._finished_requests_ids.append(aborted_group.request_id)
            for seq in aborted_group.get_seqs():
                if seq.is_finished():
                    continue
                seq.status = SequenceStatus.FINISHED_ABORTED
                self.free_seq(seq)
            if aborted_group.request_id in seq_id_to_seq_group:
                del seq_id_to_seq_group[aborted_group.request_id]

            self._free_seq_group_cross_attn_blocks(aborted_group)
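
A hedged usage sketch (assumes an already-constructed Scheduler instance named scheduler; the request ids are made up):

# Abort a single request, or several at once:
scheduler.abort_seq_group("request-123")
scheduler.abort_seq_group(["request-124", "request-125"])

# For n>1 sampling, parallel-sample groups carry derived request ids such as
# "foo_parallel_sample_0"; passing seq_id_to_seq_group lets the scheduler
# resolve them back to the parent request id "foo" (see the comment in the
# source above).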

add_seq_group

add_seq_group(seq_group: SequenceGroup) -> None
Source code in vllm/core/scheduler.py
def add_seq_group(self, seq_group: SequenceGroup) -> None:
    # Add sequence groups to the waiting queue.
    self.waiting.append(seq_group)

fork_seq

fork_seq(parent_seq: Sequence, child_seq: Sequence) -> None
Source code in vllm/core/scheduler.py
def fork_seq(self, parent_seq: Sequence, child_seq: Sequence) -> None:
    self.block_manager.fork(parent_seq, child_seq)

free_finished_seq_groups

free_finished_seq_groups() -> None
Source code in vllm/core/scheduler.py
def free_finished_seq_groups(self) -> None:
    remaining: Deque[SequenceGroup] = deque()
    for seq_group in self.running:
        self._free_finished_seq_group(seq_group)
        if not seq_group.is_finished():
            remaining.append(seq_group)

    self.running = remaining

    # Handle async stopped sequence groups
    # (ones that reached max model len)
    if self._async_stopped:
        for seq_group in self._async_stopped:
            self._free_seq_group_cross_attn_blocks(seq_group)
            self._finished_requests_ids.append(seq_group.request_id)

            # Free finished seqs
            self._free_finished_seqs(seq_group)

        self._async_stopped.clear()

free_seq

free_seq(seq: Sequence) -> None

Free a sequence from a block table.

Source code in vllm/core/scheduler.py
def free_seq(self, seq: Sequence) -> None:
    """Free a sequence from a block table."""
    self.block_manager.free(seq)

get_and_reset_finished_requests_ids

get_and_reset_finished_requests_ids() -> List[str]

Flushes the list of request ids of previously finished seq_groups.

Source code in vllm/core/scheduler.py
def get_and_reset_finished_requests_ids(self) -> List[str]:
    """Flushes the list of request ids of previously finished seq_groups."""
    finished_requests_ids = self._finished_requests_ids
    self._finished_requests_ids = list()
    return finished_requests_ids
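
A short usage note: the call is destructive, returning the accumulated ids and clearing the internal list, so each finished request id is reported exactly once. Hypothetical sketch (assumes a constructed Scheduler named scheduler):

first = scheduler.get_and_reset_finished_requests_ids()
second = scheduler.get_and_reset_finished_requests_ids()
assert second == []  # already flushed by the first call (no new finishes in between)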

get_num_unfinished_seq_groups

get_num_unfinished_seq_groups() -> int
Source code in vllm/core/scheduler.py
def get_num_unfinished_seq_groups(self) -> int:
    return len(self.waiting) + len(self.running) + len(self.swapped)

get_prefix_cache_hit_rate

get_prefix_cache_hit_rate(device: Device) -> float
Source code in vllm/core/scheduler.py
def get_prefix_cache_hit_rate(self, device: Device) -> float:
    return self.block_manager.get_prefix_cache_hit_rate(device)

has_unfinished_seqs

has_unfinished_seqs() -> bool
Source code in vllm/core/scheduler.py
def has_unfinished_seqs(self) -> bool:
    return (len(self.waiting) != 0 or len(self.running) != 0
            or len(self.swapped) != 0)

remove_seq_from_computed_blocks_tracker

remove_seq_from_computed_blocks_tracker(
    seq_group: SequenceGroup,
    status: Optional[SequenceStatus],
) -> None
Source code in vllm/core/scheduler.py
def remove_seq_from_computed_blocks_tracker(
        self, seq_group: SequenceGroup,
        status: Optional[SequenceStatus]) -> None:
    seqs = seq_group.get_seqs(status=status)
    for seq in seqs:
        self._remove_seq_from_computed_blocks_tracker(seq)

reset_prefix_cache

reset_prefix_cache(device: Optional[Device] = None) -> bool
Source code in vllm/core/scheduler.py
def reset_prefix_cache(self, device: Optional[Device] = None) -> bool:
    return self.block_manager.reset_prefix_cache(device)

schedule

schedule() -> Tuple[
    List[SequenceGroupMetadata], SchedulerOutputs, bool
]
Source code in vllm/core/scheduler.py
def schedule(
        self
) -> Tuple[List[SequenceGroupMetadata], SchedulerOutputs, bool]:
    # Schedule sequence groups.
    # This function call changes the internal states of the scheduler
    # such as self.running, self.swapped, and self.waiting.
    scheduler_start_time = time.perf_counter()

    scheduler_outputs: SchedulerOutputs = self._schedule()
    now = time.time()

    if not self.cache_config.enable_prefix_caching:
        common_computed_block_nums = []

    allow_async_output_proc: bool = self.use_async_output_proc

    # Create input data structures.
    seq_group_metadata_list: List[SequenceGroupMetadata] = []
    for i, scheduled_seq_group in enumerate(
            scheduler_outputs.scheduled_seq_groups):
        seq_group = scheduled_seq_group.seq_group
        token_chunk_size = scheduled_seq_group.token_chunk_size
        seq_group.maybe_set_first_scheduled_time(now)

        seq_group_metadata = self._seq_group_metadata_cache[
            self.cache_id].get_object()
        seq_group_metadata.seq_data.clear()
        seq_group_metadata.block_tables.clear()

        # seq_id -> SequenceData
        seq_data: Dict[int, SequenceData] = {}
        # seq_id -> physical block numbers
        block_tables: Dict[int, List[int]] = {}

        if seq_group.is_encoder_decoder():
            # Encoder associated with SequenceGroup
            encoder_seq = seq_group.get_encoder_seq()
            assert encoder_seq is not None
            encoder_seq_data = encoder_seq.data
            # Block table for cross-attention
            # Also managed at SequenceGroup level
            cross_block_table = self.block_manager.get_cross_block_table(
                seq_group)
        else:
            encoder_seq_data = None
            cross_block_table = None

        for seq in seq_group.get_seqs(status=SequenceStatus.RUNNING):
            seq_id = seq.seq_id
            seq_data[seq_id] = seq.data
            block_tables[seq_id] = self.block_manager.get_block_table(seq)
            self.block_manager.access_all_blocks_in_seq(seq, now)

        if self.cache_config.enable_prefix_caching:
            common_computed_block_nums = (
                self.block_manager.get_common_computed_block_ids(
                    seq_group.get_seqs(status=SequenceStatus.RUNNING)))

        do_sample = True
        is_prompt = seq_group.is_prefill()
        # We should send the metadata to workers when the first prefill
        # is sent. Subsequent requests could be chunked prefill or decode.
        is_first_prefill = False
        if is_prompt:
            seqs = seq_group.get_seqs()
            # Prefill has only 1 sequence.
            assert len(seqs) == 1
            num_computed_tokens = seqs[0].data.get_num_computed_tokens()
            is_first_prefill = num_computed_tokens == 0
            # In the next iteration, all prompt tokens are not computed.
            # It means the prefill is chunked, and we don't need sampling.
            # NOTE: We use get_len instead of get_prompt_len because when
            # a sequence is preempted, prefill includes previous generated
            # output tokens.
            if (token_chunk_size + num_computed_tokens
                    < seqs[0].data.get_len()):
                do_sample = False

        # It assumes the scheduled_seq_groups is ordered by
        # prefill < decoding.
        if is_first_prefill or not self.scheduler_config.send_delta_data:
            seq_group_metadata = SequenceGroupMetadata(
                request_id=seq_group.request_id,
                is_prompt=is_prompt,
                seq_data=seq_data,
                sampling_params=seq_group.sampling_params,
                block_tables=block_tables,
                do_sample=do_sample,
                pooling_params=seq_group.pooling_params,
                token_chunk_size=token_chunk_size,
                lora_request=seq_group.lora_request,
                computed_block_nums=common_computed_block_nums,
                encoder_seq_data=encoder_seq_data,
                cross_block_table=cross_block_table,
                state=seq_group.state,
                token_type_ids=seq_group.token_type_ids,
                # `multi_modal_data` will only be present for the 1st comm
                # between engine and worker.
                # the subsequent comms can still use delta, but
                # `multi_modal_data` will be None.
                multi_modal_data=(seq_group.multi_modal_data
                                  if scheduler_outputs.num_prefill_groups
                                  > 0 else None),
                multi_modal_placeholders=(
                    seq_group.multi_modal_placeholders
                    if scheduler_outputs.num_prefill_groups > 0 else None),
                prompt_adapter_request=seq_group.prompt_adapter_request,
            )
        else:
            # When SPMD mode is enabled, we only send delta data except for
            # the first request to reduce serialization cost.
            seq_data_delta = {}
            for id, data in seq_data.items():
                seq_data_delta[id] = data.get_delta_and_reset()
            seq_group_metadata = SequenceGroupMetadataDelta(
                seq_data_delta,
                seq_group.request_id,
                block_tables,
                is_prompt,
                do_sample=do_sample,
                token_chunk_size=token_chunk_size,
                computed_block_nums=common_computed_block_nums,
            )
        seq_group_metadata_list.append(seq_group_metadata)

        if allow_async_output_proc:
            allow_async_output_proc = self._allow_async_output_proc(
                seq_group)

    # Now that the batch has been created, we can assume all blocks in the
    # batch will have been computed before the next scheduling invocation.
    # This is because the engine assumes that a failure in model execution
    # will crash the vLLM instance / will not retry.
    for scheduled_seq_group in scheduler_outputs.scheduled_seq_groups:
        self.block_manager.mark_blocks_as_computed(
            scheduled_seq_group.seq_group,
            scheduled_seq_group.token_chunk_size)

    self._seq_group_metadata_cache[self.next_cache_id].reset()

    scheduler_time = time.perf_counter() - scheduler_start_time
    # Add this to scheduler time to all the sequences that are currently
    # running. This will help estimate if the scheduler is a significant
    # component in the e2e latency.
    for seq_group in self.running:
        if seq_group is not None and seq_group.metrics is not None:
            if seq_group.metrics.scheduler_time is not None:
                seq_group.metrics.scheduler_time += scheduler_time
            else:
                seq_group.metrics.scheduler_time = scheduler_time

    # Move to next cache (if exists)
    self.cache_id = self.next_cache_id

    # Return results
    return (seq_group_metadata_list, scheduler_outputs,
            allow_async_output_proc)
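
The do_sample logic above is worth spelling out: with chunked prefill, a sequence only samples once the scheduled chunk reaches the end of its known tokens (the prompt plus any tokens regenerated after preemption). A standalone sketch of that check (plain arithmetic, not vLLM code):

def needs_sampling(num_computed_tokens: int, token_chunk_size: int,
                   seq_len: int) -> bool:
    # Mirrors the check above: sample only when this chunk is the last one.
    return num_computed_tokens + token_chunk_size >= seq_len

assert not needs_sampling(num_computed_tokens=0, token_chunk_size=512, seq_len=2000)
assert needs_sampling(num_computed_tokens=1536, token_chunk_size=464, seq_len=2000)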

SchedulerOutputs dataclass

The scheduling decision made by the scheduler.

Source code in vllm/core/scheduler.py
@dataclass
class SchedulerOutputs:
    """The scheduling decision made from a scheduler."""

    # Scheduled sequence groups.
    scheduled_seq_groups: GenericSequence[ScheduledSequenceGroup]
    # Number of prefill groups scheduled.
    num_prefill_groups: int
    # Total number of batched tokens.
    num_batched_tokens: int
    # Blocks to swap in. List of CPU -> GPU block number.
    blocks_to_swap_in: List[Tuple[int, int]]
    # Blocks to swap out. List of GPU -> CPU block number.
    blocks_to_swap_out: List[Tuple[int, int]]
    # Blocks to copy. Source to dest block.
    blocks_to_copy: List[Tuple[int, int]]
    # Sequence groups that are going to be ignored.
    ignored_seq_groups: List[SequenceGroup]
    # The number of slots for lookahead decoding.
    num_lookahead_slots: int
    # The number of requests in the running queue
    running_queue_size: int
    preempted: int

    def __post_init__(self):
        # Swap in and swap out should never happen at the same time.
        assert not (self.blocks_to_swap_in and self.blocks_to_swap_out)

        self.num_loras: int = len(self.lora_requests)
        if self.num_loras > 0:
            self._sort_by_lora_ids()

        self.num_prompt_adapters: int = len(self.prompt_adapter_requests)

    def is_empty(self) -> bool:
        # NOTE: We do not consider the ignored sequence groups.
        return (not self.scheduled_seq_groups and not self.blocks_to_swap_in
                and not self.blocks_to_swap_out and not self.blocks_to_copy)

    def _sort_by_lora_ids(self):
        assert 0 <= self.num_prefill_groups <= len(self.scheduled_seq_groups)

        def key_fn(group: ScheduledSequenceGroup):
            key = (group.seq_group.lora_int_id, group.seq_group.request_id)
            if 0 < self.num_prefill_groups < len(self.scheduled_seq_groups):
                # Sort sequence groups so that all prefills come before all
                # decodes as required by chunked prefill.
                return (not group.seq_group.is_prefill(), *key)
            return key

        self.scheduled_seq_groups = sorted(self.scheduled_seq_groups,
                                           key=key_fn)

    @property
    def lora_requests(self) -> Set[LoRARequest]:
        return {
            g.seq_group.lora_request
            for g in self.scheduled_seq_groups
            if g.seq_group.lora_request is not None
        }

    @property
    def prompt_adapter_requests(self) -> Set[PromptAdapterRequest]:
        return {
            g.seq_group.prompt_adapter_request
            for g in self.scheduled_seq_groups
            if g.seq_group.prompt_adapter_request is not None
        }
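
The prefill-before-decode ordering enforced by _sort_by_lora_ids can be illustrated with plain tuples: when the batch mixes prefills and decodes, the key sorts all prefills first, then by LoRA id and request id. A minimal sketch with made-up groups (not vLLM objects):

# (is_prefill, lora_int_id, request_id) stand-ins for scheduled groups
groups = [
    (False, 2, "d"),   # decode, LoRA 2
    (True,  1, "b"),   # prefill, LoRA 1
    (False, 1, "c"),   # decode, LoRA 1
    (True,  2, "a"),   # prefill, LoRA 2
]

def key_fn(group):
    is_prefill, lora_id, request_id = group
    # False sorts before True, so prefills come first in a mixed batch
    return (not is_prefill, lora_id, request_id)

print(sorted(groups, key=key_fn))
# [(True, 1, 'b'), (True, 2, 'a'), (False, 1, 'c'), (False, 2, 'd')]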

blocks_to_copy instance-attribute

blocks_to_copy: List[Tuple[int, int]]

blocks_to_swap_in instance-attribute

blocks_to_swap_in: List[Tuple[int, int]]

blocks_to_swap_out instance-attribute

blocks_to_swap_out: List[Tuple[int, int]]

ignored_seq_groups instance-attribute

ignored_seq_groups: List[SequenceGroup]

lora_requests property

lora_requests: Set[LoRARequest]

num_batched_tokens instance-attribute

num_batched_tokens: int

num_lookahead_slots instance-attribute

num_lookahead_slots: int

num_prefill_groups instance-attribute

num_prefill_groups: int

preempted instance-attribute

preempted: int

prompt_adapter_requests property

prompt_adapter_requests: Set[PromptAdapterRequest]

running_queue_size instance-attribute

running_queue_size: int

scheduled_seq_groups instance-attribute

scheduled_seq_groups: Sequence[ScheduledSequenceGroup]

__init__

__init__(
    scheduled_seq_groups: Sequence[ScheduledSequenceGroup],
    num_prefill_groups: int,
    num_batched_tokens: int,
    blocks_to_swap_in: List[Tuple[int, int]],
    blocks_to_swap_out: List[Tuple[int, int]],
    blocks_to_copy: List[Tuple[int, int]],
    ignored_seq_groups: List[SequenceGroup],
    num_lookahead_slots: int,
    running_queue_size: int,
    preempted: int,
) -> None

__post_init__

__post_init__()
Source code in vllm/core/scheduler.py
def __post_init__(self):
    # Swap in and swap out should never happen at the same time.
    assert not (self.blocks_to_swap_in and self.blocks_to_swap_out)

    self.num_loras: int = len(self.lora_requests)
    if self.num_loras > 0:
        self._sort_by_lora_ids()

    self.num_prompt_adapters: int = len(self.prompt_adapter_requests)

_sort_by_lora_ids

_sort_by_lora_ids()
Source code in vllm/core/scheduler.py
def _sort_by_lora_ids(self):
    assert 0 <= self.num_prefill_groups <= len(self.scheduled_seq_groups)

    def key_fn(group: ScheduledSequenceGroup):
        key = (group.seq_group.lora_int_id, group.seq_group.request_id)
        if 0 < self.num_prefill_groups < len(self.scheduled_seq_groups):
            # Sort sequence groups so that all prefills come before all
            # decodes as required by chunked prefill.
            return (not group.seq_group.is_prefill(), *key)
        return key

    self.scheduled_seq_groups = sorted(self.scheduled_seq_groups,
                                       key=key_fn)

is_empty

is_empty() -> bool
Source code in vllm/core/scheduler.py
def is_empty(self) -> bool:
    # NOTE: We do not consider the ignored sequence groups.
    return (not self.scheduled_seq_groups and not self.blocks_to_swap_in
            and not self.blocks_to_swap_out and not self.blocks_to_copy)

SchedulerPrefillOutputs dataclass

The requests that are scheduled from a waiting queue.

Could contain fresh prefill requests or preempted requests that need to be recomputed from scratch.

Source code in vllm/core/scheduler.py
@dataclass
class SchedulerPrefillOutputs:
    """The requests that are scheduled from a waiting queue.

    Could contain a fresh prefill requests or preempted requests that need
    to be recomputed from scratch.
    """

    # Selected sequences for prefill.
    seq_groups: List[ScheduledSequenceGroup]
    # Ignored sequence groups.
    ignored_seq_groups: List[SequenceGroup]
    num_lookahead_slots: int

    @classmethod
    def create_empty(cls) -> "SchedulerPrefillOutputs":
        return SchedulerPrefillOutputs(
            seq_groups=[],
            ignored_seq_groups=[],
            num_lookahead_slots=0,
        )

ignored_seq_groups instance-attribute

ignored_seq_groups: List[SequenceGroup]

num_lookahead_slots instance-attribute

num_lookahead_slots: int

seq_groups instance-attribute

seq_groups: List[ScheduledSequenceGroup]

__init__

__init__(
    seq_groups: List[ScheduledSequenceGroup],
    ignored_seq_groups: List[SequenceGroup],
    num_lookahead_slots: int,
) -> None

create_empty classmethod

create_empty() -> SchedulerPrefillOutputs
Source code in vllm/core/scheduler.py
@classmethod
def create_empty(cls) -> "SchedulerPrefillOutputs":
    return SchedulerPrefillOutputs(
        seq_groups=[],
        ignored_seq_groups=[],
        num_lookahead_slots=0,
    )

SchedulerRunningOutputs dataclass

The requests that are scheduled from a running queue.

Could contain prefills (i.e., chunked prefills) or decodes. If there is not enough memory, requests can be preempted (for recomputation) or swapped out.

Source code in vllm/core/scheduler.py
@dataclass
class SchedulerRunningOutputs:
    """The requests that are scheduled from a running queue.

    Could contain prefill (prefill that's chunked) or decodes. If there's not
    enough memory, it can be preempted (for recompute) or swapped out.
    """

    # Selected sequences that are running and in a decoding phase.
    decode_seq_groups: List[ScheduledSequenceGroup]
    # Selected sequences that are running and in a prefill phase.
    # I.e., it means the prefill has been chunked.
    prefill_seq_groups: List[ScheduledSequenceGroup]
    # The preempted sequences.
    preempted: List[SequenceGroup]
    # Sequences that are swapped out.
    swapped_out: List[SequenceGroup]
    # The blocks to swap out.
    blocks_to_swap_out: List[Tuple[int, int]]
    # The blocks to copy.
    blocks_to_copy: List[Tuple[int, int]]
    # The number of slots for lookahead decoding.
    num_lookahead_slots: int

    # Optimization for fast-access to seq_group lists
    decode_seq_groups_list: List[SequenceGroup]
    prefill_seq_groups_list: List[SequenceGroup]

    @classmethod
    def create_empty(cls) -> "SchedulerRunningOutputs":
        return SchedulerRunningOutputs(
            decode_seq_groups=[],
            prefill_seq_groups=[],
            preempted=[],
            swapped_out=[],
            blocks_to_swap_out=[],
            blocks_to_copy=[],
            num_lookahead_slots=0,
            decode_seq_groups_list=[],
            prefill_seq_groups_list=[],
        )

blocks_to_copy instance-attribute

blocks_to_copy: List[Tuple[int, int]]

blocks_to_swap_out instance-attribute

blocks_to_swap_out: List[Tuple[int, int]]

decode_seq_groups instance-attribute

decode_seq_groups: List[ScheduledSequenceGroup]

decode_seq_groups_list instance-attribute

decode_seq_groups_list: List[SequenceGroup]

num_lookahead_slots instance-attribute

num_lookahead_slots: int

preempted instance-attribute

preempted: List[SequenceGroup]

prefill_seq_groups instance-attribute

prefill_seq_groups: List[ScheduledSequenceGroup]

prefill_seq_groups_list instance-attribute

prefill_seq_groups_list: List[SequenceGroup]

swapped_out instance-attribute

swapped_out: List[SequenceGroup]

__init__

__init__(
    decode_seq_groups: List[ScheduledSequenceGroup],
    prefill_seq_groups: List[ScheduledSequenceGroup],
    preempted: List[SequenceGroup],
    swapped_out: List[SequenceGroup],
    blocks_to_swap_out: List[Tuple[int, int]],
    blocks_to_copy: List[Tuple[int, int]],
    num_lookahead_slots: int,
    decode_seq_groups_list: List[SequenceGroup],
    prefill_seq_groups_list: List[SequenceGroup],
) -> None

create_empty classmethod

create_empty() -> SchedulerRunningOutputs
Source code in vllm/core/scheduler.py
@classmethod
def create_empty(cls) -> "SchedulerRunningOutputs":
    return SchedulerRunningOutputs(
        decode_seq_groups=[],
        prefill_seq_groups=[],
        preempted=[],
        swapped_out=[],
        blocks_to_swap_out=[],
        blocks_to_copy=[],
        num_lookahead_slots=0,
        decode_seq_groups_list=[],
        prefill_seq_groups_list=[],
    )

SchedulerSwappedInOutputs dataclass

The requests that are scheduled from a swap queue.

Could contain prefills (i.e., chunked prefills) or decodes.

Source code in vllm/core/scheduler.py
@dataclass
class SchedulerSwappedInOutputs:
    """The requests that are scheduled from a swap queue.

    Could contain prefill (prefill that's chunked) or decodes.
    """

    # Selected sequences that are going to be swapped in and is in a
    # decoding phase.
    decode_seq_groups: List[ScheduledSequenceGroup]
    # Selected sequences that are going to be swapped in and in a prefill
    # phase. I.e., it means the prefill has been chunked.
    prefill_seq_groups: List[ScheduledSequenceGroup]
    # The blocks to swap in.
    blocks_to_swap_in: List[Tuple[int, int]]
    # The blocks to copy.
    blocks_to_copy: List[Tuple[int, int]]
    # The number of slots for lookahead decoding.
    num_lookahead_slots: int
    # Infeasible sequence groups.
    infeasible_seq_groups: List[SequenceGroup]

    @classmethod
    def create_empty(cls) -> "SchedulerSwappedInOutputs":
        return SchedulerSwappedInOutputs(
            decode_seq_groups=[],
            prefill_seq_groups=[],
            blocks_to_swap_in=[],
            blocks_to_copy=[],
            num_lookahead_slots=0,
            infeasible_seq_groups=[],
        )

blocks_to_copy instance-attribute

blocks_to_copy: List[Tuple[int, int]]

blocks_to_swap_in instance-attribute

blocks_to_swap_in: List[Tuple[int, int]]

decode_seq_groups instance-attribute

decode_seq_groups: List[ScheduledSequenceGroup]

infeasible_seq_groups instance-attribute

infeasible_seq_groups: List[SequenceGroup]

num_lookahead_slots instance-attribute

num_lookahead_slots: int

prefill_seq_groups instance-attribute

prefill_seq_groups: List[ScheduledSequenceGroup]

__init__

__init__(
    decode_seq_groups: List[ScheduledSequenceGroup],
    prefill_seq_groups: List[ScheduledSequenceGroup],
    blocks_to_swap_in: List[Tuple[int, int]],
    blocks_to_copy: List[Tuple[int, int]],
    num_lookahead_slots: int,
    infeasible_seq_groups: List[SequenceGroup],
) -> None

create_empty classmethod

create_empty() -> SchedulerSwappedInOutputs
Source code in vllm/core/scheduler.py
@classmethod
def create_empty(cls) -> "SchedulerSwappedInOutputs":
    return SchedulerSwappedInOutputs(
        decode_seq_groups=[],
        prefill_seq_groups=[],
        blocks_to_swap_in=[],
        blocks_to_copy=[],
        num_lookahead_slots=0,
        infeasible_seq_groups=[],
    )

SchedulingBudget dataclass

The available slots for scheduling.

TODO(sang): Right now, the budget is request_id-aware, meaning it can ignore budget updates from the same request_id. This is because, on the normal scheduling path, we update RUNNING num_seqs ahead of time, so it could be updated more than once when scheduling RUNNING requests. Since this won't happen if we only have chunked prefill scheduling, we can remove this feature from the API once chunked prefill is enabled by default.

Source code in vllm/core/scheduler.py
@dataclass
class SchedulingBudget:
    """The available slots for scheduling.

    TODO(sang): Right now, the budget is request_id-aware meaning it can ignore
    budget update from the same request_id. It is because in normal scheduling
    path, we update RUNNING num_seqs ahead of time, meaning it could be
    updated more than once when scheduling RUNNING requests. Since this won't
    happen if we only have chunked prefill scheduling, we can remove this
    feature from the API when chunked prefill is enabled by default.
    """

    token_budget: int
    max_num_seqs: int
    _request_ids_num_batched_tokens: Set[str] = field(default_factory=set)
    _request_ids_num_curr_seqs: Set[str] = field(default_factory=set)
    # Number of cached tokens in the batch.
    _num_cached_tokens: int = 0
    # Number of actual non-cached tokens in the batch.
    _num_batched_tokens: int = 0
    _num_curr_seqs: int = 0

    def can_schedule(self, *, num_new_tokens: int, num_new_seqs: int):
        # We allow num_new_tokens to be 0 when the entire sequence has
        # been cached.
        assert num_new_tokens >= 0
        assert num_new_seqs != 0
        return (self.num_batched_tokens + num_new_tokens <= self.token_budget
                and self.num_curr_seqs + num_new_seqs <= self.max_num_seqs)

    def remaining_token_budget(self):
        return self.token_budget - self.num_batched_tokens

    def add_num_batched_tokens(self,
                               req_id: str,
                               num_batched_tokens: int,
                               num_cached_tokens: int = 0):
        if req_id in self._request_ids_num_batched_tokens:
            return
        assert num_cached_tokens >= 0
        assert num_batched_tokens >= 0

        self._request_ids_num_batched_tokens.add(req_id)
        self._num_batched_tokens += num_batched_tokens
        self._num_cached_tokens += num_cached_tokens

    def subtract_num_batched_tokens(self, req_id: str,
                                    num_batched_tokens: int):
        if req_id in self._request_ids_num_batched_tokens:
            self._request_ids_num_batched_tokens.remove(req_id)
            self._num_batched_tokens -= num_batched_tokens

    def add_num_seqs(self, req_id: str, num_curr_seqs: int):
        if req_id in self._request_ids_num_curr_seqs:
            return

        self._request_ids_num_curr_seqs.add(req_id)
        self._num_curr_seqs += num_curr_seqs

    def subtract_num_seqs(self, req_id: str, num_curr_seqs: int):
        if req_id in self._request_ids_num_curr_seqs:
            self._request_ids_num_curr_seqs.remove(req_id)
            self._num_curr_seqs -= num_curr_seqs

    @property
    def num_batched_tokens(self):
        return self._num_batched_tokens

    @property
    def num_curr_seqs(self):
        return self._num_curr_seqs

    @property
    def num_cached_tokens(self):
        return self._num_cached_tokens
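
A short usage sketch (hypothetical values) of the request_id idempotence described in the TODO above: adding tokens or seqs twice under the same request_id only counts once, and cached tokens do not consume the token budget.

budget = SchedulingBudget(token_budget=2048, max_num_seqs=256)

if budget.can_schedule(num_new_tokens=512, num_new_seqs=1):
    budget.add_num_batched_tokens("req-0", num_batched_tokens=512,
                                  num_cached_tokens=128)
    budget.add_num_seqs("req-0", 1)

# A second update for the same request id is a no-op, so a RUNNING request
# that is visited more than once does not shrink the budget twice.
budget.add_num_batched_tokens("req-0", num_batched_tokens=512)
assert budget.num_batched_tokens == 512
assert budget.num_cached_tokens == 128
assert budget.remaining_token_budget() == 2048 - 512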

_num_batched_tokens class-attribute instance-attribute

_num_batched_tokens: int = 0

_num_cached_tokens class-attribute instance-attribute

_num_cached_tokens: int = 0

_num_curr_seqs class-attribute instance-attribute

_num_curr_seqs: int = 0

_request_ids_num_batched_tokens class-attribute instance-attribute

_request_ids_num_batched_tokens: Set[str] = field(
    default_factory=set
)

_request_ids_num_curr_seqs class-attribute instance-attribute

_request_ids_num_curr_seqs: Set[str] = field(
    default_factory=set
)

max_num_seqs instance-attribute

max_num_seqs: int

num_batched_tokens property

num_batched_tokens

num_cached_tokens property

num_cached_tokens

num_curr_seqs property

num_curr_seqs

token_budget instance-attribute

token_budget: int

__init__

__init__(
    token_budget: int,
    max_num_seqs: int,
    _request_ids_num_batched_tokens: Set[str] = set(),
    _request_ids_num_curr_seqs: Set[str] = set(),
    _num_cached_tokens: int = 0,
    _num_batched_tokens: int = 0,
    _num_curr_seqs: int = 0,
) -> None

add_num_batched_tokens

add_num_batched_tokens(
    req_id: str,
    num_batched_tokens: int,
    num_cached_tokens: int = 0,
)
Source code in vllm/core/scheduler.py
def add_num_batched_tokens(self,
                           req_id: str,
                           num_batched_tokens: int,
                           num_cached_tokens: int = 0):
    if req_id in self._request_ids_num_batched_tokens:
        return
    assert num_cached_tokens >= 0
    assert num_batched_tokens >= 0

    self._request_ids_num_batched_tokens.add(req_id)
    self._num_batched_tokens += num_batched_tokens
    self._num_cached_tokens += num_cached_tokens

add_num_seqs

add_num_seqs(req_id: str, num_curr_seqs: int)
Source code in vllm/core/scheduler.py
def add_num_seqs(self, req_id: str, num_curr_seqs: int):
    if req_id in self._request_ids_num_curr_seqs:
        return

    self._request_ids_num_curr_seqs.add(req_id)
    self._num_curr_seqs += num_curr_seqs

can_schedule

can_schedule(*, num_new_tokens: int, num_new_seqs: int)
Source code in vllm/core/scheduler.py
def can_schedule(self, *, num_new_tokens: int, num_new_seqs: int):
    # We allow num_new_tokens to be 0 when the entire sequence has
    # been cached.
    assert num_new_tokens >= 0
    assert num_new_seqs != 0
    return (self.num_batched_tokens + num_new_tokens <= self.token_budget
            and self.num_curr_seqs + num_new_seqs <= self.max_num_seqs)

remaining_token_budget

remaining_token_budget()
Source code in vllm/core/scheduler.py
def remaining_token_budget(self):
    return self.token_budget - self.num_batched_tokens

subtract_num_batched_tokens

subtract_num_batched_tokens(
    req_id: str, num_batched_tokens: int
)
Source code in vllm/core/scheduler.py
def subtract_num_batched_tokens(self, req_id: str,
                                num_batched_tokens: int):
    if req_id in self._request_ids_num_batched_tokens:
        self._request_ids_num_batched_tokens.remove(req_id)
        self._num_batched_tokens -= num_batched_tokens

subtract_num_seqs

subtract_num_seqs(req_id: str, num_curr_seqs: int)
Source code in vllm/core/scheduler.py
def subtract_num_seqs(self, req_id: str, num_curr_seqs: int):
    if req_id in self._request_ids_num_curr_seqs:
        self._request_ids_num_curr_seqs.remove(req_id)
        self._num_curr_seqs -= num_curr_seqs

scheduled_seq_group_builder

scheduled_seq_group_builder()
Source code in vllm/core/scheduler.py
def scheduled_seq_group_builder():
    return ScheduledSequenceGroup(SequenceGroup.__new__(SequenceGroup),
                                  token_chunk_size=0)

scheduler_running_outputs_builder

scheduler_running_outputs_builder()
Source code in vllm/core/scheduler.py
def scheduler_running_outputs_builder():
    return SchedulerRunningOutputs(decode_seq_groups=[],
                                   prefill_seq_groups=[],
                                   preempted=[],
                                   swapped_out=[],
                                   blocks_to_swap_out=[],
                                   blocks_to_copy=[],
                                   num_lookahead_slots=0,
                                   prefill_seq_groups_list=[],
                                   decode_seq_groups_list=[])

seq_group_metadata_builder

seq_group_metadata_builder()
Source code in vllm/core/scheduler.py
def seq_group_metadata_builder():
    return SequenceGroupMetadata(request_id="",
                                 is_prompt=False,
                                 seq_data={},
                                 sampling_params=None,
                                 block_tables={})
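
These builders are factory callables: schedule() above pulls pre-built objects through get_object() and reset() on its metadata cache, so a builder's job is simply to produce a blank instance that can be reused across iterations. The cache below is a simplified stand-in written for illustration, not vLLM's actual cache implementation:

from typing import Callable, Generic, List, TypeVar

T = TypeVar("T")

class SimpleObjectCache(Generic[T]):
    """Toy object pool: hand out reusable objects produced by `builder`."""

    def __init__(self, builder: Callable[[], T]):
        self._builder = builder
        self._objects: List[T] = []
        self._index = 0

    def get_object(self) -> T:
        # Reuse a previously built object when available; otherwise build one.
        if self._index == len(self._objects):
            self._objects.append(self._builder())
        obj = self._objects[self._index]
        self._index += 1
        return obj

    def reset(self) -> None:
        # Make every pooled object available again for the next iteration.
        self._index = 0

# e.g. metadata_cache = SimpleObjectCache(seq_group_metadata_builder)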