import atexit
import torch
import numpy as np
from multiprocessing import shared_memory
from typing import Optional
from lightllm.utils.log_utils import init_logger
from lightllm.utils.dist_utils import get_current_rank_in_dp
from lightllm.server.router.dynamic_prompt.shared_arr import SharedArray
from lightllm.utils.envs_utils import get_unique_server_name
from lightllm.utils.shm_utils import create_or_link_shm

logger = init_logger(__name__)


def routing_dtype_id_to_np(dtype_id: int):
    """Map the dtype id stored in the shared routing config back to a numpy dtype."""
    if dtype_id == 1:
        return np.int8
    elif dtype_id == 2:
        return np.int16
    return np.int32


def get_routing_config_shm() -> SharedArray:
    """Cross-process routing config; slots are [num_moe_layers, topk, dtype_id, max_tokens]."""
    service_name = get_unique_server_name()
    return SharedArray(f"{service_name}_routing_config", shape=(4,), dtype=np.int32)
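
# A minimal sketch of how another process can recover the capture parameters
# (assumes init_routing_capture() has already populated the array):
#
#   cfg = get_routing_config_shm().arr
#   num_moe_layers, topk = int(cfg[0]), int(cfg[1])
#   np_dtype = routing_dtype_id_to_np(int(cfg[2]))
#   max_tokens = int(cfg[3])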


class RoutingCaptureManager:
    """Captures per-token MoE routing (top-k expert ids) during forward passes.

    Expert ids are staged in GPU capture buffers (one per microbatch), then
    flushed to a CPU routing buffer indexed by KV-cache slot.
    """

    def __init__(
        self,
        num_moe_layers: int,
        topk: int,
        num_experts: int,
        kv_cache_size: int,
        max_capture_tokens: int,
    ):
        self.num_moe_layers = num_moe_layers
        self.topk = topk
        self.num_experts = num_experts
        self.kv_cache_size = kv_cache_size

        # int8 holds expert ids up to 127; larger expert counts need int16.
        self.dtype = torch.int8 if num_experts <= 127 else torch.int16
        dtype_bytes = 1 if self.dtype == torch.int8 else 2

        # Shape: (num_moe_layers, kv_cache_size, topk) — on CPU to save GPU memory.
        # Written after forward() via flush_to_routing_buffer(), read on request finish.
        routing_buffer_size = num_moe_layers * kv_cache_size * topk * dtype_bytes
        self.routing_buffer = torch.zeros(
            (num_moe_layers, kv_cache_size, topk),
            dtype=self.dtype,
            device="cpu",
        )

        # Capture buffers: simple contiguous tensors written to during forward().
        # Two buffers, one per microbatch.
        capture_buf_size = max_capture_tokens * num_moe_layers * topk * dtype_bytes
        self._capture_buffer = [
            torch.zeros((max_capture_tokens, num_moe_layers, topk), dtype=self.dtype, device="cuda")
            for _ in range(2)
        ]

        dtype_name = "int8" if self.dtype == torch.int8 else "int16"
        logger.info(
            f"RoutingCaptureManager initialized: {num_moe_layers} MoE layers, topk={topk}, "
            f"routing_buffer(cpu)={routing_buffer_size / 1024 / 1024:.2f}MB, "
            f"capture_buffer={capture_buf_size / 1024 / 1024:.2f}MB x2, dtype={dtype_name}"
        )

    @property
    def np_dtype(self):
        return np.int8 if self.dtype == torch.int8 else np.int16

    @property
    def dtype_id(self) -> int:
        return 1 if self.dtype == torch.int8 else 2

    def capture(self, moe_layer_index: int, topk_ids: torch.Tensor, microbatch_index: int = 0) -> None:
        # topk_ids: (num_tokens, topk) expert ids for one MoE layer.
        num_tokens = topk_ids.shape[0]
        self._capture_buffer[microbatch_index][:num_tokens, moe_layer_index, :] = topk_ids.to(self.dtype)

    def flush_to_routing_buffer(self, mem_indexes: torch.Tensor, num_tokens: int, microbatch_index: int = 0) -> None:
        buf = self._capture_buffer[microbatch_index][:num_tokens]  # (num_tokens, num_moe_layers, topk)
        # Reorder to (num_moe_layers, num_tokens, topk) to match routing_buffer's layout.
        buf_t = buf.permute(1, 0, 2).cpu()
        self.routing_buffer[:, mem_indexes[:num_tokens].cpu(), :] = buf_t

    def extract_routing_data(self, mem_indexes: torch.Tensor) -> np.ndarray:
        # Returns a (num_moe_layers, len(mem_indexes), topk) copy of the routing data.
        cpu_indexes = mem_indexes.cpu() if mem_indexes.is_cuda else mem_indexes
        return self.routing_buffer[:, cpu_indexes, :].numpy()
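
# Illustrative lifecycle (a minimal sketch; the sizes and tensors below are
# invented for the example, not taken from a real model):
#
#   mgr = RoutingCaptureManager(num_moe_layers=2, topk=2, num_experts=8,
#                               kv_cache_size=1024, max_capture_tokens=16)
#   topk_ids = torch.randint(0, 8, (4, 2), device="cuda")    # (num_tokens, topk)
#   mgr.capture(moe_layer_index=0, topk_ids=topk_ids)        # inside forward()
#   mgr.capture(moe_layer_index=1, topk_ids=topk_ids)
#   mem_indexes = torch.tensor([10, 11, 12, 13], device="cuda")
#   mgr.flush_to_routing_buffer(mem_indexes, num_tokens=4)   # after forward()
#   routing = mgr.extract_routing_data(mem_indexes)          # (2, 4, 2) ndarray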


g_routing_capture_manager: Optional[RoutingCaptureManager] = None


def create_routing_capture_manager(
    num_moe_layers: int,
    topk: int,
    num_experts: int,
    kv_cache_size: int,
    max_capture_tokens: int,
) -> None:
    global g_routing_capture_manager
    assert g_routing_capture_manager is None, "RoutingCaptureManager already exists"
    g_routing_capture_manager = RoutingCaptureManager(
        num_moe_layers=num_moe_layers,
        topk=topk,
        num_experts=num_experts,
        kv_cache_size=kv_cache_size,
        max_capture_tokens=max_capture_tokens,
    )


def preallocate_routing_shm_pool(max_req_num: int, num_moe_layers: int, max_tokens: int, topk: int, np_dtype) -> None:
    """Pre-allocate POSIX SHM segments for all request slots.

    Each segment is sized for the maximum possible routing data so it can be
    reused across requests without create/destroy overhead.
    """
    dtype_bytes = np.dtype(np_dtype).itemsize
    segment_size = num_moe_layers * max_tokens * topk * dtype_bytes
    service_name = get_unique_server_name()

    for i in range(max_req_num):
        name = f"{service_name}_shm_routing_{i}"
        shm = create_or_link_shm(name, segment_size, auto_cleanup=True)
        shm.close()  # close handle; SHM persists in /dev/shm

    logger.info(
        f"Pre-allocated {max_req_num} routing SHM segments, "
        f"each {segment_size / 1024:.1f} KB (total {max_req_num * segment_size / 1024 / 1024:.1f} MB)"
    )
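
# Sketch of attaching to one request's segment from another process. `req_idx`
# is a hypothetical request-slot index, and the (num_moe_layers, max_tokens,
# topk) layout is an assumption based on how the segment is sized above:
#
#   name = f"{get_unique_server_name()}_shm_routing_{req_idx}"
#   shm = shared_memory.SharedMemory(name=name)
#   view = np.ndarray((num_moe_layers, max_tokens, topk), dtype=np_dtype, buffer=shm.buf)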


def cleanup_routing_shm_pool() -> None:
    """Unlink all pre-allocated routing SHM segments. Called at server shutdown."""
    try:
        from lightllm.utils.envs_utils import get_env_start_args

        args = get_env_start_args()
    except Exception:
        return

    service_name = get_unique_server_name()

    for i in range(args.running_max_req_size):
        name = f"{service_name}_shm_routing_{i}"
        try:
            shm = shared_memory.SharedMemory(name=name)
            shm.close()
            shm.unlink()
        except Exception:
            pass  # segment may already be gone; ignore

    config_name = f"{service_name}_routing_config"
    try:
        shm = shared_memory.SharedMemory(name=config_name)
        shm.close()
        shm.unlink()
    except Exception:
        pass


def init_routing_capture(model, num_moe_layers: int) -> None:
    dp_rank = get_current_rank_in_dp()
    logger.info(f"init_routing_capture called: num_moe_layers={num_moe_layers}, dp_rank={dp_rank}")
    if dp_rank != 0:
        logger.info(f"Skipping routing capture initialization on dp_rank={dp_rank}")
        return

    if num_moe_layers == 0:
        logger.warning(
            "enable_return_routed_experts is set but no MoE layers found. Routing capture will not be enabled."
        )
        return

    num_experts = model.config.get("n_routed_experts", model.config.get("num_experts", 0))
    topk = model.config.get("num_experts_per_tok", 0)
    assert num_experts > 0 and topk > 0, "model config must define the expert count and top-k"

    from lightllm.utils.envs_utils import get_env_start_args

    args = get_env_start_args()

    # Capture buffer must fit the max tokens in any single forward call.
    # For prefill that's batch_max_tokens; for decode it's graph_max_batch_size.
    batch_max_tokens = args.batch_max_tokens or args.max_req_total_len or 8192
    max_capture_tokens = max(batch_max_tokens, args.graph_max_batch_size)

    logger.info(
        f"Initializing routing capture: num_moe_layers={num_moe_layers}, "
        f"topk={topk}, num_experts={num_experts}, max_capture_tokens={max_capture_tokens}"
    )

    create_routing_capture_manager(
        num_moe_layers=num_moe_layers,
        topk=topk,
        num_experts=num_experts,
        kv_cache_size=model.mem_manager.size + 1,
        max_capture_tokens=max_capture_tokens,
    )

    mgr = g_routing_capture_manager
    np_dtype = mgr.np_dtype
    dtype_id = mgr.dtype_id

    max_req_total_len = args.max_req_total_len

    # Write config to cross-process SHM so consumer processes can decode the data.
    shm = get_routing_config_shm()
    shm.arr[0] = num_moe_layers
    shm.arr[1] = topk
    shm.arr[2] = dtype_id
    shm.arr[3] = max_req_total_len
    logger.info(
        f"Shared routing config set: num_moe_layers={num_moe_layers}, topk={topk}, "
        f"dtype_id={dtype_id}, max_tokens={max_req_total_len}"
    )

    preallocate_routing_shm_pool(
        max_req_num=args.running_max_req_size,
        num_moe_layers=num_moe_layers,
        max_tokens=max_req_total_len,
        topk=topk,
        np_dtype=np_dtype,
    )

    # Ensure SHM segments are removed when the process exits.
    atexit.register(cleanup_routing_shm_pool)