Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 9 additions & 3 deletions fastdeploy/model_executor/layers/linear.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,10 +82,16 @@ def process_loaded_weights(self, layer, weights) -> None:
layer.weight.set_value(weights)

def apply(self, layer: nn.Layer, x: paddle.Tensor) -> paddle.Tensor:
linear_out = paddle.matmul(x, layer.weight)
if layer.with_bias:
linear_out = paddle.add(linear_out, layer.bias)
return linear_out
bias = layer.bias
assert bias.dim() == 1 and bias.shape[-1] == layer.weight.shape[-1], (
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🔴 Bug: `apply` 方法中每次 forward 都执行 assert 语句检查 bias 形状,这会引入不必要的性能开销,违背了性能优化的初衷。

影响分析

  • assert 语句在每次调用 forward_cuda 时都会执行,增加了 Python 层的开销
  • bias 的形状在初始化时就已经确定,无需在每次调用时验证
  • 这会部分抵消使用 paddle.nn.functional.linear 带来的性能收益

建议修复方式
将形状检查移到权重加载阶段,例如在 process_loaded_weights 中验证:

def process_loaded_weights(self, layer, weights) -> None:
    if layer.weight.dtype != weights.dtype:
        weights = weights.cast(layer.weight.dtype)
    layer.weight.set_value(weights)
    # 在这里验证 bias 和 weight 的形状匹配(仅在权重加载时执行一次)
    if layer.with_bias:
        assert layer.bias.dim() == 1 and layer.bias.shape[0] == layer.weight.shape[-1], (
            f"bias must be 1D with size equal to the last dim of weight, "
            f"but got bias.shape={layer.bias.shape}, weight.shape[-1]={layer.weight.shape[-1]}"
        )

这样验证只在模型加载时执行一次,而非每次推理调用。

f"bias must be 1D with size equal to the last dim of weight, "
f"but got bias.shape={bias.shape}, weight.shape[-1]={layer.weight.shape[-1]}"
)
out = paddle.nn.functional.linear(x, layer.weight, bias)
else:
out = paddle.matmul(x, layer.weight)
return out


class LinearBase(nn.Layer):
Expand Down
2 changes: 1 addition & 1 deletion tests/e2e/4cards_cases/test_GLM_45_AIR_mtp_tp4.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ def test_lm_head_fp32(api_url, headers, consistent_payload):
# 校验返回内容与概率信息
assert (
resp_json["choices"][0]["message"]["content"]
== "\n<think>这个问题是关于牛顿的三大运动定律的。牛顿的三大运动定律是经典"
== "\n<think>我需要回答牛顿的三大运动定律是什么。牛顿的三大运动定律是经典"
), f"The response content is not as expected {resp_json['choices'][0]['message']['content']}."


Expand Down
2 changes: 1 addition & 1 deletion tests/e2e/4cards_cases/test_GLM_45_AIR_tp4.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,7 +181,7 @@ def test_lm_head_fp32(api_url, headers, consistent_payload):
# 校验返回内容与概率信息
assert (
resp_json["choices"][0]["message"]["content"]
== "\n<think>这个问题是关于牛顿的三大运动定律的。牛顿的三大运动定律是经典"
== "\n<think>我需要回答牛顿的三大运动定律是什么。牛顿的三大运动定律是经典"
), f"The response content is not as expected {resp_json['choices'][0]['message']['content']}."


Expand Down
6 changes: 4 additions & 2 deletions tests/e2e/utils/rollout_routing_replay_test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -156,9 +156,11 @@ def check_routing_replay_chat_completion(openai_client, moe_layer_num: int, mode
cur_save_routing_path = f"./R3_tmp/routing_replay_output_{model_name}/"
model_path = os.getenv("MODEL_PATH")
if model_path:
baseline_path = os.path.join(model_path, f"R3_BaseLine_25_uint8/routing_replay_output_baseline_{model_name}")
baseline_path = os.path.join(
model_path, f"R3_BaseLine_25_uint8_0403/routing_replay_output_baseline_{model_name}"
)
else:
baseline_path = f"./R3_BaseLine_25_uint8/routing_replay_output_baseline_{model_name}"
baseline_path = f"./R3_BaseLine_25_uint8_0403/routing_replay_output_baseline_{model_name}"
stream_baseline_path = os.path.join(baseline_path, "r3_chat_completion_stream")

nonstream_baseline_path = os.path.join(baseline_path, "r3_chat_completion_nonstream")
Expand Down
Loading