Skip to content

Commit 1561597

Browse files
authored
[API Compatibility] add aminmax op-part (#78441)
* develop paddle.aminmax to align paddle with torch.Tensor.aminmax * refine tests, improve en docs * fix * try fix coverage * add out test and add out in docs. * add CINN test, follow the test routine, remove name in docs and remove extra in op_compat.yaml * enable CINN, remove the redundant wrapper, and restructure the if-elif branch * remove example code and add coverage for CINN * try coverage * add back aminmax in __init__ * add AminmaxOpInferSymbolicShape coverage test to CINN sym shape test file * enable all device to run test for aminmax, disabled the example code check in docs * add SKIP per example code * delete example code
1 parent 35b36cc commit 1561597

17 files changed

Lines changed: 812 additions & 0 deletions

File tree

paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.cc

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,24 @@ bool AminOpInferSymbolicShape(pir::Operation *op,
307307
axis.size() == 0 /*reduce_all*/);
308308
}
309309

310+
bool AminmaxOpInferSymbolicShape(
    pir::Operation *op, pir::InferSymbolicShapeContext *infer_context) {
  // aminmax reduces `x` along `axis` and produces two outputs (min, max)
  // that always share the same reduced shape.
  const auto &axis = details::GetVectorAttr(op, "axis");
  const bool keepdim = GetBoolAttr(op, "keepdim");
  const bool reduce_all = axis.size() == 0;

  // details::ReduceInferDim only fills in result(0); bail out on failure,
  // otherwise propagate the inferred shape to result(1) so both outputs agree.
  if (!details::ReduceInferDim(op, infer_context, axis, keepdim, reduce_all)) {
    return false;
  }
  infer_context->SetShapeOrDataForValue(
      op->result(1), infer_context->GetShapeOrDataForValue(op->result(0)));
  return true;
}
310328
bool AnyOpInferSymbolicShape(pir::Operation *op,
311329
pir::InferSymbolicShapeContext *infer_context) {
312330
const auto &axis = details::GetVectorAttr(op, "axis");

paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/unary_infer_sym.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ OP_DECLARE_INFER_SYMBOLIC_SHAPE(AffineGrid)
2121
OP_DECLARE_INFER_SYMBOLIC_SHAPE(All)
2222
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amax)
2323
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Amin)
24+
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Aminmax)
2425
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Any)
2526
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmax)
2627
OP_DECLARE_INFER_SYMBOLIC_SHAPE(Argmin)

paddle/phi/infermeta/unary.cc

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4291,6 +4291,24 @@ void ReduceInferMeta(const MetaTensor& x,
42914291
ReduceInferMetaBase(x, axis, keep_dim, reduce_all, out);
42924292
}
42934293

4294+
void AMinMaxInferMeta(const MetaTensor& x,
4295+
const std::vector<int64_t>& axis,
4296+
bool keep_dim,
4297+
MetaTensor* min,
4298+
MetaTensor* max) {
4299+
bool reduce_all = false;
4300+
if (axis.empty()) {
4301+
reduce_all = true;
4302+
}
4303+
DDim out_dim = ReduceInferDim(x, axis, keep_dim, reduce_all);
4304+
min->set_dims(out_dim);
4305+
min->set_dtype(x.dtype());
4306+
min->set_layout(x.layout());
4307+
max->set_dims(out_dim);
4308+
max->set_dtype(x.dtype());
4309+
max->set_layout(x.layout());
4310+
}
4311+
42944312
DDim ReduceInferDimForIntArrayAxis(const MetaTensor& x,
42954313
const IntArray& axis,
42964314
bool keep_dim,

paddle/phi/infermeta/unary.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,12 @@ PADDLE_API void ArgMinMaxInferMeta(const MetaTensor& x,
6969
MetaTensor* out,
7070
MetaConfig config = MetaConfig());
7171

72+
// Infers the meta (dims/dtype/layout) for the two outputs of aminmax:
// `min` and `max` both get the shape of `x` reduced along `axis`
// (keeping reduced dims as 1 when `keep_dim` is true). An empty `axis`
// reduces over all dimensions.
PADDLE_API void AMinMaxInferMeta(const MetaTensor& x,
                                 const std::vector<int64_t>& axis,
                                 bool keep_dim,
                                 MetaTensor* min,
                                 MetaTensor* max);
77+
7278
PADDLE_API void MinMaxWithIndexInferMeta(const MetaTensor& x,
7379
const Scalar& axis,
7480
bool keepdims,
Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#include "paddle/phi/kernels/aminmax_grad_kernel.h"
16+
17+
#include "paddle/phi/backends/all_context.h"
18+
#include "paddle/phi/core/kernel_registry.h"
19+
#include "paddle/phi/kernels/elementwise_add_kernel.h"
20+
#include "paddle/phi/kernels/reduce_amax_grad_kernel.h"
21+
#include "paddle/phi/kernels/reduce_amin_grad_kernel.h"
22+
23+
namespace phi {
24+
25+
template <typename T, typename Context>
26+
void AMinMaxGradKernel(const Context& dev_ctx,
27+
const DenseTensor& x,
28+
const DenseTensor& min,
29+
const DenseTensor& max,
30+
const DenseTensor& min_grad,
31+
const DenseTensor& max_grad,
32+
const std::vector<int64_t>& dims,
33+
bool keep_dim,
34+
bool reduce_all,
35+
DenseTensor* x_grad) {
36+
if (x_grad && x_grad->numel() == 0) {
37+
dev_ctx.template Alloc<T>(x_grad);
38+
return;
39+
}
40+
reduce_all = recompute_reduce_all(x, dims, reduce_all);
41+
42+
// Compute amax grad contribution into x_grad
43+
ReduceAMaxGradKernel<T, Context>(
44+
dev_ctx, x, max, max_grad, dims, keep_dim, reduce_all, x_grad);
45+
46+
// Compute amin grad contribution into a temporary tensor
47+
DenseTensor amin_x_grad;
48+
amin_x_grad.Resize(x_grad->dims());
49+
dev_ctx.template Alloc<T>(&amin_x_grad);
50+
ReduceAMinGradKernel<T, Context>(
51+
dev_ctx, x, min, min_grad, dims, keep_dim, reduce_all, &amin_x_grad);
52+
53+
// x_grad = amax_grad_result + amin_grad_result
54+
Add<T, Context>(dev_ctx, *x_grad, amin_x_grad, x_grad);
55+
}
56+
57+
} // namespace phi
58+
59+
PD_REGISTER_KERNEL(aminmax_grad,
60+
CPU,
61+
ALL_LAYOUT,
62+
phi::AMinMaxGradKernel,
63+
float,
64+
double,
65+
int,
66+
int64_t) {}
67+
68+
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
69+
PD_REGISTER_KERNEL(aminmax_grad,
70+
GPU,
71+
ALL_LAYOUT,
72+
phi::AMinMaxGradKernel,
73+
float,
74+
double,
75+
int,
76+
int64_t) {}
77+
#endif
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

// Gradient kernel for aminmax: accumulates the amin and amax gradient
// contributions into `x_grad`.
//   x:                 forward input
//   min / max:         forward outputs
//   min_grad/max_grad: incoming gradients of the two outputs
//   axis/keep_dim/reduce_all: reduction configuration of the forward op
//   x_grad:            gradient w.r.t. `x` (skipped when null)
template <typename T, typename Context>
void AMinMaxGradKernel(const Context& dev_ctx,
                       const DenseTensor& x,
                       const DenseTensor& min,
                       const DenseTensor& max,
                       const DenseTensor& min_grad,
                       const DenseTensor& max_grad,
                       const std::vector<int64_t>& axis,
                       bool keep_dim,
                       bool reduce_all,
                       DenseTensor* x_grad);

}  // namespace phi
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#include "paddle/phi/kernels/aminmax_kernel.h"
16+
17+
#include "paddle/phi/backends/all_context.h"
18+
#include "paddle/phi/core/kernel_registry.h"
19+
#include "paddle/phi/kernels/reduce_amax_kernel.h"
20+
#include "paddle/phi/kernels/reduce_amin_kernel.h"
21+
22+
namespace phi {
23+
24+
template <typename T, typename Context>
25+
void AMinMaxKernel(const Context& dev_ctx,
26+
const DenseTensor& x,
27+
const std::vector<int64_t>& dims,
28+
bool keep_dim,
29+
DenseTensor* min,
30+
DenseTensor* max) {
31+
bool reduce_all = recompute_reduce_all(x, dims);
32+
AMinRawKernel<T>(dev_ctx, x, dims, keep_dim, reduce_all, min);
33+
AMaxRawKernel<T>(dev_ctx, x, dims, keep_dim, reduce_all, max);
34+
}
35+
36+
} // namespace phi
37+
38+
PD_REGISTER_KERNEL(
39+
aminmax, CPU, ALL_LAYOUT, phi::AMinMaxKernel, float, double, int, int64_t) {
40+
}
41+
42+
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
43+
PD_REGISTER_KERNEL(
44+
aminmax, GPU, ALL_LAYOUT, phi::AMinMaxKernel, float, double, int, int64_t) {
45+
}
46+
#endif
47+
48+
#if defined(PADDLE_WITH_XPU_KP)
49+
PD_REGISTER_KERNEL(aminmax, KPS, ALL_LAYOUT, phi::AMinMaxKernel, float) {}
50+
#endif
Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
2+
//
3+
// Licensed under the Apache License, Version 2.0 (the "License");
4+
// you may not use this file except in compliance with the License.
5+
// You may obtain a copy of the License at
6+
//
7+
// http://www.apache.org/licenses/LICENSE-2.0
8+
//
9+
// Unless required by applicable law or agreed to in writing, software
10+
// distributed under the License is distributed on an "AS IS" BASIS,
11+
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
// See the License for the specific language governing permissions and
13+
// limitations under the License.
14+
15+
#pragma once

#include "paddle/phi/core/dense_tensor.h"

namespace phi {

// Forward kernel for aminmax: reduces `x` along `dims` and returns both the
// minimum (`min`) and maximum (`max`) over the reduced axes in one op.
// An empty `dims` reduces over all dimensions; `keep_dim` keeps reduced
// dimensions as size 1.
template <typename T, typename Context>
void AMinMaxKernel(const Context& dev_ctx,
                   const DenseTensor& x,
                   const std::vector<int64_t>& dims,
                   bool keep_dim,
                   DenseTensor* min,
                   DenseTensor* max);

}  // namespace phi

paddle/phi/ops/yaml/backward.yaml

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -141,6 +141,16 @@
141141
kernel :
142142
func : amin_grad
143143

144+
# Gradient of aminmax: x_grad is the sum of the amin and amax gradient
# contributions (see phi::AMinMaxGradKernel); shape is unchanged from x.
- backward_op : aminmax_grad
  forward : aminmax (Tensor x, int64_t[] axis={}, bool keepdim=false) -> Tensor(min), Tensor(max)
  args : (Tensor x, Tensor min, Tensor max, Tensor min_grad, Tensor max_grad, int64_t[] axis={}, bool keepdim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : aminmax_grad
153+
144154
- backward_op : angle_grad
145155
forward : angle (Tensor x) -> Tensor(out)
146156
args : (Tensor x, Tensor out_grad)

paddle/phi/ops/yaml/op_compat.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -206,6 +206,14 @@
206206
amin_grad : GetReduceGradExpectedKernelType
207207
manual_signature : [amin]
208208

209+
# Name mapping for aminmax between legacy (fluid) and PHI operator
# definitions; the kernel signature is resolved manually.
- op : aminmax
  backward : aminmax_grad
  inputs :
    x : X
  outputs :
    {min : Min, max : Max}
  manual_signature : [aminmax]
216+
209217
- op : anchor_generator
210218
inputs:
211219
input : Input

0 commit comments

Comments
 (0)