Skip to content

Commit 6e88052

Browse files
Merge pull request #1112 from InfiniTensor/issue/1031_T1-1-50
【比赛2025秋】T1-1-50
2 parents 9db1b6c + f9e6f1f commit 6e88052

76 files changed

Lines changed: 4639 additions & 16 deletions

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.
Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// Dispatch entry for the LogCumSumExp operator.
// Presumably computes y = log(cumsum(exp(x))) along `axis`, mirroring
// torch.logcumsumexp — TODO confirm against the backend implementation.
class LogCumSumExp {
public:
    // schema: (output y, input x, axis, exclusive, reverse)
    using schema = void (*)(Tensor, Tensor, int, bool, bool);

    // Runs the op via the dispatcher registered for the current device.
    static void execute(Tensor y, Tensor x, int axis, bool exclusive, bool reverse);

    // Per-device implementation registry for this op.
    static common::OpDispatcher<schema> &dispatcher();
};

// Functional form: allocates and returns a new result tensor.
// `exclusive`/`reverse` default to the conventional inclusive, forward scan.
Tensor logcumsumexp(Tensor x, int axis, bool exclusive = false, bool reverse = false);

// Out-parameter form: writes the result into caller-provided `y`.
void logcumsumexp_(Tensor y, Tensor x, int axis, bool exclusive = false, bool reverse = false);

} // namespace infinicore::op
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// Dispatch entry for element-wise logical AND.
class LogicalAnd {
public:
    // LogicalAnd is a binary op; the schema is (output, input1, input2).
    using schema = void (*)(Tensor, Tensor, Tensor);

    // Runs the op via the dispatcher registered for the current device.
    static void execute(Tensor output, Tensor input1, Tensor input2);

    // Per-device implementation registry for this op.
    static common::OpDispatcher<schema> &dispatcher();
};

// Functional form: allocates and returns a new result tensor.
Tensor logical_and(Tensor input1, Tensor input2);

// Out-parameter form: writes the result into caller-provided `output`.
void logical_and_(Tensor output, Tensor input1, Tensor input2);

} // namespace infinicore::op
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// Dispatch entry for element-wise logical NOT.
class LogicalNot {
public:
    // LogicalNot is a unary op; the schema is (output, input).
    using schema = void (*)(Tensor, Tensor);

    // Runs the op via the dispatcher registered for the current device.
    static void execute(Tensor output, Tensor input);

    // Per-device implementation registry for this op.
    static common::OpDispatcher<schema> &dispatcher();
};

// Functional form: returns the result as a newly constructed Tensor.
Tensor logical_not(Tensor input);

// Out-parameter form: writes the result into the given `output` Tensor.
void logical_not_(Tensor output, Tensor input);

} // namespace infinicore::op

include/infinicore/ops/unfold.hpp

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"
#include <vector>

namespace infinicore::op {

// Dispatch entry for the Unfold (im2col-style) operator.
// Presumably extracts sliding local blocks as in torch.nn.Unfold —
// TODO confirm against the backend implementation.
class Unfold {
public:
    // schema: (output, input, kernel_sizes, dilations, paddings, strides)
    using schema = void (*)(Tensor, Tensor, const std::vector<int64_t> &, const std::vector<int64_t> &, const std::vector<int64_t> &, const std::vector<int64_t> &);

    // Runs the op via the dispatcher registered for the current device.
    static void execute(Tensor output, Tensor input,
                        const std::vector<int64_t> &kernel_sizes,
                        const std::vector<int64_t> &dilations,
                        const std::vector<int64_t> &paddings,
                        const std::vector<int64_t> &strides);

    // Per-device implementation registry for this op.
    static common::OpDispatcher<schema> &dispatcher();
};

// Functional API.
// NOTE(review): the vectors are taken by value here while execute() takes
// const& — cannot change to const& without altering the mangled symbol of
// already-defined functions; revisit if the definitions are updated together.
Tensor unfold(Tensor input,
              std::vector<int64_t> kernel_sizes,
              std::vector<int64_t> dilations,
              std::vector<int64_t> paddings,
              std::vector<int64_t> strides);

// Out-parameter form: writes the result into caller-provided `output`.
void unfold_(Tensor output, Tensor input,
             std::vector<int64_t> kernel_sizes,
             std::vector<int64_t> dilations,
             std::vector<int64_t> paddings,
             std::vector<int64_t> strides);

} // namespace infinicore::op

include/infinicore/ops/vander.hpp

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
#pragma once

#include "../device.hpp"
#include "common/op.hpp"

namespace infinicore::op {

// Dispatch entry for the Vandermonde-matrix operator.
// Presumably builds a Vandermonde matrix from a 1-D input, as in
// numpy.vander — TODO confirm against the backend implementation.
class Vander {
public:
    // schema: (output, input, N, increasing)
    using schema = void (*)(Tensor, Tensor, int64_t, bool);

    // Runs the op via the dispatcher registered for the current device.
    static void execute(Tensor output, Tensor input, int64_t N, bool increasing);

    // Per-device implementation registry for this op.
    static common::OpDispatcher<schema> &dispatcher();
};

// N defaults to 0 (implying N = input.size(0), i.e., a square matrix).
Tensor vander(Tensor input, int64_t N = 0, bool increasing = false);

// Out-parameter form: writes the result into caller-provided `output`.
// Defaults added to mirror vander() and the sibling in-place ops
// (e.g. logcumsumexp_); backward compatible — existing calls are unchanged.
void vander_(Tensor output, Tensor input, int64_t N = 0, bool increasing = false);

} // namespace infinicore::op

include/infiniop.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@
5555
#include "infiniop/ops/log_softmax.h"
5656
#include "infiniop/ops/logaddexp.h"
5757
#include "infiniop/ops/logaddexp2.h"
58+
#include "infiniop/ops/logcumsumexp.h"
5859
#include "infiniop/ops/lp_norm.h"
5960
#include "infiniop/ops/masked_select.h"
6061
#include "infiniop/ops/mul.h"
@@ -91,8 +92,10 @@
9192
#include "infiniop/ops/topksoftmax.h"
9293
#include "infiniop/ops/triplet_margin_loss.h"
9394
#include "infiniop/ops/triplet_margin_with_distance_loss.h"
95+
#include "infiniop/ops/unfold.h"
9496
#include "infiniop/ops/upsample_bilinear.h"
9597
#include "infiniop/ops/upsample_nearest.h"
98+
#include "infiniop/ops/vander.h"
9699
#include "infiniop/ops/var.h"
97100
#include "infiniop/ops/var_mean.h"
98101
#include "infiniop/ops/zeros.h"
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
#ifndef __INFINIOP_LOGCUMSUMEXP_API_H__
#define __INFINIOP_LOGCUMSUMEXP_API_H__

#include "../operator_descriptor.h"

/* Opaque handle describing a configured LogCumSumExp computation. */
typedef struct InfiniopDescriptor *infiniopLogCumSumExpDescriptor_t;

/* Create a descriptor binding the op to tensor layouts y/x and the scan
 * configuration. `exclusive` and `reverse` follow the C boolean convention
 * (nonzero = true) — presumably; confirm against the implementation. */
__INFINI_C __export infiniStatus_t infiniopCreateLogCumSumExpDescriptor(infiniopHandle_t handle,
                                                                        infiniopLogCumSumExpDescriptor_t *desc_ptr,
                                                                        infiniopTensorDescriptor_t y,
                                                                        infiniopTensorDescriptor_t x,
                                                                        int axis,
                                                                        int exclusive,
                                                                        int reverse);

/* Query the scratch-workspace size required to execute LogCumSumExp. */
__INFINI_C __export infiniStatus_t infiniopGetLogCumSumExpWorkspaceSize(infiniopLogCumSumExpDescriptor_t desc,
                                                                        size_t *size);

/* Execute the op: reads x, writes y, using caller-provided workspace of at
 * least the size reported above; `stream` selects the execution queue. */
__INFINI_C __export infiniStatus_t infiniopLogCumSumExp(infiniopLogCumSumExpDescriptor_t desc,
                                                        void *workspace,
                                                        size_t workspace_size,
                                                        void *y,
                                                        const void *x,
                                                        void *stream);

/* Destroy the descriptor and release its resources. */
__INFINI_C __export infiniStatus_t infiniopDestroyLogCumSumExpDescriptor(infiniopLogCumSumExpDescriptor_t desc);

#endif // __INFINIOP_LOGCUMSUMEXP_API_H__

include/infiniop/ops/unfold.h

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
#ifndef __INFINIOP_UNFOLD_API_H__
#define __INFINIOP_UNFOLD_API_H__

#include "../operator_descriptor.h"

/* Opaque handle describing a configured Unfold computation. */
typedef struct InfiniopDescriptor *infiniopUnfoldDescriptor_t;

/* Create a descriptor binding the op to tensor layouts and window geometry.
 * NOTE(review): parameter order here is (kernel_sizes, strides, paddings,
 * dilations), which differs from the C++ op layer's
 * (kernel_sizes, dilations, paddings, strides) — verify call sites pass the
 * arrays in the order this C API expects. */
__INFINI_C __export infiniStatus_t infiniopCreateUnfoldDescriptor(infiniopHandle_t handle,
                                                                  infiniopUnfoldDescriptor_t *desc_ptr,
                                                                  infiniopTensorDescriptor_t output,
                                                                  infiniopTensorDescriptor_t input,
                                                                  const int *kernel_sizes,
                                                                  const int *strides,
                                                                  const int *paddings,
                                                                  const int *dilations);

/* Query the scratch-workspace size required to execute Unfold. */
__INFINI_C __export infiniStatus_t infiniopGetUnfoldWorkspaceSize(infiniopUnfoldDescriptor_t desc, size_t *size);

/* Execute the op: reads input, writes output, using caller-provided
 * workspace of at least the size reported above. */
__INFINI_C __export infiniStatus_t infiniopUnfold(infiniopUnfoldDescriptor_t desc,
                                                  void *workspace,
                                                  size_t workspace_size,
                                                  void *output,
                                                  const void *input,
                                                  void *stream);

/* Destroy the descriptor and release its resources. */
__INFINI_C __export infiniStatus_t infiniopDestroyUnfoldDescriptor(infiniopUnfoldDescriptor_t desc);

#endif // __INFINIOP_UNFOLD_API_H__

include/infiniop/ops/vander.h

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
#ifndef __INFINIOP_VANDER_API_H__
#define __INFINIOP_VANDER_API_H__

#include "../operator_descriptor.h"

/* Opaque handle describing a configured Vander computation. */
typedef struct InfiniopDescriptor *infiniopVanderDescriptor_t;

/* Create a descriptor binding the op to tensor layouts and parameters.
 * NOTE(review): `N` is `int` here while the C++ op layer uses `int64_t` —
 * verify the conversion at the boundary cannot truncate. `increasing`
 * follows the C boolean convention (nonzero = true) — presumably; confirm. */
__INFINI_C __export infiniStatus_t infiniopCreateVanderDescriptor(infiniopHandle_t handle,
                                                                  infiniopVanderDescriptor_t *desc_ptr,
                                                                  infiniopTensorDescriptor_t output,
                                                                  infiniopTensorDescriptor_t input,
                                                                  int N,
                                                                  int increasing);

/* Query the scratch-workspace size required to execute Vander. */
__INFINI_C __export infiniStatus_t infiniopGetVanderWorkspaceSize(infiniopVanderDescriptor_t desc, size_t *size);

/* Execute the op: reads input, writes output, using caller-provided
 * workspace of at least the size reported above. */
__INFINI_C __export infiniStatus_t infiniopVander(infiniopVanderDescriptor_t desc,
                                                  void *workspace,
                                                  size_t workspace_size,
                                                  void *output,
                                                  const void *input,
                                                  void *stream);

/* Destroy the descriptor and release its resources. */
__INFINI_C __export infiniStatus_t infiniopDestroyVanderDescriptor(infiniopVanderDescriptor_t desc);

#endif // __INFINIOP_VANDER_API_H__

python/infinicore/__init__.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -85,6 +85,9 @@
8585
from infinicore.ops.lerp import lerp
8686
from infinicore.ops.logaddexp import logaddexp
8787
from infinicore.ops.logaddexp2 import logaddexp2
88+
from infinicore.ops.logcumsumexp import logcumsumexp
89+
from infinicore.ops.logical_and import logical_and
90+
from infinicore.ops.logical_not import logical_not
8891
from infinicore.ops.masked_select import masked_select
8992
from infinicore.ops.matmul import matmul
9093
from infinicore.ops.mha_kvcache import mha_kvcache
@@ -103,6 +106,7 @@
103106
from infinicore.ops.tan import tan
104107
from infinicore.ops.topk import topk
105108
from infinicore.ops.unsqueeze import unsqueeze
109+
from infinicore.ops.vander import vander
106110
from infinicore.ops.var import var
107111
from infinicore.ops.var_mean import var_mean
108112
from infinicore.tensor import (
@@ -212,6 +216,10 @@
212216
"float_power",
213217
"flipud",
214218
"scatter",
219+
"logcumsumexp",
220+
"logical_not",
221+
"logical_and",
222+
"vander",
215223
"paged_caching",
216224
"paged_attention",
217225
"paged_attention_prefill",

0 commit comments

Comments
 (0)