Skip to content

Commit c469288

Browse files
committed
issue/896 - 为c++和python中的tensor添加打印函数
1 parent fa2a580 commit c469288

File tree

19 files changed

+1218
-33
lines changed

19 files changed

+1218
-33
lines changed

include/infinicore.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
#pragma once
22

33
#include "infinicore/device_event.hpp"
4+
#include "infinicore/io.hpp"
45
#include "infinicore/nn.hpp"
56
#include "infinicore/ops.hpp"
67
#include "infinicore/quantization.hpp"

include/infinicore/io.hpp

Lines changed: 86 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
#pragma once

#include "tensor.hpp"

#include <iostream>

namespace infinicore::print_options {

/**
 * @brief Sets the line width. After \a line_width chars, a new line is added.
 * @param line_width The line width
 */
void set_line_width(int line_width);

/**
 * @brief Sets the threshold after which summarization is triggered (default: 1000).
 * @param threshold The number of elements in the tensor that triggers summarization in the output
 */
void set_threshold(int threshold);

/**
 * @brief Sets the number of edge items.
 * If the summarization is triggered, this value defines how many items of each dimension are printed.
 * @param edge_items The number of edge items
 */
void set_edge_items(int edge_items);

/**
 * @brief Sets the precision for printing floating point values.
 * @param precision The number of digits for floating point output
 */
void set_precision(int precision);

/**
 * @brief Sets the sci mode of the floating point values when printing a Tensor.
 * @param sci_mode The sci mode: -1 for auto decision, 0 to disable, 1 to enable
 */
void set_sci_mode(int sci_mode); // -1: auto, 0: disable, 1: enable

// Defines a stream manipulator class NAME holding one int value.
// Each manipulator owns a private per-stream storage slot, allocated exactly
// once via std::ios_base::xalloc(); streaming the manipulator stores its
// value into that slot (out.iword(...)), where the Tensor operator<< can
// later read it. The constructor calls id() so the slot is allocated as soon
// as the first manipulator object is created.
#define DEFINE_LOCAL_PRINT_OPTION(NAME)                                 \
    class NAME {                                                        \
    public:                                                             \
        NAME(int value) : m_value(value) { id(); }                      \
        static int id() {                                               \
            static int id = std::ios_base::xalloc();                    \
            return id;                                                  \
        }                                                               \
        int value() const { return m_value; }                           \
                                                                        \
    private:                                                            \
        int m_value;                                                    \
    };                                                                  \
                                                                        \
    inline std::ostream &operator<<(std::ostream &out, const NAME &n) { \
        out.iword(NAME::id()) = n.value();                              \
        return out;                                                     \
    }

/**
 * @class line_width
 * io manipulator used to set the width of the lines when printing a Tensor.
 *
 * @code{.cpp}
 * namespace po = infinicore::print_options;
 * std::cout << po::line_width(100) << tensor << std::endl;
 * @endcode
 */
DEFINE_LOCAL_PRINT_OPTION(line_width)

/**
 * io manipulator used to set the threshold after which summarization is triggered.
 */
DEFINE_LOCAL_PRINT_OPTION(threshold)

/**
 * io manipulator used to set the number of edge items if the summarization is triggered.
 */
DEFINE_LOCAL_PRINT_OPTION(edge_items)

/**
 * io manipulator used to set the precision of the floating point values when printing a Tensor.
 */
DEFINE_LOCAL_PRINT_OPTION(precision)

// NOTE(review): set_sci_mode() exists but no `sci_mode` manipulator is
// generated here — confirm whether that asymmetry is intentional.
// NOTE(review): DEFINE_LOCAL_PRINT_OPTION stays defined for every includer;
// if no other translation unit reuses it, consider #undef-ing it after the
// last expansion above.

} // namespace infinicore::print_options

include/infinicore/nn/module.hpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
#include <type_traits>
77
#include <unordered_map>
88
#include <vector>
9-
#include <spdlog/spdlog.h>
109

1110
namespace infinicore::nn {
1211
class Module {

include/infinicore/nn/rope.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
#include "../tensor.hpp"
55
#include "module.hpp"
66
#include <memory>
7+
#include <cmath>
78

89
namespace infinicore::nn {
910

include/infinicore/tensor.hpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,7 @@ class Tensor {
9090
Tensor(std::shared_ptr<TensorImpl> impl) : impl_(std::move(impl)) {}
9191
std::shared_ptr<TensorImpl> impl_;
9292
friend class TensorImpl;
93+
friend std::ostream &operator<<(std::ostream &out, const Tensor &tensor);
9394
};
9495

9596
class TensorImpl : public std::enable_shared_from_this<TensorImpl> {

python/infinicore/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77

88
import infinicore.context as context
99
import infinicore.nn as nn
10+
from infinicore._tensor_str import printoptions, set_printoptions
1011

1112
# Import context functions
1213
from infinicore.context import (
@@ -252,6 +253,8 @@
252253
"var",
253254
"topk",
254255
"all",
256+
"set_printoptions",
257+
"printoptions",
255258
]
256259

257260
use_ntops = False

python/infinicore/_tensor_str.py

Lines changed: 122 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,122 @@
1+
import contextlib
2+
import dataclasses
3+
from typing import Any, Optional
4+
5+
from infinicore.lib import _infinicore
6+
7+
8+
@dataclasses.dataclass
class __PrinterOptions:
    # Number of digits printed for floating point values.
    precision: int = 4
    # Element count above which the output is summarized with ellipses.
    threshold: float = 1000
    # Items shown at the start and end of each dimension when summarizing.
    edgeitems: int = 3
    # Maximum number of characters per output line.
    linewidth: int = 80
    # True/False forces scientific notation on/off; None lets the backend decide.
    sci_mode: Optional[bool] = None


# Process-wide options shared by set_printoptions() / printoptions();
# kept as a dataclass so get_printoptions() can snapshot it via asdict().
PRINT_OPTS = __PrinterOptions()
18+
19+
20+
def set_printoptions(
    precision=None,
    threshold=None,
    edgeitems=None,
    linewidth=None,
    profile=None,
    sci_mode=None,
):
    r"""Set options for printing.

    Args:
        precision: Number of digits of precision for floating point output (default = 4).
        threshold: Total number of array elements which trigger summarization rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of each dimension (default = 3).
        linewidth: The number of characters per line (default = 80).
        profile: Sane defaults for pretty printing. Can override with any of the above options. (any one of `default`, `short`, `full`)
        sci_mode: Enable (True) or disable (False) scientific notation.
            If None (default) is specified, the value is automatically chosen by the framework.

    Raises:
        ValueError: If ``profile`` is given and is not one of ``'default'``, ``'short'``, ``'full'``.

    Example::
        >>> # Limit the precision of elements
        >>> infinicore.set_printoptions(precision=2)
    """
    if profile is not None:
        if profile == "default":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80
        elif profile == "short":
            PRINT_OPTS.precision = 2
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 2
            PRINT_OPTS.linewidth = 80
        elif profile == "full":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = 2147483647  # CPP_INT32_MAX
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80
        else:
            raise ValueError(
                f"Invalid profile: {profile}. the profile must be one of 'default', 'short', 'full'"
            )

    # Explicit keyword arguments take precedence over the chosen profile.
    if precision is not None:
        PRINT_OPTS.precision = precision
    if threshold is not None:
        PRINT_OPTS.threshold = threshold
    if edgeitems is not None:
        PRINT_OPTS.edgeitems = edgeitems
    if linewidth is not None:
        PRINT_OPTS.linewidth = linewidth
    # sci_mode is assigned unconditionally: passing None resets it to "auto",
    # mirroring torch.set_printoptions semantics.
    PRINT_OPTS.sci_mode = sci_mode

    # Mirror the options into the C++ backend so printing from C++ and
    # Python stays consistent.
    # NOTE(review): sci_mode may be None here — confirm the binding of
    # _infinicore.set_printoptions accepts None for that parameter.
    _infinicore.set_printoptions(
        PRINT_OPTS.precision,
        PRINT_OPTS.threshold,
        PRINT_OPTS.edgeitems,
        PRINT_OPTS.linewidth,
        PRINT_OPTS.sci_mode,
    )
81+
82+
83+
def get_printoptions() -> dict[str, Any]:
    r"""Return the active print options as a dictionary.

    The result can be passed back as ``**kwargs`` to ``set_printoptions()``,
    which is how ``printoptions()`` restores the previous state.
    """
    snapshot = dataclasses.asdict(PRINT_OPTS)
    return snapshot
88+
89+
90+
@contextlib.contextmanager
def printoptions(
    precision=None, threshold=None, edgeitems=None, linewidth=None, sci_mode=None
):
    r"""Context manager that temporarily changes the print options.

    The options active on entry are restored on exit, even when the body
    raises an exception.
    """
    saved = get_printoptions()
    set_printoptions(
        precision=precision,
        threshold=threshold,
        edgeitems=edgeitems,
        linewidth=linewidth,
        sci_mode=sci_mode,
    )
    try:
        yield
    finally:
        # Restore the snapshot taken before the temporary overrides.
        set_printoptions(**saved)
107+
108+
109+
def _str(self):
    # Render the tensor through the C++ printer, then rewrite the trailing
    # metadata so the repr shows the Python-side dtype/device objects.
    rendered = self._underlying.__str__()
    dtype_suffix = ", dtype=" + self.dtype.__repr__()

    # Keep everything before the C++ ", dtype=.../device=..." tail.
    # NOTE(review): splitting on ", d" assumes that substring never occurs in
    # the rendered element data — confirm against the C++ formatter.
    text = rendered.split(", d")[0]
    if self.device.type != "cpu":
        # Only non-CPU tensors get an explicit device annotation.
        text += ", device='" + self.device.__str__() + "'"
    text += dtype_suffix + ")\n"

    return text
120+
121+
122+
# Push the default print options to the C++ backend at import time so both
# sides start out consistent.
set_printoptions()

python/infinicore/tensor.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@
66
import infinicore.dtype
77
from infinicore.lib import _infinicore
88

9+
from ._tensor_str import _str
910
from .utils import (
1011
infinicore_to_numpy_dtype,
1112
numpy_to_infinicore_dtype,
@@ -130,6 +131,9 @@ def __mul__(self, other):
130131
def narrow(self, dim, start, length):
131132
return infinicore.narrow(self, dim, start, length)
132133

134+
def __repr__(self):
135+
return _str(self)
136+
133137

134138
def empty(size, *, dtype=None, device=None, pin_memory=False):
135139
return Tensor(

python/infinicore/utils.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@ def to_torch_dtype(infini_dtype):
1515
return torch.float16
1616
elif infini_dtype == infinicore.float32:
1717
return torch.float32
18+
elif infini_dtype == infinicore.float64:
19+
return torch.float64
1820
elif infini_dtype == infinicore.bfloat16:
1921
return torch.bfloat16
2022
elif infini_dtype == infinicore.int8:
@@ -27,6 +29,8 @@ def to_torch_dtype(infini_dtype):
2729
return torch.int64
2830
elif infini_dtype == infinicore.uint8:
2931
return torch.uint8
32+
elif infini_dtype == infinicore.bool:
33+
return torch.bool
3034
else:
3135
raise ValueError(f"Unsupported infinicore dtype: {infini_dtype}")
3236

@@ -75,6 +79,8 @@ def numpy_to_infinicore_dtype(numpy_dtype):
7579
return infinicore.int64
7680
elif numpy_dtype == np.uint8:
7781
return infinicore.uint8
82+
elif numpy_dtype == np.bool_:
83+
return infinicore.bool
7884
else:
7985
raise ValueError(f"Unsupported numpy dtype: {numpy_dtype}")
8086

@@ -106,5 +112,7 @@ def infinicore_to_numpy_dtype(infini_dtype):
106112
return np.int64
107113
elif infini_dtype == infinicore.uint8:
108114
return np.uint8
115+
elif infini_dtype == infinicore.bool:
116+
return np.bool_
109117
else:
110118
raise ValueError(f"Unsupported infinicore dtype: {infini_dtype}")

src/infinicore-test/test_nn_module.cc

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -898,8 +898,8 @@ TestResult NNModuleTest::testModuleLinear() {
898898

899899
// Test forward with residual connection
900900
spdlog::info("Testing Linear forward with residual connection");
901-
auto residual = infinicore::Tensor::ones({2, 4}, infinicore::DataType::F32, infinicore::Device());
902-
auto output_with_residual = m1.forward(input1, residual);
901+
// auto residual = infinicore::Tensor::ones({2, 4}, infinicore::DataType::F32, infinicore::Device());
902+
auto output_with_residual = m1.forward(input1);
903903
if (output_with_residual->shape() != std::vector<size_t>({2, 4})) {
904904
spdlog::error("Linear output with residual shape mismatch. Expected {{2, 4}}, got different shape");
905905
return false;
@@ -911,10 +911,10 @@ TestResult NNModuleTest::testModuleLinear() {
911911

912912
// Create test data with known values for verification
913913
auto test_input = infinicore::Tensor::ones({2, 8}, infinicore::DataType::F32, infinicore::Device());
914-
auto test_residual = infinicore::Tensor::ones({2, 4}, infinicore::DataType::F32, infinicore::Device());
914+
// auto test_residual = infinicore::Tensor::ones({2, 4}, infinicore::DataType::F32, infinicore::Device());
915915

916916
// Get InfiniCore result
917-
auto infinicore_output = m1.forward(test_input, test_residual);
917+
auto infinicore_output = m1.forward(test_input);
918918

919919
// Compute naive result: output = input @ weight.T + bias + residual
920920
auto naive_output = infinicore::Tensor::empty({2, 4}, infinicore::DataType::F32, infinicore::Device());
@@ -935,7 +935,7 @@ TestResult NNModuleTest::testModuleLinear() {
935935
infinicore::op::add_(naive_output, matmul_result, bias_view);
936936

937937
// Add residual
938-
infinicore::op::add_(naive_output, naive_output, test_residual);
938+
// infinicore::op::add_(naive_output, naive_output, test_residual);
939939

940940
// Compare results with actual value checking
941941
if (infinicore_output->shape() != naive_output->shape()) {
@@ -956,10 +956,10 @@ TestResult NNModuleTest::testModuleLinear() {
956956
// Test computation correctness without bias (using m2)
957957
spdlog::info("Testing computation correctness without bias");
958958
auto test_input_no_bias = infinicore::Tensor::ones({1, 16}, infinicore::DataType::F32, infinicore::Device());
959-
auto test_residual_no_bias = infinicore::Tensor::ones({1, 3}, infinicore::DataType::F32, infinicore::Device());
959+
// auto test_residual_no_bias = infinicore::Tensor::ones({1, 3}, infinicore::DataType::F32, infinicore::Device());
960960

961961
// Get InfiniCore result (no bias)
962-
auto infinicore_output_no_bias = m2.forward(test_input_no_bias, test_residual_no_bias);
962+
auto infinicore_output_no_bias = m2.forward(test_input_no_bias);
963963

964964
// Compute naive result without bias: output = input @ weight.T + residual
965965
auto naive_output_no_bias = infinicore::Tensor::empty({1, 3}, infinicore::DataType::F32, infinicore::Device());
@@ -970,7 +970,7 @@ TestResult NNModuleTest::testModuleLinear() {
970970
auto matmul_result_no_bias = infinicore::op::matmul(test_input_no_bias, weight_t_no_bias); // [1, 3]
971971

972972
// Add residual
973-
infinicore::op::add_(naive_output_no_bias, matmul_result_no_bias, test_residual_no_bias);
973+
// infinicore::op::add_(naive_output_no_bias, matmul_result_no_bias, test_residual_no_bias);
974974

975975
// Compare results with actual value checking
976976
if (infinicore_output_no_bias->shape() != naive_output_no_bias->shape()) {

0 commit comments

Comments
 (0)