// cublas_dlibapi.cpp — forked from davisking/dlib (237 lines, 202 loc, 8.52 KB).
// Copyright (C) 2015 Davis E. King (davis@dlib.net)
// License: Boost Software License See LICENSE.txt for the full license.
#ifndef DLIB_DNN_CuBLAS_CPP_
#define DLIB_DNN_CuBLAS_CPP_
#ifdef DLIB_USE_CUDA
#include "cublas_dlibapi.h"
#include "cuda_utils.h"

#include <cublas_v2.h>

#include <sstream>
#include <vector>
// Translates a cublasStatus_t into a short human readable message.
//
// Previously only NOT_INITIALIZED and ALLOC_FAILED were translated and every
// other failure collapsed into the generic default string; this now covers the
// statuses documented by the cuBLAS API.  Unrecognized codes still fall through
// to the generic message (CHECK_CUBLAS prints the numeric code as well).
static const char* cublas_get_error_string(cublasStatus_t s)
{
    switch(s)
    {
        case CUBLAS_STATUS_NOT_INITIALIZED:
            return "CUDA Runtime API initialization failed.";
        case CUBLAS_STATUS_ALLOC_FAILED:
            return "CUDA Resources could not be allocated.";
        case CUBLAS_STATUS_INVALID_VALUE:
            return "An unsupported value or parameter was passed to cuBLAS.";
        case CUBLAS_STATUS_ARCH_MISMATCH:
            return "The device does not support the requested cuBLAS feature.";
        case CUBLAS_STATUS_MAPPING_ERROR:
            return "Access to the GPU memory space failed.";
        case CUBLAS_STATUS_EXECUTION_FAILED:
            return "A cuBLAS GPU kernel failed to execute.";
        case CUBLAS_STATUS_INTERNAL_ERROR:
            return "An internal cuBLAS operation failed.";
        default:
            return "A call to cuBLAS failed";
    }
}
// Check the return value of a call to the cuBLAS runtime for an error condition.
// Evaluates `call` once; on any status other than CUBLAS_STATUS_SUCCESS it
// throws dlib::cublas_error with a message containing the call text, file,
// line, numeric status code, and a human readable reason from
// cublas_get_error_string().  The do{}while(false) wrapper makes the macro
// behave as a single statement, so it is safe inside unbraced if/else bodies.
// (No comments inside the macro itself: a '//' would swallow the
// line-continuation backslash that follows it.)
#define CHECK_CUBLAS(call) \
do{ \
    const cublasStatus_t error = call; \
    if (error != CUBLAS_STATUS_SUCCESS) \
    { \
        std::ostringstream sout; \
        sout << "Error while calling " << #call << " in file " << __FILE__ << ":" << __LINE__ << ". ";\
        sout << "code: " << error << ", reason: " << cublas_get_error_string(error);\
        throw dlib::cublas_error(sout.str()); \
    } \
}while(false)
namespace dlib
{
namespace cuda
{
// -----------------------------------------------------------------------------------
// Cache of cuBLAS handles, one slot per CUDA device id.  Handles are created
// lazily the first time get_handle() is called with that device current, and
// destroyed when the context itself is destroyed.  Not copyable because it
// owns the handles it stores.  (Used via a thread_local instance in
// context(), giving each thread its own set of handles.)
class cublas_context
{
public:
    // not copyable
    cublas_context(const cublas_context&) = delete;
    cublas_context& operator=(const cublas_context&) = delete;

    cublas_context()
    {
        // Start with room for 16 devices; get_handle() grows this on demand.
        handles.resize(16);
    }

    ~cublas_context()
    {
        // Destroy only the handles that were actually created.
        for (auto handle : handles)
        {
            if (handle)
                cublasDestroy(handle);
        }
    }

    // Returns the handle associated with the currently selected CUDA device,
    // creating it on first use.
    cublasHandle_t get_handle (
    )
    {
        int device;
        CHECK_CUDA(cudaGetDevice(&device));
        // Make room for more devices if this id has never been seen before.
        if (device >= (long)handles.size())
            handles.resize(device+16);
        // Lazily create the handle for this device.
        if (!handles[device])
            CHECK_CUBLAS(cublasCreate(&handles[device]));
        return handles[device];
    }

private:
    std::vector<cublasHandle_t> handles;
};
// Returns the cuBLAS handle for the currently selected CUDA device, creating
// it lazily.  The backing cublas_context is thread_local, so each thread gets
// its own handles and no cross-thread sharing occurs.
static cublasHandle_t context()
{
    thread_local cublas_context ctx;
    return ctx.get_handle();
}
// -----------------------------------------------------------------------------------
// Computes dest = alpha*L*R + beta*dest on the GPU, where L is lhs (optionally
// transposed) and R is rhs (optionally transposed).
//
// cuBLAS expects column major storage while these tensors are addressed row
// major here.  A row major matrix is the transpose of the same memory viewed
// column major, so we use the identity (L*R)^T = R^T * L^T and hand cuBLAS the
// operands in swapped order (rhs first, lhs second), with dest's dimensions
// likewise swapped.
//
// mode == CHANNEL_WISE: each tensor is treated as one 2D matrix with
//   num_samples() rows and size()/num_samples() columns, and a single gemm is
//   issued.
// mode == PLANE_WISE: one gemm is issued per (sample, channel) plane, each
//   plane being an nr() x nc() matrix.
void gemm (
    float beta,
    tensor& dest,
    float alpha,
    const tensor& lhs,
    bool trans_lhs,
    const tensor& rhs,
    bool trans_rhs,
    operation_mode mode
)
{
    if (mode == operation_mode::CHANNEL_WISE)
    {
        // Recall that BLAS uses column major order so to deal with that we flip the
        // order of the lhs and rhs arguments.
        const auto transa = trans_lhs ? CUBLAS_OP_T : CUBLAS_OP_N;
        const auto transb = trans_rhs ? CUBLAS_OP_T : CUBLAS_OP_N;

        // View each tensor as a num_samples() x (size()/num_samples()) matrix.
        const int dest_nr = dest.num_samples();
        const int dest_nc = dest.size() / dest_nr;
        const int lhs_nr = lhs.num_samples();
        const int lhs_nc = lhs.size() / lhs_nr;
        const int rhs_nr = rhs.num_samples();
        const int rhs_nc = rhs.size() / rhs_nr;

        // Verify the (possibly transposed) operands are conformable with dest,
        // one case per transpose combination.
        if (trans_lhs && trans_rhs)
        {
            DLIB_ASSERT(dest_nr == lhs_nc &&
                        dest_nc == rhs_nr &&
                        lhs_nr == rhs_nc)
        }
        else if (!trans_lhs && trans_rhs)
        {
            DLIB_ASSERT(dest_nr == lhs_nr &&
                        dest_nc == rhs_nr &&
                        lhs_nc == rhs_nc)
        }
        else if (trans_lhs && !trans_rhs)
        {
            DLIB_ASSERT(dest_nr == lhs_nc &&
                        dest_nc == rhs_nc &&
                        lhs_nr == rhs_nr)
        }
        else
        {
            DLIB_ASSERT(dest_nr == lhs_nr &&
                        dest_nc == rhs_nc &&
                        lhs_nc == rhs_nr)
        }

        // Inner (shared) dimension of the product.
        const int k = trans_rhs ? rhs_nc : rhs_nr;
        // Arguments are passed swapped (rhs before lhs, transb before transa,
        // dest_nc before dest_nr) per the row/column major trick above; the
        // leading dimensions are the row major column counts.
        CHECK_CUBLAS(cublasSgemm(context(),
                                 transb,
                                 transa,
                                 dest_nc, dest_nr, k,
                                 &alpha,
                                 rhs.device(), rhs_nc,
                                 lhs.device(), lhs_nc,
                                 &beta,
                                 dest.device(), dest_nc));
    }
    else if (mode == operation_mode::PLANE_WISE)
    {
        const auto transa = trans_lhs ? CUBLAS_OP_T : CUBLAS_OP_N;
        const auto transb = trans_rhs ? CUBLAS_OP_T : CUBLAS_OP_N;

        // An operand flagged as a "2D matrix" is reused (broadcast) for every
        // plane below instead of advancing plane by plane.
        const bool lhs_is_matrix = is_2d_matrix(lhs);
        const bool rhs_is_matrix = is_2d_matrix(rhs);
        const bool dest_is_matrix = is_2d_matrix(dest);

        // Number of elements in one nr() x nc() plane of each tensor.
        const size_t lhs_plane_size = lhs.nr() * lhs.nc();
        const size_t rhs_plane_size = rhs.nr() * rhs.nc();
        const size_t dest_plane_size = dest.nr() * dest.nc();

        // NOTE(review): num_samples is assigned on every path below; only
        // num_channels is initialized here.
        long num_samples, num_channels = std::min({ lhs.k(), rhs.k(), dest.k() });
        if (lhs_is_matrix && rhs_is_matrix && dest_is_matrix)
            num_samples = 1;
        else if (!lhs_is_matrix && rhs_is_matrix)
            num_samples = lhs.num_samples();
        else
            num_samples = std::min({ lhs.num_samples(), rhs.num_samples(), dest.num_samples() });

        // For a "2D matrix" tensor whose data actually lives in the
        // (num_samples, k) dimensions rather than (nr, nc), use those as the
        // matrix shape.
        size_t lhs_rows = lhs.nr();
        size_t lhs_cols = lhs.nc();
        if (lhs_is_matrix && (lhs.num_samples() > 1 || lhs.k() > 1)) {
            lhs_rows = lhs.num_samples();
            lhs_cols = lhs.k();
        }
        size_t rhs_rows = rhs.nr();
        size_t rhs_cols = rhs.nc();
        if (rhs_is_matrix && (rhs.num_samples() > 1 || rhs.k() > 1)) {
            rhs_rows = rhs.num_samples();
            rhs_cols = rhs.k();
        }
        size_t dest_rows = dest.nr();
        size_t dest_cols = dest.nc();
        if (dest_is_matrix && (dest.num_samples() > 1 || dest.k() > 1)) {
            dest_rows = dest.num_samples();
            dest_cols = dest.k();
        }

        // One gemm per (sample, channel) plane.  Matrix operands keep the same
        // base pointer for every plane; the others advance by whole planes.
        // NOTE(review): the (b*num_channels + c)*plane_size offset assumes the
        // planes are stored contiguously in sample-major order and that each
        // non-matrix operand really has num_samples x num_channels planes --
        // confirm against dlib's tensor layout for mixed-shape operands.
        for (long b = 0; b < num_samples; ++b)
        {
            for (long c = 0; c < num_channels; ++c)
            {
                auto lhs_slice = lhs_is_matrix ? lhs.device() :
                    lhs.device() + (b * num_channels + c) * lhs_plane_size;
                auto rhs_slice = rhs_is_matrix ? rhs.device() :
                    rhs.device() + (b * num_channels + c) * rhs_plane_size;
                auto dest_slice = dest_is_matrix ? dest.device() :
                    dest.device() + (b * num_channels + c) * dest_plane_size;

                // Same row/column major operand-swap trick as the CHANNEL_WISE case.
                const int k = trans_rhs ? rhs_cols : rhs_rows;
                CHECK_CUBLAS(cublasSgemm(
                    context(),
                    transb, transa,
                    dest_cols, dest_rows, k,
                    &alpha,
                    rhs_slice, rhs_cols,
                    lhs_slice, lhs_cols,
                    &beta,
                    dest_slice, dest_cols
                ));
            }
        }
    }
}
// ------------------------------------------------------------------------------------
}
}
#endif // DLIB_USE_CUDA
#endif // DLIB_DNN_CuBLAS_CPP_