Skip to content

Commit 5f57686

Browse files
committed
[Jenkins] auto-formatting by clang-format version 6.0.0-1ubuntu2~16.04.1 (tags/RELEASE_600/final)
1 parent ba10d73 commit 5f57686

10 files changed

Lines changed: 62 additions & 50 deletions

stan/math/fwd/fun/log_sum_exp.hpp

Lines changed: 10 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -52,15 +52,17 @@ inline fvar<T> log_sum_exp(const fvar<T>& x1, double x2) {
5252
*/
5353
template <typename T, require_container_st<is_fvar, T>* = nullptr>
5454
inline auto log_sum_exp(const T& x) {
55-
return apply_vector_unary<ref_type_t<T>>::reduce(to_ref(x), [&](const auto& v) {
56-
using T_fvar_inner = typename value_type_t<decltype(v)>::Scalar;
57-
using mat_type = Eigen::Matrix<T_fvar_inner, -1, -1>;
58-
mat_type vals = v.val();
59-
mat_type exp_vals = vals.array().exp();
55+
return apply_vector_unary<ref_type_t<T>>::reduce(
56+
to_ref(x), [&](const auto& v) {
57+
using T_fvar_inner = typename value_type_t<decltype(v)>::Scalar;
58+
using mat_type = Eigen::Matrix<T_fvar_inner, -1, -1>;
59+
mat_type vals = v.val();
60+
mat_type exp_vals = vals.array().exp();
6061

61-
return fvar<T_fvar_inner>(
62-
log_sum_exp(vals), v.d().cwiseProduct(exp_vals).sum() / exp_vals.sum());
63-
});
62+
return fvar<T_fvar_inner>(
63+
log_sum_exp(vals),
64+
v.d().cwiseProduct(exp_vals).sum() / exp_vals.sum());
65+
});
6466
}
6567

6668
} // namespace math

stan/math/fwd/fun/quad_form_sym.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,8 @@ inline promote_scalar_t<return_type_t<EigMat1, EigMat2>, EigMat2> quad_form_sym(
3333
check_multiplicable("quad_form_sym", "A", A, "B", B);
3434
check_symmetric("quad_form_sym", "A", A);
3535
const auto& B_ref = to_ref(B);
36-
promote_scalar_t<T_ret, EigMat2> ret(multiply(B_ref.transpose(), multiply(A, B_ref)));
36+
promote_scalar_t<T_ret, EigMat2> ret(
37+
multiply(B_ref.transpose(), multiply(A, B_ref)));
3738
return T_ret(0.5) * (ret + ret.transpose());
3839
}
3940

stan/math/prim/err/check_symmetric.hpp

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -28,9 +28,8 @@ namespace math {
2828
* main diagonal is <code>NaN</code>
2929
*/
3030
template <typename EigMat, require_eigen_t<EigMat>* = nullptr>
31-
inline void check_symmetric(
32-
const char* function, const char* name,
33-
const EigMat& y) {
31+
inline void check_symmetric(const char* function, const char* name,
32+
const EigMat& y) {
3433
check_square(function, name, y);
3534
using std::fabs;
3635

stan/math/prim/fun/log_mix.hpp

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -88,12 +88,15 @@ return_type_t<T_theta, T_lam> log_mix(const T_theta& theta,
8888
check_bounded(function, "theta", theta_ref, 0, 1);
8989
check_finite(function, "lambda", lambda_ref);
9090

91-
const auto& theta_dbl = to_ref(value_of(as_column_vector_or_scalar(theta_ref)));
92-
const auto& lam_dbl = to_ref(value_of(as_column_vector_or_scalar(lambda_ref)));
91+
const auto& theta_dbl
92+
= to_ref(value_of(as_column_vector_or_scalar(theta_ref)));
93+
const auto& lam_dbl
94+
= to_ref(value_of(as_column_vector_or_scalar(lambda_ref)));
9395

9496
T_partials_return logp = log_sum_exp(log(theta_dbl) + lam_dbl);
9597

96-
operands_and_partials<T_theta_ref, T_lam_ref> ops_partials(theta_ref, lambda_ref);
98+
operands_and_partials<T_theta_ref, T_lam_ref> ops_partials(theta_ref,
99+
lambda_ref);
97100
if (!is_constant_all<T_lam, T_theta>::value) {
98101
T_partials_vec theta_deriv = (lam_dbl.array() - logp).exp();
99102
if (!is_constant_all<T_lam>::value) {
@@ -160,7 +163,8 @@ return_type_t<T_theta, std::vector<T_lam>> log_mix(
160163
check_consistent_sizes(function, "theta", theta, "lambda", lambda[n]);
161164
}
162165

163-
const auto& theta_dbl = to_ref(value_of(as_column_vector_or_scalar(theta_ref)));
166+
const auto& theta_dbl
167+
= to_ref(value_of(as_column_vector_or_scalar(theta_ref)));
164168

165169
T_partials_mat lam_dbl(M, N);
166170
for (int n = 0; n < N; ++n) {
@@ -173,7 +177,8 @@ return_type_t<T_theta, std::vector<T_lam>> log_mix(
173177
logp[n] = log_sum_exp(logp_tmp.col(n));
174178
}
175179

176-
operands_and_partials<T_theta_ref, T_lamvec_type> ops_partials(theta_ref, lambda);
180+
operands_and_partials<T_theta_ref, T_lamvec_type> ops_partials(theta_ref,
181+
lambda);
177182
if (!is_constant_all<T_theta, T_lam>::value) {
178183
T_partials_mat derivs = exp(lam_dbl.rowwise() - logp.transpose());
179184
if (!is_constant_all<T_theta>::value) {

stan/math/prim/fun/mdivide_left_spd.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,8 @@ mdivide_left_spd(const EigMat1& A, const EigMat2& b) {
3939

4040
auto llt
4141
= Eigen::Matrix<return_type_t<EigMat1, EigMat2>,
42-
EigMat1::RowsAtCompileTime, EigMat1::ColsAtCompileTime>(A_ref)
42+
EigMat1::RowsAtCompileTime, EigMat1::ColsAtCompileTime>(
43+
A_ref)
4344
.llt();
4445
check_pos_definite(function, "A", llt);
4546
return llt.solve(

stan/math/prim/fun/mdivide_right_tri.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -95,7 +95,8 @@ mdivide_right_tri(const EigMat1& b, const EigMat2& A) {
9595
return from_matrix_cl(C_cl);
9696
} else {
9797
#endif
98-
return to_ref(A).template triangularView<TriView>()
98+
return to_ref(A)
99+
.template triangularView<TriView>()
99100
.transpose()
100101
.solve(b.transpose())
101102
.transpose();

stan/math/prim/fun/trace_gen_quad_form.hpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@ inline auto trace_gen_quad_form(const TD &D, const TA &A, const TB &B) {
3838
check_square("trace_gen_quad_form", "D", D);
3939
check_multiplicable("trace_gen_quad_form", "A", A, "B", B);
4040
check_multiplicable("trace_gen_quad_form", "B", B, "D", D);
41-
const auto& B_ref = to_ref(B);
41+
const auto &B_ref = to_ref(B);
4242
return multiply(B_ref, D.transpose()).cwiseProduct(multiply(A, B_ref)).sum();
4343
}
4444

@@ -67,7 +67,7 @@ inline double trace_gen_quad_form(const TD &D, const TA &A, const TB &B) {
6767
check_square("trace_gen_quad_form", "D", D);
6868
check_multiplicable("trace_gen_quad_form", "A", A, "B", B);
6969
check_multiplicable("trace_gen_quad_form", "B", B, "D", D);
70-
const auto& B_ref = to_ref(B);
70+
const auto &B_ref = to_ref(B);
7171
return (B_ref * D.transpose()).cwiseProduct(A * B_ref).sum();
7272
}
7373

stan/math/rev/fun/log_softmax.hpp

Lines changed: 27 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -58,39 +58,40 @@ class log_softmax_elt_vari : public vari {
5858
*/
5959
template <typename T, require_container_st<is_var, T>* = nullptr>
6060
inline auto log_softmax(const T& x) {
61-
return apply_vector_unary<ref_type_t<T>>::apply(to_ref(x), [&](const auto& alpha) {
62-
const int a_size = alpha.size();
61+
return apply_vector_unary<ref_type_t<T>>::apply(
62+
to_ref(x), [&](const auto& alpha) {
63+
const int a_size = alpha.size();
6364

64-
check_nonzero_size("log_softmax", "alpha", alpha);
65+
check_nonzero_size("log_softmax", "alpha", alpha);
6566

66-
vari** alpha_vi_array
67-
= ChainableStack::instance_->memalloc_.alloc_array<vari*>(a_size);
68-
Eigen::Map<vector_vi>(alpha_vi_array, a_size) = alpha.vi();
67+
vari** alpha_vi_array
68+
= ChainableStack::instance_->memalloc_.alloc_array<vari*>(a_size);
69+
Eigen::Map<vector_vi>(alpha_vi_array, a_size) = alpha.vi();
6970

70-
vector_d alpha_d = alpha.val();
71+
vector_d alpha_d = alpha.val();
7172

72-
// fold logic of math::softmax() and math::log_softmax()
73-
// to save computations
73+
// fold logic of math::softmax() and math::log_softmax()
74+
// to save computations
7475

75-
vector_d diff = (alpha_d.array() - alpha_d.maxCoeff());
76-
vector_d softmax_alpha_d = diff.array().exp();
77-
double sum = softmax_alpha_d.sum();
78-
vector_d log_softmax_alpha_d = diff.array() - std::log(sum);
76+
vector_d diff = (alpha_d.array() - alpha_d.maxCoeff());
77+
vector_d softmax_alpha_d = diff.array().exp();
78+
double sum = softmax_alpha_d.sum();
79+
vector_d log_softmax_alpha_d = diff.array() - std::log(sum);
7980

80-
// end fold
81-
double* softmax_alpha_d_array
82-
= ChainableStack::instance_->memalloc_.alloc_array<double>(a_size);
83-
Eigen::Map<vector_d>(softmax_alpha_d_array, a_size)
84-
= softmax_alpha_d.array() / sum;
81+
// end fold
82+
double* softmax_alpha_d_array
83+
= ChainableStack::instance_->memalloc_.alloc_array<double>(a_size);
84+
Eigen::Map<vector_d>(softmax_alpha_d_array, a_size)
85+
= softmax_alpha_d.array() / sum;
8586

86-
vector_v log_softmax_alpha(a_size);
87-
for (int k = 0; k < a_size; ++k) {
88-
log_softmax_alpha(k) = var(new internal::log_softmax_elt_vari(
89-
log_softmax_alpha_d[k], alpha_vi_array, softmax_alpha_d_array, a_size,
90-
k));
91-
}
92-
return log_softmax_alpha;
93-
});
87+
vector_v log_softmax_alpha(a_size);
88+
for (int k = 0; k < a_size; ++k) {
89+
log_softmax_alpha(k) = var(new internal::log_softmax_elt_vari(
90+
log_softmax_alpha_d[k], alpha_vi_array, softmax_alpha_d_array,
91+
a_size, k));
92+
}
93+
return log_softmax_alpha;
94+
});
9495
}
9596

9697
} // namespace math

stan/math/rev/fun/mdivide_left_spd.hpp

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -144,8 +144,9 @@ class mdivide_left_spd_vd_vari : public vari {
144144
};
145145
} // namespace internal
146146

147-
template <typename EigMat1, typename EigMat2,
148-
require_all_eigen_matrix_base_vt<is_var, EigMat1, EigMat2> * = nullptr>
147+
template <
148+
typename EigMat1, typename EigMat2,
149+
require_all_eigen_matrix_base_vt<is_var, EigMat1, EigMat2> * = nullptr>
149150
inline Eigen::Matrix<var, EigMat1::RowsAtCompileTime,
150151
EigMat2::ColsAtCompileTime>
151152
mdivide_left_spd(const EigMat1 &A, const EigMat2 &b) {

stan/math/rev/fun/multiply.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -604,7 +604,8 @@ inline var multiply(const RowVec& m1, const ColVec& m2) {
604604
check_not_nan("multiply", "m2", m2_ref);
605605
// Memory managed with the arena allocator.
606606
multiply_mat_vari<RowVecScalar, 1, Ca, ColVecScalar, 1>* baseVari
607-
= new multiply_mat_vari<RowVecScalar, 1, Ca, ColVecScalar, 1>(m1_ref, m2_ref);
607+
= new multiply_mat_vari<RowVecScalar, 1, Ca, ColVecScalar, 1>(m1_ref,
608+
m2_ref);
608609
var AB_v;
609610
AB_v.vi_ = baseVari->variRefAB_;
610611
return AB_v;

0 commit comments

Comments (0)