Stan Math Library 2.20.0 — reverse-mode automatic differentiation.
student_t_lcdf.hpp — log CDF of the Student-t distribution.
Go to the documentation of this file.
1 #ifndef STAN_MATH_PRIM_SCAL_PROB_STUDENT_T_LCDF_HPP
2 #define STAN_MATH_PRIM_SCAL_PROB_STUDENT_T_LCDF_HPP
3 
15 #include <cmath>
16 #include <limits>
17 
18 namespace stan {
19 namespace math {
20 
21 template <typename T_y, typename T_dof, typename T_loc, typename T_scale>
23  const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) {
25  T_partials_return;
26 
27  if (size_zero(y, nu, mu, sigma))
28  return 0.0;
29 
30  static const char* function = "student_t_lcdf";
31 
32  T_partials_return P(0.0);
33 
34  check_not_nan(function, "Random variable", y);
35  check_positive_finite(function, "Degrees of freedom parameter", nu);
36  check_finite(function, "Location parameter", mu);
37  check_positive_finite(function, "Scale parameter", sigma);
38 
39  scalar_seq_view<T_y> y_vec(y);
40  scalar_seq_view<T_dof> nu_vec(nu);
41  scalar_seq_view<T_loc> mu_vec(mu);
42  scalar_seq_view<T_scale> sigma_vec(sigma);
43  size_t N = max_size(y, nu, mu, sigma);
44 
46  sigma);
47 
48  // Explicit return for extreme values
49  // The gradients are technically ill-defined, but treated as zero
50  for (size_t i = 0; i < stan::length(y); i++) {
51  if (value_of(y_vec[i]) == -std::numeric_limits<double>::infinity())
52  return ops_partials.build(negative_infinity());
53  }
54 
55  using std::exp;
56  using std::log;
57  using std::pow;
58 
59  T_partials_return digammaHalf = 0;
60 
61  VectorBuilder<!is_constant_all<T_dof>::value, T_partials_return, T_dof>
62  digamma_vec(stan::length(nu));
63  VectorBuilder<!is_constant_all<T_dof>::value, T_partials_return, T_dof>
64  digammaNu_vec(stan::length(nu));
65  VectorBuilder<!is_constant_all<T_dof>::value, T_partials_return, T_dof>
66  digammaNuPlusHalf_vec(stan::length(nu));
67 
69  digammaHalf = digamma(0.5);
70 
71  for (size_t i = 0; i < stan::length(nu); i++) {
72  const T_partials_return nu_dbl = value_of(nu_vec[i]);
73 
74  digammaNu_vec[i] = digamma(0.5 * nu_dbl);
75  digammaNuPlusHalf_vec[i] = digamma(0.5 + 0.5 * nu_dbl);
76  }
77  }
78 
79  for (size_t n = 0; n < N; n++) {
80  // Explicit results for extreme values
81  // The gradients are technically ill-defined, but treated as zero
82  if (value_of(y_vec[n]) == std::numeric_limits<double>::infinity()) {
83  continue;
84  }
85 
86  const T_partials_return sigma_inv = 1.0 / value_of(sigma_vec[n]);
87  const T_partials_return t
88  = (value_of(y_vec[n]) - value_of(mu_vec[n])) * sigma_inv;
89  const T_partials_return nu_dbl = value_of(nu_vec[n]);
90  const T_partials_return q = nu_dbl / (t * t);
91  const T_partials_return r = 1.0 / (1.0 + q);
92  const T_partials_return J = 2 * r * r * q / t;
93  const T_partials_return betaNuHalf = beta(0.5, 0.5 * nu_dbl);
94  T_partials_return zJacobian = t > 0 ? -0.5 : 0.5;
95 
96  if (q < 2) {
97  T_partials_return z
98  = inc_beta(0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r);
99  const T_partials_return Pn = t > 0 ? 1.0 - 0.5 * z : 0.5 * z;
100  const T_partials_return d_ibeta
101  = pow(r, -0.5) * pow(1.0 - r, 0.5 * nu_dbl - 1) / betaNuHalf;
102 
103  P += log(Pn);
104 
106  ops_partials.edge1_.partials_[n]
107  += -zJacobian * d_ibeta * J * sigma_inv / Pn;
108 
110  T_partials_return g1 = 0;
111  T_partials_return g2 = 0;
112 
113  grad_reg_inc_beta(g1, g2, 0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r,
114  digammaNu_vec[n], digammaHalf,
115  digammaNuPlusHalf_vec[n], betaNuHalf);
116 
117  ops_partials.edge2_.partials_[n]
118  += zJacobian * (d_ibeta * (r / t) * (r / t) + 0.5 * g1) / Pn;
119  }
120 
122  ops_partials.edge3_.partials_[n]
123  += zJacobian * d_ibeta * J * sigma_inv / Pn;
125  ops_partials.edge4_.partials_[n]
126  += zJacobian * d_ibeta * J * sigma_inv * t / Pn;
127 
128  } else {
129  T_partials_return z
130  = 1.0 - inc_beta((T_partials_return)0.5, 0.5 * nu_dbl, r);
131  zJacobian *= -1;
132 
133  const T_partials_return Pn = t > 0 ? 1.0 - 0.5 * z : 0.5 * z;
134 
135  T_partials_return d_ibeta
136  = pow(1.0 - r, 0.5 * nu_dbl - 1) * pow(r, -0.5) / betaNuHalf;
137 
138  P += log(Pn);
139 
141  ops_partials.edge1_.partials_[n]
142  += zJacobian * d_ibeta * J * sigma_inv / Pn;
143 
145  T_partials_return g1 = 0;
146  T_partials_return g2 = 0;
147 
148  grad_reg_inc_beta(g1, g2, (T_partials_return)0.5, 0.5 * nu_dbl, r,
149  digammaHalf, digammaNu_vec[n],
150  digammaNuPlusHalf_vec[n], betaNuHalf);
151 
152  ops_partials.edge2_.partials_[n]
153  += zJacobian * (-d_ibeta * (r / t) * (r / t) + 0.5 * g2) / Pn;
154  }
155 
157  ops_partials.edge3_.partials_[n]
158  += -zJacobian * d_ibeta * J * sigma_inv / Pn;
160  ops_partials.edge4_.partials_[n]
161  += -zJacobian * d_ibeta * J * sigma_inv * t / Pn;
162  }
163  }
164  return ops_partials.build(P);
165 }
166 
167 } // namespace math
168 } // namespace stan
169 #endif
void check_finite(const char *function, const char *name, const T_y &y)
Check if y is finite.
boost::math::tools::promote_args< double, typename partials_type< typename scalar_type< T >::type >::type, typename partials_return_type< T_pack... >::type >::type type
T value_of(const fvar< T > &v)
Return the value of the specified variable.
Definition: value_of.hpp:17
Extends std::true_type when instantiated with zero or more template parameters, all of which extend t...
Definition: conjunction.hpp:14
fvar< T > log(const fvar< T > &x)
Definition: log.hpp:12
internal::ops_partials_edge< double, Op4 > edge4_
scalar_seq_view provides a uniform sequence-like wrapper around either a scalar or a sequence of scal...
This template builds partial derivatives with respect to a set of operands.
size_t length(const std::vector< T > &x)
Returns the length of the provided std::vector.
Definition: length.hpp:16
bool size_zero(T &x)
Returns 1 if input is of length 0, returns 0 otherwise.
Definition: size_zero.hpp:18
return_type< T_y, T_dof, T_loc, T_scale >::type student_t_lcdf(const T_y &y, const T_dof &nu, const T_loc &mu, const T_scale &sigma)
fvar< T > inc_beta(const fvar< T > &a, const fvar< T > &b, const fvar< T > &x)
Definition: inc_beta.hpp:18
fvar< T > beta(const fvar< T > &x1, const fvar< T > &x2)
Return fvar with the beta function applied to the specified arguments and its gradient.
Definition: beta.hpp:51
void check_positive_finite(const char *function, const char *name, const T_y &y)
Check if y is positive and finite.
void grad_reg_inc_beta(T &g1, T &g2, const T &a, const T &b, const T &z, const T &digammaA, const T &digammaB, const T &digammaSum, const T &betaAB)
Computes the gradients of the regularized incomplete beta function.
boost::math::tools::promote_args< double, typename scalar_type< T >::type, typename return_type< Types_pack... >::type >::type type
Definition: return_type.hpp:36
fvar< T > exp(const fvar< T > &x)
Definition: exp.hpp:11
void check_not_nan(const char *function, const char *name, const T_y &y)
Check if y is not NaN.
size_t max_size(const T1 &x1, const T2 &x2)
Definition: max_size.hpp:9
T_return_type build(double value)
Build the node to be stored on the autodiff graph.
VectorBuilder allocates type T1 values to be used as intermediate values.
internal::ops_partials_edge< double, Op2 > edge2_
fvar< T > pow(const fvar< T > &x1, const fvar< T > &x2)
Definition: pow.hpp:16
internal::ops_partials_edge< double, Op3 > edge3_
internal::ops_partials_edge< double, Op1 > edge1_
double negative_infinity()
Return negative infinity.
Definition: constants.hpp:115
fvar< T > digamma(const fvar< T > &x)
Return the derivative of the log gamma function at the specified argument.
Definition: digamma.hpp:23

     [ Stan Home Page ] © 2011–2018, Stan Development Team.