Stan Math Library  2.20.0
reverse mode automatic differentiation
student_t_cdf.hpp
#ifndef STAN_MATH_PRIM_SCAL_PROB_STUDENT_T_CDF_HPP
#define STAN_MATH_PRIM_SCAL_PROB_STUDENT_T_CDF_HPP

#include <limits>
#include <cmath>

namespace stan {
namespace math {

template <typename T_y, typename T_dof, typename T_loc, typename T_scale>
typename return_type<T_y, T_dof, T_loc, T_scale>::type student_t_cdf(
    const T_y& y, const T_dof& nu, const T_loc& mu, const T_scale& sigma) {
  typedef typename stan::partials_return_type<T_y, T_dof, T_loc, T_scale>::type
      T_partials_return;

  if (size_zero(y, nu, mu, sigma))
    return 1.0;

  static const char* function = "student_t_cdf";

  T_partials_return P(1.0);

  check_not_nan(function, "Random variable", y);
  check_positive_finite(function, "Degrees of freedom parameter", nu);
  check_finite(function, "Location parameter", mu);
  check_positive_finite(function, "Scale parameter", sigma);

  scalar_seq_view<T_y> y_vec(y);
  scalar_seq_view<T_dof> nu_vec(nu);
  scalar_seq_view<T_loc> mu_vec(mu);
  scalar_seq_view<T_scale> sigma_vec(sigma);
  size_t N = max_size(y, nu, mu, sigma);

  operands_and_partials<T_y, T_dof, T_loc, T_scale> ops_partials(y, nu, mu,
                                                                 sigma);

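  // ops_partials accumulates the partial derivatives of the cdf with respect
  // to each non-constant operand, in operand order: edge1_ = y, edge2_ = nu,
  // edge3_ = mu, edge4_ = sigma.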
  // Explicit return for extreme values
  // The gradients are technically ill-defined, but treated as zero
  for (size_t i = 0; i < stan::length(y); i++) {
    if (value_of(y_vec[i]) == -std::numeric_limits<double>::infinity())
      return ops_partials.build(0.0);
  }

  using std::exp;
  using std::pow;

  T_partials_return digammaHalf = 0;

  VectorBuilder<!is_constant_all<T_dof>::value, T_partials_return, T_dof>
      digamma_vec(stan::length(nu));
  VectorBuilder<!is_constant_all<T_dof>::value, T_partials_return, T_dof>
      digammaNu_vec(stan::length(nu));
  VectorBuilder<!is_constant_all<T_dof>::value, T_partials_return, T_dof>
      digammaNuPlusHalf_vec(stan::length(nu));

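  // When nu is an autodiff operand, precompute digamma(1/2), digamma(nu/2),
  // and digamma((nu + 1)/2) once per element of nu; grad_reg_inc_beta needs
  // these values on every pass through the main loop below.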
  if (!is_constant_all<T_dof>::value) {
    digammaHalf = digamma(0.5);

    for (size_t i = 0; i < stan::length(nu); i++) {
      const T_partials_return nu_dbl = value_of(nu_vec[i]);

      digammaNu_vec[i] = digamma(0.5 * nu_dbl);
      digammaNuPlusHalf_vec[i] = digamma(0.5 + 0.5 * nu_dbl);
    }
  }

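  // For each element, standardize t = (y - mu) / sigma and evaluate the
  // Student-t cdf through the regularized incomplete beta function
  // I_x(nu/2, 1/2) with x = nu / (nu + t^2):
  //   t > 0:  Pn = 1 - I_x(nu/2, 1/2) / 2
  //   t <= 0: Pn = I_x(nu/2, 1/2) / 2
  // Below, q = nu / t^2 and r = 1 / (1 + q), so x = 1 - r;
  // J = 2 r^2 q / t = -d(1 - r)/dt is the Jacobian of the inner argument,
  // and sigma_inv = dt/dy supplies the remaining chain-rule factor.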
  for (size_t n = 0; n < N; n++) {
    // Explicit results for extreme values
    // The gradients are technically ill-defined, but treated as zero
    if (value_of(y_vec[n]) == std::numeric_limits<double>::infinity()) {
      continue;
    }

    const T_partials_return sigma_inv = 1.0 / value_of(sigma_vec[n]);
    const T_partials_return t
        = (value_of(y_vec[n]) - value_of(mu_vec[n])) * sigma_inv;
    const T_partials_return nu_dbl = value_of(nu_vec[n]);
    const T_partials_return q = nu_dbl / (t * t);
    const T_partials_return r = 1.0 / (1.0 + q);
    const T_partials_return J = 2 * r * r * q / t;
    const T_partials_return betaNuHalf = beta(0.5, 0.5 * nu_dbl);
    double zJacobian = t > 0 ? -0.5 : 0.5;

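    // Branch on q = nu / t^2 so that inc_beta is evaluated away from its
    // upper endpoint: for q < 2 use I_{1-r}(nu/2, 1/2) directly; otherwise
    // use the reflection I_{1-r}(nu/2, 1/2) = 1 - I_r(1/2, nu/2).
    // zJacobian is dPn with respect to the incomplete beta value being
    // differentiated in the active branch (hence the sign flip in the
    // second branch).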
    if (q < 2) {
      T_partials_return z
          = inc_beta(0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r);
      const T_partials_return Pn = t > 0 ? 1.0 - 0.5 * z : 0.5 * z;
      const T_partials_return d_ibeta
          = pow(r, -0.5) * pow(1.0 - r, 0.5 * nu_dbl - 1) / betaNuHalf;

      P *= Pn;

      if (!is_constant_all<T_y>::value)
        ops_partials.edge1_.partials_[n]
            += -zJacobian * d_ibeta * J * sigma_inv / Pn;

      if (!is_constant_all<T_dof>::value) {
        T_partials_return g1 = 0;
        T_partials_return g2 = 0;

        grad_reg_inc_beta(g1, g2, 0.5 * nu_dbl, (T_partials_return)0.5, 1.0 - r,
                          digammaNu_vec[n], digammaHalf,
                          digammaNuPlusHalf_vec[n], betaNuHalf);

        ops_partials.edge2_.partials_[n]
            += zJacobian * (d_ibeta * (r / t) * (r / t) + 0.5 * g1) / Pn;
      }

      if (!is_constant_all<T_loc>::value)
        ops_partials.edge3_.partials_[n]
            += zJacobian * d_ibeta * J * sigma_inv / Pn;
      if (!is_constant_all<T_scale>::value)
        ops_partials.edge4_.partials_[n]
            += zJacobian * d_ibeta * J * sigma_inv * t / Pn;

    } else {
      T_partials_return z
          = 1.0 - inc_beta((T_partials_return)0.5, 0.5 * nu_dbl, r);

      zJacobian *= -1;

      const T_partials_return Pn = t > 0 ? 1.0 - 0.5 * z : 0.5 * z;

      T_partials_return d_ibeta
          = pow(1.0 - r, 0.5 * nu_dbl - 1) * pow(r, -0.5) / betaNuHalf;

      P *= Pn;

      if (!is_constant_all<T_y>::value)
        ops_partials.edge1_.partials_[n]
            += zJacobian * d_ibeta * J * sigma_inv / Pn;

      if (!is_constant_all<T_dof>::value) {
        T_partials_return g1 = 0;
        T_partials_return g2 = 0;

        grad_reg_inc_beta(g1, g2, (T_partials_return)0.5, 0.5 * nu_dbl, r,
                          digammaHalf, digammaNu_vec[n],
                          digammaNuPlusHalf_vec[n], betaNuHalf);

        ops_partials.edge2_.partials_[n]
            += zJacobian * (-d_ibeta * (r / t) * (r / t) + 0.5 * g2) / Pn;
      }

      if (!is_constant_all<T_loc>::value)
        ops_partials.edge3_.partials_[n]
            += -zJacobian * d_ibeta * J * sigma_inv / Pn;
      if (!is_constant_all<T_scale>::value)
        ops_partials.edge4_.partials_[n]
            += -zJacobian * d_ibeta * J * sigma_inv * t / Pn;
    }
  }

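  // Each edge has accumulated sum_n (dPn/dtheta) / Pn; multiplying by
  // P = prod_n Pn turns these into the partial derivatives of the product,
  // dP/dtheta.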
  if (!is_constant_all<T_y>::value) {
    for (size_t n = 0; n < stan::length(y); ++n)
      ops_partials.edge1_.partials_[n] *= P;
  }
  if (!is_constant_all<T_dof>::value) {
    for (size_t n = 0; n < stan::length(nu); ++n)
      ops_partials.edge2_.partials_[n] *= P;
  }
  if (!is_constant_all<T_loc>::value) {
    for (size_t n = 0; n < stan::length(mu); ++n)
      ops_partials.edge3_.partials_[n] *= P;
  }
  if (!is_constant_all<T_scale>::value) {
    for (size_t n = 0; n < stan::length(sigma); ++n)
      ops_partials.edge4_.partials_[n] *= P;
  }
  return ops_partials.build(P);
}

}  // namespace math
}  // namespace stan
#endif
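
A minimal usage sketch, not part of the header above: it pulls in the library through the umbrella header <stan/math.hpp> and treats the degrees of freedom as a reverse-mode autodiff variable so that the partial derivative accumulated in edge2_ can be read back after calling grad(). The specific values (y = 1.5, nu = 5, mu = 0, sigma = 2) are illustrative only.

#include <stan/math.hpp>
#include <iostream>

int main() {
  using stan::math::var;

  var nu = 5;  // degrees of freedom as an autodiff operand
  // P(Y <= 1.5) for a Student-t variate with location 0 and scale 2
  var p = stan::math::student_t_cdf(1.5, nu, 0.0, 2.0);

  p.grad();  // propagate adjoints from p back to nu
  std::cout << "cdf = " << p.val() << ", d cdf / d nu = " << nu.adj() << "\n";

  stan::math::recover_memory();  // release the autodiff stack
  return 0;
}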