Stan Math Library, version 2.20.0
Reverse-mode automatic differentiation
File: neg_binomial_2_log_glm_lpmf.hpp
Go to the documentation of this file.
1 #ifndef STAN_MATH_PRIM_MAT_PROB_NEG_BINOMIAL_2_LOG_GLM_LPMF_HPP
2 #define STAN_MATH_PRIM_MAT_PROB_NEG_BINOMIAL_2_LOG_GLM_LPMF_HPP
3 
16 #include <vector>
17 #include <cmath>
18 
19 namespace stan {
20 namespace math {
21 
54 template <bool propto, typename T_y, typename T_x, typename T_alpha,
55  typename T_beta, typename T_precision>
57 neg_binomial_2_log_glm_lpmf(const T_y& y, const T_x& x, const T_alpha& alpha,
58  const T_beta& beta, const T_precision& phi) {
59  static const char* function = "neg_binomial_2_log_glm_lpmf";
60  typedef
61  typename stan::partials_return_type<T_y, T_x, T_alpha, T_beta,
62  T_precision>::type T_partials_return;
63  typedef typename std::conditional<
65  Eigen::Array<typename partials_return_type<T_precision>::type, -1, 1>,
66  typename partials_return_type<T_precision>::type>::type T_precision_val;
67  typedef typename std::conditional<
68  is_vector<T_y>::value || is_vector<T_precision>::value,
69  Eigen::Array<typename partials_return_type<T_y, T_precision>::type, -1,
70  1>,
71  typename partials_return_type<T_y, T_precision>::type>::type T_sum_val;
72 
73  using Eigen::Array;
74  using Eigen::Dynamic;
75  using Eigen::Matrix;
76  using Eigen::exp;
77  using Eigen::log1p;
78 
79  const size_t N = x.rows();
80  const size_t M = x.cols();
81 
82  check_nonnegative(function, "Failures variables", y);
83  check_finite(function, "Weight vector", beta);
84  check_finite(function, "Intercept", alpha);
85  check_positive_finite(function, "Precision parameter", phi);
86  check_consistent_size(function, "Vector of dependent variables", y, N);
87  check_consistent_size(function, "Weight vector", beta, M);
88  if (is_vector<T_precision>::value)
89  check_consistent_sizes(function, "Vector of precision parameters", phi,
90  "Vector of dependent variables", y);
92  check_consistent_sizes(function, "Vector of intercepts", alpha,
93  "Vector of dependent variables", y);
94 
95  if (size_zero(y, x, beta, phi))
96  return 0;
97 
99  return 0;
100 
101  T_partials_return logp(0);
102  const auto& x_val = value_of_rec(x);
103  const auto& y_val = value_of_rec(y);
104  const auto& beta_val = value_of_rec(beta);
105  const auto& alpha_val = value_of_rec(alpha);
106  const auto& phi_val = value_of_rec(phi);
107 
108  const auto& y_val_vec = as_column_vector_or_scalar(y_val);
109  const auto& beta_val_vec = as_column_vector_or_scalar(beta_val);
110  const auto& alpha_val_vec = as_column_vector_or_scalar(alpha_val);
111  const auto& phi_val_vec = as_column_vector_or_scalar(phi_val);
112 
113  const auto& y_arr = as_array_or_scalar(y_val_vec);
114  const auto& phi_arr = as_array_or_scalar(phi_val_vec);
115 
116  Array<T_partials_return, Dynamic, 1> theta = value_of(x) * beta_val_vec;
117  theta += as_array_or_scalar(alpha_val_vec);
118  check_finite(function, "Matrix of independent variables", theta);
119  T_precision_val log_phi = log(phi_arr);
120  Array<T_partials_return, Dynamic, 1> logsumexp_theta_logphi
121  = (theta > log_phi)
122  .select(theta + log1p(exp(log_phi - theta)),
123  log_phi + log1p(exp(theta - log_phi)));
124 
125  T_sum_val y_plus_phi = y_arr + phi_arr;
126 
127  // Compute the log-density.
129  logp -= sum(lgamma(y_arr + 1));
130  }
132  if (is_vector<T_precision>::value) {
133  scalar_seq_view<decltype(phi_val)> phi_vec(phi_val);
134  for (size_t n = 0; n < N; ++n)
135  logp += multiply_log(phi_vec[n], phi_vec[n]) - lgamma(phi_vec[n]);
136  } else {
137  logp += N
138  * (multiply_log(as_scalar(phi_val), as_scalar(phi_val))
139  - lgamma(as_scalar(phi_val)));
140  }
141  }
143  logp -= sum(y_plus_phi * logsumexp_theta_logphi);
145  logp += sum(y_arr * theta);
147  logp += sum(lgamma(y_plus_phi));
148  }
149 
150  // Compute the necessary derivatives.
152  x, alpha, beta, phi);
154  Array<T_partials_return, Dynamic, 1> theta_exp = theta.exp();
156  Matrix<T_partials_return, Dynamic, 1> theta_derivative
157  = y_arr - theta_exp * y_plus_phi / (theta_exp + phi_arr);
159  ops_partials.edge3_.partials_ = x_val.transpose() * theta_derivative;
160  }
162  ops_partials.edge1_.partials_
163  = (beta_val_vec * theta_derivative.transpose()).transpose();
164  }
167  ops_partials.edge2_.partials_ = theta_derivative;
168  else
169  ops_partials.edge2_.partials_[0] = sum(theta_derivative);
170  }
171  }
173  if (is_vector<T_precision>::value) {
174  ops_partials.edge4_.partials_
175  = 1 - y_plus_phi / (theta_exp + phi_arr) + log_phi
176  - logsumexp_theta_logphi + as_array_or_scalar(digamma(y_plus_phi))
177  - as_array_or_scalar(digamma(phi_val_vec));
178  } else {
179  ops_partials.edge4_.partials_[0]
180  = N
181  + sum(-y_plus_phi / (theta_exp + phi_arr) + log_phi
182  - logsumexp_theta_logphi
183  + as_array_or_scalar(digamma(y_plus_phi))
184  - as_array_or_scalar(digamma(phi_val_vec)));
185  }
186  }
187  }
188  return ops_partials.build(logp);
189 }
190 
191 template <typename T_y, typename T_x, typename T_alpha, typename T_beta,
192  typename T_precision>
194 neg_binomial_2_log_glm_lpmf(const T_y& y, const T_x& x, const T_alpha& alpha,
195  const T_beta& beta, const T_precision& phi) {
196  return neg_binomial_2_log_glm_lpmf<false>(y, x, alpha, beta, phi);
197 }
198 } // namespace math
199 } // namespace stan
200 #endif
fvar< T > sum(const std::vector< fvar< T > > &m)
Return the sum of the entries of the specified standard vector.
Definition: sum.hpp:20
const Eigen::Matrix< T, Eigen::Dynamic, 1 > & as_column_vector_or_scalar(const Eigen::Matrix< T, Eigen::Dynamic, 1 > &a)
Converts input argument to a column vector or a scalar.
void check_finite(const char *function, const char *name, const T_y &y)
Check if y is finite.
boost::math::tools::promote_args< double, typename partials_type< typename scalar_type< T >::type >::type, typename partials_return_type< T_pack... >::type >::type type
fvar< T > lgamma(const fvar< T > &x)
Return the natural logarithm of the gamma function applied to the specified argument.
Definition: lgamma.hpp:21
double value_of_rec(const fvar< T > &v)
Return the value of the specified variable.
T value_of(const fvar< T > &v)
Return the value of the specified variable.
Definition: value_of.hpp:17
Extends std::true_type when instantiated with zero or more template parameters, all of which extend t...
Definition: conjunction.hpp:14
fvar< T > log(const fvar< T > &x)
Definition: log.hpp:12
internal::ops_partials_edge< double, Op4 > edge4_
scalar_seq_view provides a uniform sequence-like wrapper around either a scalar or a sequence of scal...
This template builds partial derivatives with respect to a set of operands.
bool size_zero(T &x)
Returns 1 if input is of length 0, returns 0 otherwise.
Definition: size_zero.hpp:18
Template metaprogram to calculate whether a summand needs to be included in a proportional (log) prob...
return_type< T_x, T_alpha, T_beta, T_precision >::type neg_binomial_2_log_glm_lpmf(const T_y &y, const T_x &x, const T_alpha &alpha, const T_beta &beta, const T_precision &phi)
Returns the log PMF of the Generalized Linear Model (GLM) with Negative-Binomial-2 distribution and l...
void check_nonnegative(const char *function, const char *name, const T_y &y)
Check if y is non-negative.
fvar< T > beta(const fvar< T > &x1, const fvar< T > &x2)
Return fvar with the beta function applied to the specified arguments and its gradient.
Definition: beta.hpp:51
void check_consistent_size(const char *function, const char *name, const T &x, size_t expected_size)
Check if the dimension of x is consistent, which is defined to be expected_size if x is a vector or 1...
void check_positive_finite(const char *function, const char *name, const T_y &y)
Check if y is positive and finite.
Template metaprogram to calculate the partial derivative type resulting from promoting all the scalar...
boost::math::tools::promote_args< double, typename scalar_type< T >::type, typename return_type< Types_pack... >::type >::type type
Definition: return_type.hpp:36
fvar< T > exp(const fvar< T > &x)
Definition: exp.hpp:11
T_return_type build(double value)
Build the node to be stored on the autodiff graph.
double as_scalar(const std::vector< T > &a)
Converts input to a scalar.
Definition: as_scalar.hpp:20
matrix_cl transpose(const matrix_cl &src)
Takes the transpose of the matrix on the OpenCL device.
Definition: transpose.hpp:20
fvar< T > multiply_log(const fvar< T > &x1, const fvar< T > &x2)
internal::ops_partials_edge< double, Op2 > edge2_
Eigen::ArrayWrapper< const Eigen::Matrix< T, R, C > > as_array_or_scalar(const Eigen::Matrix< T, R, C > &v)
Converts a matrix type to an array.
fvar< T > log1p(const fvar< T > &x)
Definition: log1p.hpp:12
void check_consistent_sizes(const char *function, const char *name1, const T1 &x1, const char *name2, const T2 &x2)
Check if the dimension of x1 is consistent with x2.
internal::ops_partials_edge< double, Op3 > edge3_
internal::ops_partials_edge< double, Op1 > edge1_
fvar< T > digamma(const fvar< T > &x)
Return the derivative of the log gamma function at the specified argument.
Definition: digamma.hpp:23

     [ Stan Home Page ] © 2011–2018, Stan Development Team.