Implement BetaNegativeBinomial distribution (#258)
* Also broadcast parameters in Skellam when size is not provided

Co-authored-by: Ricardo Vieira <[email protected]>
wd60622 and ricardoV94 authored Nov 10, 2023
1 parent e8ce688 commit b521e64
Showing 4 changed files with 205 additions and 3 deletions.
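The new distribution is registered through pm.CustomDist, so it can be used like any built-in PyMC distribution once this commit is installed. A minimal usage sketch (not part of the commit; the priors, variable names, and toy counts below are purely illustrative):

import numpy as np
import pymc as pm

from pymc_experimental.distributions import BetaNegativeBinomial

# Toy overdispersed count data (illustrative only)
counts = np.array([0, 1, 3, 2, 7, 1, 0, 4, 12, 2])

with pm.Model():
    alpha = pm.HalfNormal("alpha", sigma=5)
    beta = pm.HalfNormal("beta", sigma=5)
    r = pm.HalfNormal("r", sigma=5)
    # Creates a CustomDist named "y" backed by the logp/dist helpers added in discrete.py
    BetaNegativeBinomial("y", alpha=alpha, beta=beta, r=r, observed=counts)
    idata = pm.sample()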
1 change: 1 addition & 0 deletions docs/api_reference.rst
@@ -31,6 +31,7 @@ Distributions
Chi
DiscreteMarkovChain
GeneralizedPoisson
BetaNegativeBinomial
GenExtreme
R2D2M2CP
Skellam
8 changes: 7 additions & 1 deletion pymc_experimental/distributions/__init__.py
@@ -18,16 +18,22 @@
"""

from pymc_experimental.distributions.continuous import Chi, GenExtreme
from pymc_experimental.distributions.discrete import GeneralizedPoisson, Skellam
from pymc_experimental.distributions.discrete import (
BetaNegativeBinomial,
GeneralizedPoisson,
Skellam,
)
from pymc_experimental.distributions.histogram_utils import histogram_approximation
from pymc_experimental.distributions.multivariate import R2D2M2CP
from pymc_experimental.distributions.timeseries import DiscreteMarkovChain

__all__ = [
"BetaNegativeBinomial",
"DiscreteMarkovChain",
"GeneralizedPoisson",
"GenExtreme",
"R2D2M2CP",
"Skellam",
"histogram_approximation",
"Chi",
]
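With the re-export added above, the distribution is importable both from the package namespace and from the discrete submodule. A quick check (assuming this commit is installed):

from pymc_experimental.distributions import BetaNegativeBinomial
from pymc_experimental.distributions.discrete import BetaNegativeBinomial as BNB

assert BetaNegativeBinomial is BNB  # same class object, re-exported in __init__.py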
124 changes: 123 additions & 1 deletion pymc_experimental/distributions/discrete.py
@@ -14,7 +14,7 @@

import numpy as np
import pymc as pm
from pymc.distributions.dist_math import check_parameters, factln, logpow
from pymc.distributions.dist_math import betaln, check_parameters, factln, logpow
from pymc.distributions.shape_utils import rv_size_is_none
from pytensor import tensor as pt
from pytensor.tensor.random.op import RandomVariable
@@ -173,6 +173,125 @@ def logp(value, mu, lam):
)


class BetaNegativeBinomial:
R"""
Beta Negative Binomial distribution.

The pmf of this distribution is

.. math::

    f(x \mid \alpha, \beta, r) = \frac{B(r + x, \alpha + \beta)}{B(r, \alpha)} \frac{\Gamma(x + \beta)}{x! \Gamma(\beta)}

where :math:`B` is the Beta function and :math:`\Gamma` is the Gamma function.
For more information, see https://en.wikipedia.org/wiki/Beta_negative_binomial_distribution.
.. plot::
:context: close-figs
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import betaln, gammaln
def factln(x):
return gammaln(x + 1)
def logp(x, alpha, beta, r):
return (
betaln(r + x, alpha + beta)
- betaln(r, alpha)
+ gammaln(x + beta)
- factln(x)
- gammaln(beta)
)
plt.style.use('arviz-darkgrid')
x = np.arange(0, 25)
params = [
(1, 1, 1),
(1, 1, 10),
(1, 10, 1),
(1, 10, 10),
(10, 10, 10),
]
for alpha, beta, r in params:
pmf = np.exp(logp(x, alpha, beta, r))
plt.plot(x, pmf, "-o", label=r'$\alpha$ = {}, $\beta$ = {}, $r$ = {}'.format(alpha, beta, r))
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()

======== ======================================
Support  :math:`x \in \mathbb{N}_0`
Mean     :math:`\begin{cases} \frac{r\beta}{\alpha - 1} & \text{if } \alpha > 1 \\ \infty & \text{otherwise} \end{cases}`
Variance :math:`\begin{cases} \frac{r\beta (r + \alpha - 1)(\beta + \alpha - 1)}{(\alpha - 2)(\alpha - 1)^{2}} & \text{if } \alpha > 2 \\ \infty & \text{otherwise} \end{cases}`
======== ======================================

Parameters
----------
alpha : tensor_like of float
Shape parameter of the Beta distribution (alpha > 0).
beta : tensor_like of float
Shape parameter of the Beta distribution (beta > 0).
r : tensor_like of float
Number of successes until the experiment is stopped (integer, but can be extended to real).
"""

@staticmethod
def beta_negative_binomial_dist(alpha, beta, r, size):
if rv_size_is_none(size):
alpha, beta, r = pt.broadcast_arrays(alpha, beta, r)

# Compound construction: p ~ Beta(alpha, beta), then a NegativeBinomial with n=r successes and success probability p
p = pm.Beta.dist(alpha, beta, size=size)
return pm.NegativeBinomial.dist(p=p, n=r, size=size)

@staticmethod
def beta_negative_binomial_logp(value, alpha, beta, r):
res = (
betaln(r + value, alpha + beta)
- betaln(r, alpha)
+ pt.gammaln(value + beta)
- factln(value)
- pt.gammaln(beta)
)
res = pt.switch(
pt.lt(value, 0),
-np.inf,
res,
)

return check_parameters(
res,
alpha > 0,
beta > 0,
r > 0,
msg="alpha > 0, beta > 0, r > 0",
)

def __new__(cls, name, alpha, beta, r, **kwargs):
return pm.CustomDist(
name,
alpha,
beta,
r,
dist=cls.beta_negative_binomial_dist,
logp=cls.beta_negative_binomial_logp,
class_name="BetaNegativeBinomial",
**kwargs,
)

@classmethod
def dist(cls, alpha, beta, r, **kwargs):
return pm.CustomDist.dist(
alpha,
beta,
r,
dist=cls.beta_negative_binomial_dist,
logp=cls.beta_negative_binomial_logp,
class_name="BetaNegativeBinomial",
**kwargs,
)


class Skellam:
R"""
Skellam distribution.
@@ -228,6 +347,9 @@ class Skellam:

@staticmethod
def skellam_dist(mu1, mu2, size):
if rv_size_is_none(size):
mu1, mu2 = pt.broadcast_arrays(mu1, mu2)

return pm.Poisson.dist(mu=mu1, size=size) - pm.Poisson.dist(mu=mu2, size=size)

@staticmethod
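A quick sanity check of the two changes in this file (a sketch, not part of the commit): forward draws from BetaNegativeBinomial.dist follow the Beta-NegativeBinomial compound and should reproduce the closed-form mean r*beta/(alpha - 1) for alpha > 1 given in the docstring, while Skellam now broadcasts its parameters when size is not provided, so mixing vector and scalar parameters yields a vector-shaped variable. The parameter values below are arbitrary:

import pymc as pm

from pymc_experimental.distributions import BetaNegativeBinomial, Skellam

alpha, beta, r = 10.0, 3.0, 5.0

# p ~ Beta(alpha, beta), then x ~ NegativeBinomial(n=r, p=p) under the hood
draws = pm.draw(BetaNegativeBinomial.dist(alpha, beta, r), draws=100_000, random_seed=1)
print(draws.mean(), r * beta / (alpha - 1))  # empirical mean vs. closed-form mean (~1.67)

# Vector mu1 and scalar mu2 are broadcast even though size is omitted
skellam_rv = Skellam.dist([1.0, 5.0, 10.0], 2.0)
print(pm.draw(skellam_rv, random_seed=1).shape)  # (3,)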
75 changes: 74 additions & 1 deletion pymc_experimental/tests/distributions/test_discrete.py
@@ -29,7 +29,11 @@
)
from pytensor import config

from pymc_experimental.distributions import GeneralizedPoisson, Skellam
from pymc_experimental.distributions import (
BetaNegativeBinomial,
GeneralizedPoisson,
Skellam,
)


class TestGeneralizedPoisson:
@@ -122,6 +126,75 @@ def test_moment(self, mu, lam, size, expected):
assert_moment_is_expected(model, expected)


class TestBetaNegativeBinomial:
"""
Wrapper class so that tests of experimental additions can be dropped into
PyMC directly on adoption.
"""

def test_logp(self):
"""
Beta Negative Binomial logp function test values are taken from the R package
extraDistr, as there is currently no implementation in scipy.
https://github.com/scipy/scipy/issues/17330
The test values can be generated in R with the following code:
.. code-block:: r
library(extraDistr)
create.test.rows <- function(alpha, beta, r, x) {
logp <- dbnbinom(x, size=r, alpha=alpha, beta=beta, log=TRUE)
paste0("(", paste(alpha, beta, r, x, logp, sep=", "), ")")
}
x <- c(0, 1, 250, 5000)
print(create.test.rows(1, 1, 1, x), quote=FALSE)
print(create.test.rows(1, 1, 10, x), quote=FALSE)
print(create.test.rows(1, 10, 1, x), quote=FALSE)
print(create.test.rows(10, 1, 1, x), quote=FALSE)
print(create.test.rows(10, 10, 10, x), quote=FALSE)
"""
alpha, beta, r, value = pt.scalars("alpha", "beta", "r", "value")
logp = pm.logp(BetaNegativeBinomial.dist(alpha, beta, r), value)
logp_fn = pytensor.function([value, alpha, beta, r], logp)

tests = [
# 1, 1, 1
(1, 1, 1, 0, -0.693147180559945),
(1, 1, 1, 1, -1.79175946922805),
(1, 1, 1, 250, -11.0548820266432),
(1, 1, 1, 5000, -17.0349862828565),
# 1, 1, 10
(1, 1, 10, 0, -2.39789527279837),
(1, 1, 10, 1, -2.58021682959232),
(1, 1, 10, 250, -8.82261694534392),
(1, 1, 10, 5000, -14.7359968760473),
# 1, 10, 1
(1, 10, 1, 0, -2.39789527279837),
(1, 10, 1, 1, -2.58021682959232),
(1, 10, 1, 250, -8.82261694534418),
(1, 10, 1, 5000, -14.7359968760446),
# 10, 1, 1
(10, 1, 1, 0, -0.0953101798043248),
(10, 1, 1, 1, -2.58021682959232),
(10, 1, 1, 250, -43.5891148758123),
(10, 1, 1, 5000, -76.2953173311091),
# 10, 10, 10
(10, 10, 10, 0, -5.37909807285049),
(10, 10, 10, 1, -4.17512526852455),
(10, 10, 10, 250, -21.781591505836),
(10, 10, 10, 5000, -53.4836799634603),
]
for test_alpha, test_beta, test_r, test_value, expected_logp in tests:
np.testing.assert_allclose(
logp_fn(test_value, test_alpha, test_beta, test_r), expected_logp
)


class TestSkellam:
def test_logp(self):
check_logp(
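Since SciPy still has no Beta negative binomial implementation (scipy/scipy#17330), the reference values above come from R's extraDistr; they can also be cross-checked in plain Python with scipy.special, mirroring the logp expression in discrete.py (a sketch, not part of the commit):

from scipy.special import betaln, gammaln

def bnb_logpmf(x, alpha, beta, r):
    # Same expression as beta_negative_binomial_logp, evaluated with SciPy
    return (
        betaln(r + x, alpha + beta)
        - betaln(r, alpha)
        + gammaln(x + beta)
        - gammaln(x + 1)  # log(x!)
        - gammaln(beta)
    )

# Reproduces two rows from the list of test values above
print(bnb_logpmf(0, 1, 1, 1))     # ~ -0.693147180559945
print(bnb_logpmf(250, 10, 1, 1))  # ~ -43.5891148758123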
