I found this in some code I wrote a while back, so I can’t vouch for it 100%, but maybe it will give you an idea. You can inherit from `pm.Continuous` and define a `logp` method and a `random` method.

```
import numpy as np
import pymc3 as pm
from pymc3.distributions.dist_math import bound
from pymc3.distributions.continuous import draw_values, generate_samples, assert_negative_support
import theano.tensor as T
class CensoredExponential(pm.Continuous):
    """Exponential distribution with support for right-censored observations.

    The likelihood combines, per observation, the density term for
    uncensored data points and the survival-function term for censored
    ones:

        logp = uncensored * log(lam) - lam * value

    For ``uncensored == 1`` this is the ordinary Exponential log-pdf
    ``log(lam) - lam * value``; for ``uncensored == 0`` it reduces to the
    log-survival ``-lam * value`` (probability the event exceeds ``value``).

    Parameters
    ----------
    lam : tensor-like
        Rate parameter (lam > 0).
    uncensored : tensor-like
        Per-observation indicator: 1 if the event was observed
        (uncensored), 0 if the observation is right-censored.
    """

    def __init__(self, lam, uncensored, *args, **kwargs):
        super(CensoredExponential, self).__init__(*args, **kwargs)
        self.lam = lam = T.as_tensor_variable(lam)
        self.uncensored = uncensored = T.as_tensor_variable(uncensored)
        # Moments of the underlying (uncensored) Exponential(lam).
        self.mean = 1. / self.lam
        self.median = self.mean * T.log(2)
        self.mode = T.zeros_like(self.lam)
        self.variance = self.lam ** -2
        # Warn at definition time if lam could be non-positive.
        # (Fixed: report this class's name, not 'Exponential'.)
        assert_negative_support(lam, 'lam', 'CensoredExponential')

    def random(self, point=None, size=None, repeat=None):
        """Draw samples from the *uncensored* Exponential(lam).

        NOTE: censoring is not applied here — samples are from the
        underlying event-time distribution, which is the usual choice
        for posterior-predictive event times.
        """
        lam = draw_values([self.lam], point=point)[0]
        return generate_samples(np.random.exponential, scale=1. / lam,
                                dist_shape=self.shape,
                                size=size)

    def logp(self, value):
        """Censored log-likelihood of ``value``.

        Returns -inf outside the support. Fixed: the support check is
        ``value >= 0`` (matching pm.Exponential), so an observation at
        exactly 0 is no longer rejected.
        """
        lam = self.lam
        uncensored = self.uncensored
        return bound(uncensored * T.log(lam) - lam * value,
                     value >= 0, lam > 0)
```