
Conditioning

// Uncomment the conditions one at a time to observe how they change the belief about A
var model = function() {
  var A = flip()
  var B = flip()
  var C = flip()
  // condition(A + B + C == 3)
  // condition(A + B + C >= 2) 
  // condition(B == 0)
  return {'A': A}
}
var dist = Infer(model)
viz(dist)
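// For reference (a quick check by hand): with condition(A + B + C == 3),
// A is true with probability 1; with condition(A + B + C >= 2), P(A = true)
// rises from 1/2 to 3/4 (A is true in 3 of the 4 surviving outcomes); and
// conditioning on B == 0 leaves A untouched, since A and B are independent.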
// Conditioning without inference does not make sense
// Try uncommenting the condition below
var model = function() {
  var A = flip()
  var B = flip()
  var C = flip()
  // condition(A + B + C >= 2) 
  return {'A': A}
}

model() 
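// Running the model directly just draws one forward sample. With the
// condition uncommented, WebPPL raises an error: conditioning is only
// meaningful relative to an inference procedure such as Infer.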

Enumerate

 
var model = function() {
  var A = flip()
  var B = flip()
  var C = flip()
  condition(A + B + C >= 2) 
  return {'A': A, 'B': B, 'C' : C}
}
// 8 executions suffice to explore all the paths of this example (2^3 coin assignments).
var truedist = Infer(model)
var dist = Infer({method: "enumerate", maxExecutions: 8, strategy : "likelyFirst"},model)
viz.table(truedist)
viz.table(dist)
///fold:
var model = function() {
  var A = flip()
  var B = flip()
  var C = flip()
  condition(A + B + C >= 2) 
  return {'A': A, 'B': B, 'C' : C}
}
/// 
// With one execution fewer, enumeration fails to recover the exact probabilities.
var truedist = Infer(model)
var dist = Infer({method: "enumerate", maxExecutions: 7, strategy : "likelyFirst"},model)
viz.table(truedist)
viz.table(dist)
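For comparison, a minimal sketch running the two other traversal strategies documented for WebPPL's enumerate method, "depthFirst" and "breadthFirst", on the same model with the same budget of 7 executions.

///fold:
var model = function() {
  var A = flip()
  var B = flip()
  var C = flip()
  condition(A + B + C >= 2)
  return {'A': A, 'B': B, 'C' : C}
}
///
viz.table(Infer({method: "enumerate", maxExecutions: 7, strategy : "depthFirst"}, model))
viz.table(Infer({method: "enumerate", maxExecutions: 7, strategy : "breadthFirst"}, model))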
// Let us modify the program a bit to see how the likelyFirst strategy behaves
// We use biased coins that are more likely to return true.
var model = function() {
  var A = flip(0.6)
  var B = flip(0.6)
  var C = flip(0.6)
  condition(A + B + C >= 2) 
  return {'A': A, 'B': B, 'C' : C}
}
var truedist = Infer(model)
var dist = Infer({method: "enumerate", maxExecutions: 7, strategy : "likelyFirst"},model)
viz.table(truedist)
viz.table(dist)
// This time, enumeration estimates the probabilities well. Why?
// Remove the condition to get a hint.
// Then, this should not work...
var model = function() {
  var A = flip(0.4)
  var B = flip(0.4)
  var C = flip(0.4)
  condition(A + B + C >= 2) 
  return {'A': A, 'B': B, 'C' : C}
}
var truedist = Infer(model)
var dist = Infer({method: "enumerate", maxExecutions: 7, strategy : "likelyFirst"},model)
viz.table(truedist)
viz.table(dist)
// As expected. 
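To see why, look at the prior: the four most likely paths (all coins false, or a single true) are exactly those the condition rejects, so likelyFirst spends four of its seven executions on them and never reaches the path where all three coins are true. A small sketch of the same model with the condition removed:

var model = function() {
  var A = flip(0.4)
  var B = flip(0.4)
  var C = flip(0.4)
  // No condition: note which paths are the most likely ones under the prior
  return {'A': A, 'B': B, 'C' : C}
}
viz.table(Infer(model))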

A bit of more advanced reasoning: we saw that the "likelyFirst" strategy uses a heuristic that depends on the probability of each path. But in practice, we do not want to modify the generative model. Is there another solution?

// In fact, factor can be used to guide the heuristic.
// Here, we remove some weight from the paths where A is false
var model = function() {
  var A = flip()
  if (A == false) {factor(-10)};
  var B = flip()
  var C = flip()
  // But we give the weight back afterwards, so the final distribution is unchanged
  if (A == false) {factor(+10)};
  condition(A + B + C >= 2)
  return {'A': A, 'B': B, 'C' : C}
}
// This time it works. 
var truedist = Infer(model)
var dist = Infer({method: "enumerate", maxExecutions: 6, strategy : "likelyFirst"},model)
viz.table(truedist)
viz.table(dist)
// You can remove the condition to see all the paths
// Remove the second factor: what is the problem? Why?

Rejection Sampling

// Direct Implementation of Rejection Sampling 
var takeSample = function () {
  var A = flip()
  var B = flip()
  var C = flip()
  var D = A + B + C
  // If we do not satisfy the condition, we try again
  return D >= 2 ? A : takeSample()
}
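// With fair coins, 4 of the 8 equally likely outcomes satisfy A + B + C >= 2,
// so each attempt succeeds with probability 1/2 and takeSample makes
// 2 recursive attempts on average.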
viz(repeat(1000, takeSample))
viz.table(repeat(1000, takeSample))
// Using the Inference Operator
var model = function () {
  var A = flip()
  var B = flip()
  var C = flip()
  var D = A + B + C
  condition(D >= 2)
  return A
}
var dist = Infer({method: 'rejection', samples: 1000}, model)
viz(dist)
viz.table(dist)
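Rejection sampling only approximates the posterior. For this small discrete model we can compare against exact enumeration: P(A = true) should be exactly 3/4, since A is true in 3 of the 4 equally likely outcomes that satisfy the condition.

var model = function () {
  var A = flip()
  var B = flip()
  var C = flip()
  condition(A + B + C >= 2)
  return A
}
viz.table(Infer({method: 'enumerate'}, model))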

Example 1: Inverse Physics

// Makes a floor with evenly spaced buckets
var bins = function (xmin, xmax, width) {
  return ((xmax < xmin + width)
          // floor
          ? {shape: 'rect', static: true, dims: [400, 10], x: 175, y: 500}
          // bins
          : [{shape: 'rect', static: true, dims: [1, 10], x: xmin, y: 490}].concat(bins(xmin + width, xmax, width))
         )
}

// Add two fixed circles
var world = [{shape: 'circle', static: true, dims: [60], x: 60, y: 200},
             {shape: 'circle', static: true, dims: [30], x: 300, y: 300}].concat(bins(-1000, 1000, 25))

// The random block starts at a random horizontal position (in x)
var randomBlock = function () {
  return {shape: 'circle', static: false, dims: [10], x: uniform(0, worldWidth), y: 0}
}
// The number 1000 indicates the number of "steps" you want to simulate 
physics.animate(1000, [randomBlock()].concat(world))
///fold: 
// Makes a floor with evenly spaced buckets
var bins = function (xmin, xmax, width) {
  return ((xmax < xmin + width)
          // floor
          ? {shape: 'rect', static: true, dims: [400, 10], x: 175, y: 500}
          // bins
          : [{shape: 'rect', static: true, dims: [1, 10], x: xmin, y: 490}].concat(bins(xmin + width, xmax, width))
         )
}

// Add two fixed circles
var world = [{shape: 'circle', static: true, dims: [60], x: 60, y: 200},
             {shape: 'circle', static: true, dims: [30], x: 300, y: 300}].concat(bins(-1000, 1000, 25))

// The random block starts at a random horizontal position (in x)
var randomBlock = function () {
  return {shape: 'circle', static: false, dims: [10], x: uniform(0, worldWidth), y: 0}
}
/// 
// Get the ball's x-position (the ball is the only non-static object in the world)
var getBallX = function(world) {
  var ball = filter(function(obj) { return !obj.static }, world)[0];
  return ball.x;
}

// The observed final x-position of the ball.
var observedX = 160;

// Generative Model 
var model = function() {
  var initState = world.concat([randomBlock()])
  var initX = getBallX(initState);
  var finalState = physics.run(1000, initState);
  var finalX = getBallX(finalState);
  observe(Gaussian({mu: finalX, sigma: 10}), observedX)
  return {initX: initX}
}

// Infer the distribution on the initial x-position, using MCMC
var initialXDist = Infer(
  {method: 'MCMC',
   samples: 100,
   lag: 10,
   callbacks: [editor.MCMCProgress()]
  },
  model);

viz.density(initialXDist, {bounds: [0,350]})

Medical Diagnosis

var dist = Infer({method: 'enumerate'},
  function () {
    var lungCancer = flip(0.01)
    var TB = flip(0.005)
    var cold = flip(0.2)
    var stomachFlu = flip(0.1)
    var other = flip(0.1)

    var cough = ((cold && flip(0.5)) ||
                 (lungCancer && flip(0.3)) ||
                 (TB && flip(0.7)) ||
                 (other && flip(0.01)))

    var fever = ((cold && flip(0.3)) ||
                 (stomachFlu && flip(0.5)) ||
                 (TB && flip(0.2)) ||
                 (other && flip(0.01)))

    var chestPain = ((lungCancer && flip(0.4)) ||
                     (TB && flip(0.5)) ||
                     (other && flip(0.01)))

    var shortnessOfBreath = ((lungCancer && flip(0.4)) ||
                             (TB && flip(0.5)) ||
                             (other && flip(0.01)))

    condition(cough && fever && chestPain && shortnessOfBreath)
    return {lungCancer: lungCancer, TB: TB}
})
viz(dist)
viz.marginals(dist)
viz.table(dist)
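// viz.marginals already displays each variable separately; for programmatic
// access to a single marginal, a sketch using WebPPL's marginalize and
// expectation helpers, printing the posterior probability of TB:
print(expectation(marginalize(dist, 'TB'), function(x) {return x ? 1 : 0}))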

Bayesian Data Analysis

Poll example, taken from Probmods Chapter 7


// observed data
var k = 1 // number of people who support candidate A
var n = 20  // number of people asked

var model = function() {

    // true population proportion who support candidate A
    var p = uniform(0, 1);

    // Observed k people support "A"
    // Assuming each person's response is independent of the others
    observe(Binomial({p : p, n: n}), k);

    // predict what the next n people will say
    var posteriorPredictive = binomial(p, n);

    // recreate model structure, without observe
    var prior_p = uniform(0, 1);
    var priorPredictive = binomial(prior_p, n);

    return {
        prior: prior_p, priorPredictive : priorPredictive,
        posterior : p, posteriorPredictive : posteriorPredictive
    };
}

var posterior = Infer(model);

viz.marginals(posterior)
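// A point estimate can also be read off programmatically: the posterior mean
// of p (a sketch using WebPPL's marginalize and expectation helpers).
// With k = 1 supporters out of n = 20 and a uniform prior, it should be
// near 2/23, i.e. about 0.09.
print(expectation(marginalize(posterior, 'posterior')))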

Linear Regression Example

//Observed data
var observedData = [{"x":0,"y":0},{"x":1,"y":5.2},{"x":2,"y":9.4},{"x":3,"y":15},{"x":4,"y":20.5},{"x":5,"y":24.3}]
    
// Create a drawing of size 500,500
var myDraw = Draw(500, 500, true)
    
// Intermediate helper functions for drawing

// Scale a data point to image coordinates
var scale = function(p) {
      var newX = 20 + p.x * 75
      var newY = 480 - p.y * 15
      return{"x":newX,"y":newY}
}
    
// Draw a list of data points
var drawData = function(img,obs) {
    var scaleData = map(scale,obs)
    map(function(p) {
      img.circle(p.x, p.y, 3, 'black', 'white')
    }, scaleData)
}
    
// Draw a linear function 
var drawLinFun = function(img,f) { 
      var a = scale({"x":0, "y":f(0)})
      var b = scale({"x":5.5, "y":f(5.5)})
      img.line(a.x,a.y,b.x,b.y,1,0.01,"red")
}
    
// Make a function from a coefficient
var makeFun = function(a) {
    return ( function(x) {
        return(a * x)
      })
}
    
// Sample a random line, and observe the data
// The prior distribution on the coefficient a is uniform
// The generative model accounts for noise, expressed by a Gaussian.
var randomLine = function() {
    var a = uniform(0,10)
    var f = makeFun(a)
      
    var obsFn = function(datum){
        observe(Gaussian({mu: f(datum.x), sigma: 2}), datum.y)
    }
    mapData({data: observedData}, obsFn)
      
    return a      
}

// Infer the distribution on coefficients
var post = Infer(randomLine)
    
// Draw samples from the inferred distribution to get some visual intuition about it
var randomDraw = function() {
    var a = sample(post)
    drawLinFun(myDraw,makeFun(a))
}

//Visualize the distribution on coefficients
viz(post)
    
// Draw the data 
drawData(myDraw,observedData)
// Draw the samples from the marginal distribution 
repeat(1000,randomDraw)
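// Numeric check (a sketch using WebPPL's expectation helper): the posterior
// mean of the coefficient a
print(expectation(post))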
    
print("The value should be around 5")

Fair Coin or Trick Coin?

// Our prior degree of belief that the coin is fair
var fairPrior = 0.999
// The observed list of data
var obsData = repeat(5, function() {return 'h'})

// Make a coin of a given weight
var makeCoin = function(x) {
    return function() {
        return (flip(x)) ? 'h' : 't'
    }
}

//Generative Model 
var model = function() {
    // We create the coin according to our belief
    var isFair = flip(fairPrior)
    var coin = (isFair) ? makeCoin(0.5) : makeCoin(0.95)
    // A standard way to condition on a list of observations is mapData
    mapData({data: obsData},function(datum) {condition(coin() == datum)})
    // mapData is similar to map, but tells the inference engine that the observations are independent
    return isFair
}

var post = Infer(model)
viz(post)
// We can be even more precise in our prior distribution.
// We can try to infer the actual weight of the coin, believing that a trick coin could have any weight > 0.5


// Our prior degree of belief that the coin is fair
var fairPrior = 0.999
// The observed list of data
var obsData = repeat(10, function() {return 'h'})

//Generative Model 
var model = function() {
    // We create the coin according to our belief
    var isFair = flip(fairPrior)
    // The weight is either 0.5 (fair), or that of a trick coin biased towards heads
    var weight = (isFair) ? 0.5 : uniform(0.5,1)
    var dist = Bernoulli({p : weight})
    // Note that we use observe with a Bernoulli distribution here; this makes inference more efficient
    mapData({data: obsData},function(datum) {observe(dist,datum == 'h')})
    return weight
}

// We can try SMC, as the model alternates between sampling and observing
var post = Infer({method: 'SMC', particles : 5000},model)
viz(post)
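// The posterior mass at exactly 0.5 is the probability that the coin is fair.
// A sketch reading it off the SMC marginal (score returns a log-probability):
print(Math.exp(post.score(0.5)))
// Even after ten heads in a row, the strong prior keeps the fair hypothesis dominant.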