Intro Chapter 1: The Core Language Chapter 2: Forward Models Chapter 3: Conditioning Chapter 4: Inference Algorithms

Comparisons of Algorithms

// We revisit our previous example, with an unfair coin.
// Base rate of each coin coming up heads (the coin is unfair)
var baserate = 0.1

// Rejection sampling: draw full executions and keep only those satisfying the
// condition. If the baserate is too low, most samples will be rejected.
// FIX: return the Infer result — previously it was computed and discarded,
// so infModel always returned undefined (time() ignores the return value,
// so this change is backward compatible).
var infModel = function(){
  return Infer({method: 'rejection', samples: 100}, function(){
    var A = flip(baserate)
    var B = flip(baserate)
    var C = flip(baserate)
    condition(A+B+C >= 2)
    return A})
}

// Timing utility (relies on lodash's _.now): run 'foo' 'trials' times
// and report the average elapsed time in milliseconds per run.
var time = function(foo, trials) {
  var t0 = _.now()
  var results = repeat(trials, foo)
  var t1 = _.now()
  return (t1 - t0) / trials
}

time(infModel, 10)
// Try lowering the baserate to 0.01; inference should already be noticeably slow.
For this example, we could also use "enumerate".
///fold:
// Timing utility: run 'foo' 'trials' times, report the average time (ms).
var time = function(foo, trials) {
  var t0 = _.now()
  var results = repeat(trials, foo)
  var t1 = _.now()
  return (t1 - t0) / trials
}
///

// Base rate of each coin coming up heads
var baserate = 0.1

// Enumeration explores every execution path exactly once, so no work is
// wasted on rejected samples and the result is exact.
// FIX: return the Infer result — previously it was computed and discarded,
// so infModel always returned undefined (time() ignores the return value,
// so this change is backward compatible).
var infModel = function(){
  return Infer({method: 'enumerate'}, function(){
    var A = flip(baserate)
    var B = flip(baserate)
    var C = flip(baserate)
    condition(A+B+C >= 2)
    return A})
}

time(infModel, 10)
// Try lowering the baserate. What is the impact on the running time? Why?
///fold:
// A timing utility: run 'foo' 'trials' times, report the average time (ms).
var time = function(foo, trials) {
  var t0 = _.now()
  var results = repeat(trials, foo)
  var t1 = _.now()
  return (t1 - t0) / trials
}
///

// However, enumerate does not scale well with the number of random choices:
// the number of execution paths grows exponentially with numFlips.
// Let us modify the program to test this.
var baserate = 0.1
var numFlips = 3

var infModel = function(){
  // FIX: the repeat callback must 'return' the flip. Without it the callback
  // yields undefined, 'choices' is an array of undefined, and the condition
  // can never be satisfied. Also return the Infer result (backward compatible).
  return Infer({method: 'enumerate'}, function(){
    var choices = repeat(numFlips, function(){return flip(baserate)})
    // sum computes the sum of all elements in an array
    condition(sum(choices) >= 2)
    return choices[0]})
}

time(infModel, 10)
// Try this program with 15 flips. For rejection sampling, what is the impact of increasing the number of flips? Why?
///fold:
// A timing utility: run 'foo' 'trials' times, report the average time (ms).
var time = function(foo, trials) {
  var t0 = _.now()
  var results = repeat(trials, foo)
  var t1 = _.now()
  return (t1 - t0) / trials
}
///
// Finally, let's try the MCMC algorithm this time.
var baserate = 0.1
var numFlips = 3

var infModel = function(){
  // FIX: the repeat callback must 'return' the flip. Without it the callback
  // yields undefined, 'choices' is an array of undefined, and the condition
  // can never be satisfied. Also return the Infer result (backward compatible).
  return Infer({method: 'MCMC', lag: 100}, function(){
    var choices = repeat(numFlips, function(){return flip(baserate)})
    condition(sum(choices) >= 2)
    return choices[0]})
}

time(infModel, 10)
// You can even try baserate = 0.001 and numFlips = 100 and it should still be rather efficient. 
// However, what we cannot see here is that what we gain in efficiency, we lose in precision. 
// But this is a standard tradeoff. 

Rejection Sampling and Observe

//Try to run this model.
// A latent Gaussian value is observed through Gaussian noise; we then
// condition on the noisy observation hitting exactly 0.2.
var model = function(){
  var trueX = sample(Gaussian({mu: 0, sigma: 1}))
  var obsX = sample(Gaussian({mu: trueX, sigma: 0.1}))
  // Conditioning a continuous sample on an exact value has probability 0,
  // so rejection sampling (essentially) never accepts — this is the point
  // of the demo below.
  condition(obsX == 0.2)
  return trueX
}
viz(Infer({method: 'rejection', samples:1}, model))
// This should fail. Indeed, the condition is almost impossible to satisfy, a random sample will not satisfy the condition. 
// With observe, this will work. 
var model = function(){
  var trueX = sample(Gaussian({mu: 0, sigma: 1}))
  // observe scores the trace by the Gaussian density at 0.2 instead of
  // demanding exact equality, so continuous conditioning becomes feasible.
  observe(Gaussian({mu: trueX, sigma: 0.1}), 0.2)
  return trueX
}
viz(Infer({method: 'rejection', samples:1000, maxScore: 2}, model))
//However, you need to specify that the "maxScore" option is different from the default value ("0").
//Try removing this option. Why should naive rejection sampling not work here?

Markov Chains

// A simple Markov chain over two states, 'S' and 'C'.
var states = ['S', 'C'];
// Row of transition probabilities for each state, aligned with 'states'.
var transitionProbs = {S: [.3, .7], C: [.5, .5]}

// Draw the next state from the categorical distribution of the current state.
var transition = function(state){
  var probs = transitionProbs[state]
  return categorical({vs: states, ps: probs})
}

// Apply the transition function n times starting from 'state'.
var chain = function(state, n){
  if (n == 0) { return state }
  return chain(transition(state), n-1)
}

// FIX: the repeat callbacks must 'return' the chain value; without 'return'
// each callback yields undefined and the histograms are meaningless.
// Note how the distributions from both start states converge as n grows
// (the stationary distribution of the chain).
print("State after 10 steps:")
viz.hist(repeat(1000,function() {return chain('S',10)}))
viz.hist(repeat(1000,function() {return chain('C',10)}))

print("State after 25 steps:")
viz.hist(repeat(1000,function() {return chain('S',25)}))
viz.hist(repeat(1000,function() {return chain('C',25)}))

print("State after 50 steps:")
viz.hist(repeat(1000,function() {return chain('S',50)}))
viz.hist(repeat(1000,function() {return chain('C',50)}))

Metropolis Hasting on the Example of Trick Dice.

// A fair six-sided die: uniform over {1, ..., 6}.
var dice = function() {
  var face = randomInteger(6)  // uniform over 0..5
  return face + 1
}

// Metropolis-Hastings acceptance rate for the trick-dice target:
// the target puts weight 5 on face 6 and weight 1 on every other face,
// and the proposal (a fair die roll) is symmetric, so
// alpha(n, m) = min(1, pi(m) / pi(n)).
var alpha = function(n,m) {
  var weight = function(k) { return (k == 6) ? 5 : 1 }
  return Math.min(1, weight(m) / weight(n))
}

// The Metropolis-Hastings algorithm: propose a fresh die roll, accept it
// with probability alpha, otherwise repeat the current state.
// FIX: the recursive call was not returned, so myMH always yielded
// undefined; 'return myMHloop(...)' propagates the sample list. The in-place
// 'push' mutation is replaced with 'concat' (same resulting list; WebPPL is
// a purely functional subset of JS, so mutation should be avoided).
var myMH = function(Nsamples) {
  var myMHloop = function(n,l) {
    if (n == 0) {return l}
    var x = l[l.length - 1]  // current state of the chain
    var y = dice()           // proposed next state
    var next = flip(alpha(x,y)) ? l.concat([y]) : l.concat([x])
    return myMHloop(n-1, next)
  }
  return myMHloop(Nsamples,[1])
}
print(myMH(1000))
viz(myMH(1000))
// Remark that the frequencies are correct, but the samples are not independent (big blocks of 6)!

// Geometric Distribution 
// Geometric distribution with p = 1/2: number of tails before the first heads.
// FIX: the body lacked 'return', so the function always yielded undefined.
var geometric = function() {
  return (flip()) ? 0 : (1 + geometric())
}

viz(repeat(10000,geometric))

// MH acceptance rate for the geometric(1/2) target, where the proposal
// moves 0 -> 1 deterministically and otherwise steps +/-1 uniformly.
// 'n' is the current state and 'i' the proposed increment (y - x):
// from 0 accept with 1/4; an upward step with 1/2; a downward step always.
var alpha = function(n,i) {
  if (n == 0) { return 1/4 }
  return (i == 1) ? 1/2 : 1
}

// The MH algorithm targeting the geometric distribution.
// FIX: the recursive call was not returned, so myMH always yielded
// undefined; 'return myMHloop(...)' propagates the sample list. The in-place
// 'push' mutation is replaced with 'concat' (same resulting list; WebPPL is
// a purely functional subset of JS, so mutation should be avoided).
var myMH = function(Nsamples) {
  var myMHloop = function(n,l) {
    if (n == 0) {return l}
    var x = l[l.length - 1]  // current state of the chain
    // Proposal: from 0 always move to 1; otherwise step up or down uniformly.
    var y = (x == 0) ? 1 : ((flip()) ? x+1 : x-1)
    var next = flip(alpha(x,y-x)) ? l.concat([y]) : l.concat([x])
    return myMHloop(n-1, next)
  }
  return myMHloop(Nsamples,[0])
}
print(myMH(10000))
viz(myMH(10000))
// You can try to change the initial state and look at the first samples