Commit 3572c578 authored by Manuel Baum's avatar Manuel Baum
Browse files

Comments and a small refactoring to give ConvergenceTestSimpleCounting its own module

parent b0937822
......@@ -38,7 +38,7 @@ diameter_parameters=LinearBetaParameters(0.01,{age:0.2},2,5)
diameter.set_density_parameters(diameter_parameters)
mcmc_ask=MCMC(bn)
mcmc_ask=MCMC(bn,1000)
evidence={age:EvEqual(2)}
......
......@@ -66,7 +66,7 @@ diameter.set_density_parameters(diameter_parameters)
children.set_density_parameters(LinearExponentialParameters(0.1,{ground:1.0,height:1.0}))
mcmc_ask=MCMC(bn)
mcmc_ask=MCMC(bn,1000)
evidence={age:EvEqual(2)}
......
......@@ -27,7 +27,7 @@ alarm.set_probability_table(alarm_cpt, [burglary,alarm])
mcmc_ask=MCMC(bn)
mcmc_ask=MCMC(bn,1000)
evidence={burglary:EvEq("Intruder")}
......
......@@ -45,7 +45,7 @@ diameter_parameters=LinearGaussParameters(0.1,{age:-0.2},0.1)
diameter.set_density_parameters(diameter_parameters)
mcmc_ask=MCMC(bn)
mcmc_ask=MCMC(bn,1000)
evidence={age:EvEq(2)}
......
......@@ -2,36 +2,57 @@ from primo.reasoning import MarkovChainSampler
from primo.reasoning import GibbsTransitionModel
from primo.reasoning import MetropolisHastingsTransitionModel
from primo.reasoning.density import ProbabilityTable
from primo.reasoning.convergence_test import ConvergenceTestSimpleCounting
class ConvergenceTestSimpleCounting(object):
    '''
    Trivial convergence test: declares the markov chain "converged" after a
    fixed number of has_converged() calls, regardless of the states seen.
    '''
    def __init__(self, limit):
        '''
        @param limit: Number of observed states after which the chain is
            considered converged.
        '''
        self.chain_length = 0
        self.limit = limit

    def has_converged(self, state):
        '''
        Count one more observed state and report convergence.
        @param state: The current chain state (ignored by this test).
        @returns: True once at least `limit` states have been counted.
        '''
        self.chain_length = self.chain_length + 1
        return self.chain_length >= self.limit

    def reset(self):
        '''
        Restart the counter so the test instance can be reused.
        MarkovChainSampler.generateMarkovChain calls this before each run,
        so the method is required for the class to work with the sampler.
        '''
        self.chain_length = 0
class MCMC(object):
    '''
    Approximate inference on a BayesNet via Markov-Chain-Monte-Carlo sampling.
    '''
    def __init__(self, bn, times, transition_model=None, convergence_test=None):
        '''
        @param bn: The BayesNet that will be used for inference.
        @param times: How many samples shall be generated for each inference.
        @param transition_model: The transition model that shall be used to
            wander around the state space of the BayesNet. Defaults to a
            MetropolisHastingsTransitionModel.
        @param convergence_test: The ConvergenceTest that shall be used to
            determine if the markov chain that is being generated internally
            has converged to its stationary distribution. Defaults to
            ConvergenceTestSimpleCounting(100).
        '''
        self.bn = bn
        self.times = times
        # "is None" rather than "== None": identity comparison is the
        # correct idiom and immune to overloaded __eq__.
        if transition_model is None:
            transition_model = MetropolisHastingsTransitionModel()
        if convergence_test is None:
            convergence_test = ConvergenceTestSimpleCounting(100)
        # The sampler owns both collaborators from construction on; the old
        # set_convergence_test/two-step wiring is no longer needed.
        self.mcs = MarkovChainSampler(transition_model, convergence_test)
# Placeholder: generic marginal computation is not implemented yet; the
# concrete entry points are calculate_PriorMarginal/calculate_PosteriorMarginal.
def calculate_marginal(self):
pass
def calculate_PriorMarginal(self, variables, AssumedDensity):
    '''
    Compute the prior marginal over the given variables.

    A prior marginal is just a posterior marginal conditioned on nothing,
    so this simply delegates with an empty evidence dict.

    @param variables: A list containing the variables over which the prior
        marginal shall be defined.
    @param AssumedDensity: A class from primo.reasoning.density; determines
        the class of density used for the return value.
    @returns: An object of the class AssumedDensity.
    '''
    no_evidence = {}
    return self.calculate_PosteriorMarginal(variables, no_evidence, AssumedDensity)
def calculate_MAP(self, variables_of_interest, evidence, AssumedDensity):
'''
Calculate the maximum a posteriori hypothesis given some evidence.
@param variables_of_interest: A list containing the variables for which
the map-hypothesis is wanted.
@param evidence: A Dict from Node to Evidence.
@param AssumedDensity: A class from primo.reasoning.density . This
parameter is used to generate a posterior distribution from which it
is possible to find the value/set of values that are most probable.
You could pass a Gauss in a continuous setting here, for example.
@returns: The most probable instatiaion given the evidence in the form:
List of pairs (Node, Value).
'''
initial_state=self._generateInitialStateWithEvidence(evidence)
chain = self.mcs.generateMarkovChain(self.bn, self.times, self.transition_model, initial_state, evidence, variables_of_interest)
chain = self.mcs.generateMarkovChain(self.bn, self.times, initial_state, evidence, variables_of_interest)
density = AssumedDensity()
density.add_variables(variables_of_interest)
......@@ -40,8 +61,17 @@ class MCMC(object):
def calculate_PosteriorMarginal(self,variables_of_interest,evidence,AssumedDensity):
'''
Calculate some posterior marginal.
@param variables_of_interest: A list containing the variables over which
the posterior marginal shall be defined.
@param evidence: A Dict from Node to Evidence.
@param AssumedDensity: A class from primo.reasoning.density . This
parameter is used to define the class of density for the return value.
@returns: An object of the class AssumedDensity.
'''
initial_state=self._generateInitialStateWithEvidence(evidence)
chain = self.mcs.generateMarkovChain(self.bn, self.times, self.transition_model, initial_state, evidence, variables_of_interest)
chain = self.mcs.generateMarkovChain(self.bn, self.times, initial_state, evidence, variables_of_interest)
density = AssumedDensity()
density.add_variables(variables_of_interest)
......@@ -52,9 +82,13 @@ class MCMC(object):
def calculate_PoE(self,evidence):
'''
Calculate Probability of Evidence.
@param evidence: A Dict from Node to Evidence.
@returns: Float number representing the wanted probability.
'''
initial_state=self._generateInitialStateWithEvidence(evidence)
chain = self.mcs.generateMarkovChain(self.bn, self.times, self.transition_model, initial_state)
chain = self.mcs.generateMarkovChain(self.bn, self.times, initial_state)
compatible_count=0
number_of_samples=0
for state in chain:
......@@ -77,6 +111,12 @@ class MCMC(object):
return self.forward_sample(evidence)
def forward_sample(self, evidence):
'''
Generate a sample from the distribution defined by the given BayesNet by
forward sampling.
@param evidence: A Dict from Node to Evidence.
'''
state={}
for var in self.bn.get_nodes_in_topological_sort():
if var in evidence.keys():
......
......@@ -15,12 +15,23 @@ def weighted_random(weights):
class GibbsTransitionModel(object):
'''
Implements Gibbs-sampling. Can be used to constuct a Markov Chain.
Implements Gibbs-sampling. Can be used to constuct a Markov Chain and is
mainly used by MarkovChainSampler.
After "Probabilistic Graphical Models, Daphne Koller and Nir Friedman"(p.506)
'''
def __init__(self):
pass
def transition(self, network, state, extern_evidence):
'''
Does one single state transition.
@param network: A BayesNet.
@param state: The current state of the BayesNet. Given as a Dict from
RandomNode to Value
@param extern_evidence: Evidence that is given by the user. This is a
Dict from Node to Evidence.
'''
nodes = network.get_nodes([])
nodes_to_resample=[n for n in nodes if not n in extern_evidence.keys() or extern_evidence[n].get_unambigous_value() == None]
......@@ -52,6 +63,8 @@ class GibbsTransitionModel(object):
class MetropolisHastingsTransitionModel(object):
'''
Implements the Metropolis-Hastings-Algorithm. Can be used to constuct a Markov Chain.
After "Probabilistic Graphical Models, Daphne Koller and Nir Friedman"(p.644)
'''
def __init__(self):
pass
......@@ -75,7 +88,14 @@ class MetropolisHastingsTransitionModel(object):
return p
def transition(self, network, state, extern_evidence):
#print state
'''
Does one single state transition.
@param network: A BayesNet.
@param state: The current state of the BayesNet. Given as a Dict from
RandomNode to Value
@param extern_evidence: Evidence that is given by the user. This is a
Dict from Node to Evidence.
'''
nodes = network.get_nodes([])
nodes_to_resample=[n for n in nodes if not n in extern_evidence.keys() or extern_evidence[n].get_unambigous_value() == None]
for node in nodes_to_resample:
......@@ -85,31 +105,52 @@ class MetropolisHastingsTransitionModel(object):
p_of_proposal_given_mb = self._compute_p_of_value_given_mb(network, state, node, proposed_value)
p_of_current_given_mb = self._compute_p_of_value_given_mb(network, state, node, current_value)
#print node.name
#print p_of_proposal_given_mb
#print p_of_current_given_mb
acceptance_probability = min(1.0, p_of_proposal_given_mb/p_of_current_given_mb * 1.0/1.0)
if random.random() <= acceptance_probability:
state[node]=proposed_value
#new_state=weighted_random(reduced_cpd.get_table())
#state[node]=node.get_value_range()[new_state]
#print state
return state
class MarkovChainSampler(object):
'''
Can be used to generate a Markov Chain by sampling a Bayesian Network.
Can be used to generate a Markov Chain by sampling a Bayesian Network. This
object is mainly used by the MCMC inference class.
'''
def __init__(self):
self.convergence_test=None
pass
# Bind both collaborators once at construction time; generateMarkovChain
# relies on self.transition_model and self.convergence_test being set.
def __init__(self, transition_model, convergence_test):
'''
@param transition_model: This object is used to determine the next state
for the chain. Can be GibbsTransitionModel or MetropolisHastingsTransitionModel.
@param convergence_test: A Test that is being used to determine if the
markov chain has converged to its stationary distribution. For example
ConvergenceTestSimpleCounting.
'''
# Stored as-is; no validation is performed here.
self.transition_model=transition_model
self.convergence_test=convergence_test
# Swap out the convergence test used before samples are drawn.
# NOTE(review): now that the test is normally injected via __init__, this
# setter presumably remains for backward compatibility — confirm callers.
def set_convergence_test(self, test):
self.convergence_test=test
def generateMarkovChain(self, network, time_steps, transition_model, initial_state, evidence={}, variables_of_interest=[]):
def generateMarkovChain(self, network, time_steps, initial_state, evidence={}, variables_of_interest=[]):
'''
This function generates a markov chain by sampling from a bayesian network.
It is possible to use different transition functions.
After "Probabilistic Graphical Models, Daphne Koller and Nir Friedman"(p.509)
@param network: A BayesNet.
@param time_steps: Integer specifying how long the chain shall be.
@param initial_state: The state from which transition will start.
@param evidence: A Dict from RandomNode to Evidence.
@param variables_of_interest: If not all variable instantiations are needed
this List of RandomNode objects can be used to select which Nodes
are mentioned in the return object.
@returns: A Generator-object for a List of States. Each state is a Dict
from RandomNode to Value
'''
self.convergence_test.reset()
state=initial_state
if evidence:
......@@ -122,7 +163,7 @@ class MarkovChainSampler(object):
#let the distribution converge to the target distribution
while not self.convergence_test.has_converged(state):
state=transition_model.transition(network, state, evidence)
state=self.transition_model.transition(network, state, evidence)
#finally sample from the target distribution
for t in xrange(time_steps):
......@@ -130,7 +171,7 @@ class MarkovChainSampler(object):
yield self._reduce_state_to_variables_of_interest(state, variables_of_interest)
else:
yield state
state=transition_model.transition(network, state, evidence)
state=self.transition_model.transition(network, state, evidence)
def _reduce_state_to_variables_of_interest(self, state, variables_of_interest):
    '''
    Build a copy of `state` restricted to the requested variables.
    @param state: Dict from RandomNode to Value.
    @param variables_of_interest: Collection of RandomNodes to keep.
    @returns: A new Dict containing only the entries whose key is in
        variables_of_interest.
    '''
    reduced = {}
    for node, value in state.iteritems():
        if node in variables_of_interest:
            reduced[node] = value
    return reduced
......
class ConvergenceTestSimpleCounting(object):
    '''
    Simplest possible convergence test: after a fixed number of observed
    states the markov chain is declared converged, independent of content.
    '''
    def __init__(self, limit):
        '''
        @param limit: How many states must be seen before has_converged()
            starts returning True.
        '''
        self.limit = limit
        self.chain_length = 0

    def has_converged(self, state):
        '''
        Register one more observed state (the state itself is ignored).
        @returns: True once at least `limit` states have been counted.
        '''
        self.chain_length += 1
        return self.chain_length >= self.limit

    def reset(self):
        '''Restart the counter so this instance can be reused for a new chain.'''
        self.chain_length = 0
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment