Skip to content
Snippets Groups Projects
Commit 6b07f5bc authored by Manuel Baum's avatar Manuel Baum
Browse files

Fixed several bugs related to evidence. Hopefully included LinearBeta, still...

Fixed several bugs related to evidence. Hopefully included LinearBeta, still needs testing though. Changed sample_global and sample_local once again in ContinuousNode
parent 282527ea
No related branches found
No related tags found
No related merge requests found
...@@ -20,22 +20,22 @@ bn = BayesNet() ...@@ -20,22 +20,22 @@ bn = BayesNet()
# Build the example network: plant age influences both height and diameter.
cnf = ContinuousNodeFactory()
age = cnf.createLinearExponentialNode("Plant_age")
height = cnf.createLinearGaussNode("Plant_height")
diameter = cnf.createLinearBetaNode("Plant_diameter")
bn.add_node(age)
bn.add_node(height)
bn.add_node(diameter)
bn.add_edge(age, height)
bn.add_edge(age, diameter)

#parameterization
age_parameters = LinearExponentialParameters(1.0, {}, 2.0)
age.set_density_parameters(age_parameters)
height_parameters = LinearGaussParameters(0.1, {age: 1}, 0.3)
height.set_density_parameters(height_parameters)
diameter_parameters = LinearBetaParameters(0.01, {age: 0.2}, 2, 5)
diameter.set_density_parameters(diameter_parameters)

mcmc_ask = MCMC(bn)
......
...@@ -20,27 +20,24 @@ class ContinuousNode(Node): ...@@ -20,27 +20,24 @@ class ContinuousNode(Node):
return self.cpd.sample_proposal(x) return self.cpd.sample_proposal(x)
def sample_local(self, x, evidence=None):
    '''Propose a new value for this variable in the vicinity of x.

    This is the most simple and stupid implementation of the method. It
    uses bogo-search to find a sample that fits the evidence. You could
    reimplement it by constructing the integral over the normalvariate in the
    intervals allowed by the evidence and then generate a sample directly.
    Currently this method has O(inf).

    :param x: current value of this variable
    :param evidence: optional dict mapping nodes to evidence objects; only
        the entry for this node (if any) restricts the proposal
    :returns: a proposed value compatible with the evidence on this node
    '''
    v = random.normalvariate(x, 1.0)
    # Guard against evidence being omitted entirely or not constraining
    # this particular node (the unguarded version crashed on evidence=None).
    if evidence is not None and self in evidence:
        while not evidence[self].is_compatible(v):
            v = random.normalvariate(x, 1.0)
    return v
def sample_global(self, state):
    '''Simple, stupid and O(inf). Improvement idea: see comment on sample_local().

    Delegates to this node's conditional probability density, which draws a
    value given the partially sampled network state.

    :param state: dict mapping already-sampled nodes to their values
    :returns: a sample for this node
    '''
    # The old evidence-rejection loop was left behind as commented-out dead
    # code; removed for clarity.
    return self.cpd.sample_global(state)
def get_probability(self, value, node_value_pairs):
    '''Evaluate this node's density at value, given the assignment of the
    parent nodes in node_value_pairs. Pure delegation to the cpd.'''
    return self.cpd.get_probability(value, node_value_pairs)
......
...@@ -28,23 +28,23 @@ class DiscreteNode(RandomNode): ...@@ -28,23 +28,23 @@ class DiscreteNode(RandomNode):
return self.cpd.is_normalized_as_cpt(self) return self.cpd.is_normalized_as_cpt(self)
def sample_global(self, evidence=None):
    '''Draw a value for this node uniformly from its value range.

    If evidence for this node is present, the draw is restricted to the
    values compatible with that evidence.

    :param evidence: optional dict mapping nodes to evidence objects
    :returns: one value from self.value_range
    '''
    if evidence is None or self not in evidence:
        compatibles = self.value_range
    else:
        compatibles = [v for v in self.value_range
                       if evidence[self].is_compatible(v)]
    return random.choice(compatibles)
def sample_local(self, x, evidence=None):
    '''Propose a new value for this node; the current value x is ignored.

    For a discrete node the local proposal is simply an unconditional draw
    from the (evidence-compatible) value range, so this delegates to
    sample_global() instead of duplicating its filtering logic verbatim.

    :param x: current value (unused for discrete nodes)
    :param evidence: optional dict mapping nodes to evidence objects
    :returns: one value from self.value_range compatible with the evidence
    '''
    return self.sample_global(evidence)
...@@ -74,15 +74,17 @@ class MCMC(object): ...@@ -74,15 +74,17 @@ class MCMC(object):
return probability_of_evidence return probability_of_evidence
def _generateInitialStateWithEvidence(self, evidence):
    '''Create an initial network state consistent with the given evidence.'''
    return self.forward_sample(evidence)

def forward_sample(self, evidence):
    '''Sample a complete network state in topological order.

    Nodes with an unambiguous evidence value take that value directly; all
    other nodes are sampled given the already-sampled values of their
    ancestors in state.

    :param evidence: dict mapping nodes to evidence objects
    :returns: dict mapping every node of the network to a sampled value
    '''
    state = {}
    for var in self.bn.get_nodes_in_topological_sort():
        if var in evidence:
            # NOTE(review): "get_unambigous_value" spelling follows the
            # evidence API used elsewhere in this project.
            value = evidence[var].get_unambigous_value()
            if value is None:
                value = var.sample_global(state)
            state[var] = value
        else:
            state[var] = var.sample_global(state)
    return state
...@@ -11,9 +11,10 @@ class GibbsTransitionModel(object): ...@@ -11,9 +11,10 @@ class GibbsTransitionModel(object):
def __init__(self):
    '''The Gibbs transition model keeps no state; nothing to initialize.'''
    pass
def transition(self, network, state, constant_nodes): def transition(self, network, state, extern_evidence):
nodes = network.get_nodes([]) nodes = network.get_nodes([])
nodes_to_resample=list(set(nodes)-set(constant_nodes)) nodes_to_resample=[n for n in nodes if not n in extern_evidence.keys() or extern_evidence[n].get_unambigous_value() == None]
#print nodes_to_resample
for node in nodes_to_resample: for node in nodes_to_resample:
parents=network.get_parents(node) parents=network.get_parents(node)
if parents: if parents:
...@@ -69,13 +70,13 @@ class MetropolisHastingsTransitionModel(object): ...@@ -69,13 +70,13 @@ class MetropolisHastingsTransitionModel(object):
p = p * child.get_probability(state[child],evidence) p = p * child.get_probability(state[child],evidence)
return p return p
def transition(self, network, state, constant_nodes): def transition(self, network, state, extern_evidence):
nodes = network.get_nodes([]) nodes = network.get_nodes([])
nodes_to_resample=list(set(nodes)-set(constant_nodes)) nodes_to_resample=[n for n in nodes if not n in extern_evidence.keys() or extern_evidence[n].get_unambigous_value() == None]
for node in nodes_to_resample: for node in nodes_to_resample:
#propose a new value for this variable: #propose a new value for this variable:
current_value = state[node] current_value = state[node]
proposed_value = node.sample_local(current_value) proposed_value = node.sample_local(current_value, extern_evidence)
p_of_proposal_given_mb = self._compute_p_of_value_given_mb(network, state, node, proposed_value) p_of_proposal_given_mb = self._compute_p_of_value_given_mb(network, state, node, proposed_value)
p_of_current_given_mb = self._compute_p_of_value_given_mb(network, state, node, current_value) p_of_current_given_mb = self._compute_p_of_value_given_mb(network, state, node, current_value)
...@@ -98,27 +99,29 @@ class MarkovChainSampler(object): ...@@ -98,27 +99,29 @@ class MarkovChainSampler(object):
def set_convergence_test(self, test):
    '''Install the convergence test used to decide when burn-in ends; it
    must offer has_converged(state).'''
    self.convergence_test = test
def generateMarkovChain(self, network, time_steps, transition_model, initial_state, evidence=None, variables_of_interest=None):
    '''Generate a sequence of states from the Markov chain.

    First lets the chain run until the convergence test reports convergence
    (burn-in), then yields time_steps states.

    :param network: the Bayesian network to sample from
    :param time_steps: number of states to yield after burn-in
    :param transition_model: object offering transition(network, state, evidence)
    :param initial_state: dict mapping nodes to their start values
    :param evidence: optional dict mapping nodes to evidence objects
    :param variables_of_interest: optional list of nodes; when given, yielded
        states are reduced to these nodes
    :raises Exception: if initial_state contradicts the evidence
    '''
    # Mutable default arguments ({} / []) are shared across calls in Python;
    # use None sentinels and normalize to fresh containers instead.
    if evidence is None:
        evidence = {}
    if variables_of_interest is None:
        variables_of_interest = []
    state = initial_state
    for node in evidence.keys():
        if not evidence[node].is_compatible(state[node]):
            raise Exception("The evidence given does not fit to the initial_state specified")
    # let the distribution converge to the target distribution (burn-in)
    while not self.convergence_test.has_converged(state):
        state = transition_model.transition(network, state, evidence)
    # finally sample from the target distribution
    for t in range(time_steps):
        if variables_of_interest:
            yield self._reduce_state_to_variables_of_interest(state, variables_of_interest)
        else:
            yield state
        state = transition_model.transition(network, state, evidence)
def _reduce_state_to_variables_of_interest(self, state, variables_of_interest):
    '''Return a copy of state restricted to the variables of interest.'''
    # items() (rather than Python-2-only iteritems()) keeps this portable;
    # behavior is identical for this read-only iteration.
    return dict((k, v) for (k, v) in state.items() if k in variables_of_interest)
......
from primo.reasoning.density import Density from primo.reasoning.density import Density
from scipy.stats import beta
import random import random
class LinearBetaParameters(object):
    '''Parameter bundle for a LinearBeta density.

    Inherits object explicitly (new-style class) for consistency with
    LinearExponentialParameters.

    :param b0: constant offset of the linear displacement
    :param b: dict mapping parent nodes to their linear influence weights
    :param alpha: alpha shape parameter of the beta distribution
    :param beta: beta shape parameter of the beta distribution
    '''
    def __init__(self, b0, b, alpha, beta):
        self.b0 = b0
        self.b = b
        self.alpha = alpha
        self.beta = beta
class LinearBeta(Density):
    '''Beta density whose support is linearly displaced by the values of the
    parent nodes: the density is evaluated at value - (b0 + sum b_i * x_i).'''

    def __init__(self, node):
        self.b = {}      # parent node -> linear influence weight
        self.b0 = 0      # constant offset of the displacement
        self.alpha = 1   # beta shape parameter alpha
        self.beta = 1    # beta shape parameter beta
        self.node = node

    def set_parameters(self, parameters):
        '''Take all values from a LinearBetaParameters bundle.'''
        self.b = parameters.b
        self.b0 = parameters.b0
        self.alpha = parameters.alpha
        self.beta = parameters.beta

    def add_variable(self, variable):
        '''This method needs some serious reworking: Variables should not be
        denied to be parents because of their value range. Instead it should
        be evaluated if they can yield parameters for this distribution that
        are permitted. This can in any case happen under bad influence
        coefficients'''
        if not variable.get_value_range() == (0, float("Inf")):
            # Fixed copy-pasted message: this is the beta density, not Gaussian.
            raise Exception("Tried to add variable into beta density, but variable had wrong value-range")
        self.b[variable] = 0.0

    def get_probability(self, value, node_value_pairs):
        '''Evaluate the displaced beta density at value given the parents'
        values; returns 0 outside the displaced [0, 1] support.'''
        # Compute the offset for the density and displace the value accordingly.
        x = self.b0
        for node, node_value in node_value_pairs:
            x = x + self.b[node] * node_value
        value = value - x
        # Evaluate the displaced density at value.
        if value < 0 or value > 1:
            return 0
        return beta(self.alpha, self.beta).pdf(value)

    def _compute_offset_given_parents(self, state):
        '''Linear offset b0 + sum of b[node] * state[node], taken over those
        parents that already have a value in state.'''
        x = self.b0
        for node in self.b.keys():
            if node in state:
                x = x + self.b[node] * state[node]
        return x

    def sample_global(self, state):
        '''Draw a beta-distributed sample displaced by the parent-dependent offset.'''
        return random.betavariate(self.alpha, self.beta) + self._compute_offset_given_parents(state)
...@@ -4,9 +4,10 @@ import random ...@@ -4,9 +4,10 @@ import random
import math import math
class LinearExponentialParameters(object):
    '''Parameter bundle for a LinearExponential density.

    :param b0: constant offset of the linear displacement
    :param b: dict mapping parent nodes to their linear influence weights
    :param lambda0: rate parameter of the exponential distribution
    '''
    def __init__(self, b0, b, lambda0):
        self.b0 = b0
        self.b = b
        self.lambda0 = lambda0
...@@ -14,14 +15,16 @@ class LinearExponential(Density): ...@@ -14,14 +15,16 @@ class LinearExponential(Density):
def __init__(self, node): def __init__(self, node):
self.b={} self.b={}
self.b0=0 self.b0=0
self.lambda0=1
self.node=node self.node=node
def set_parameters(self,parameters): def set_parameters(self,parameters):
self.b=parameters.b self.b=parameters.b
self.b0=parameters.b0 self.b0=parameters.b0
self.lambda0=parameters.lambda0
def add_variable(self, variable): def add_variable(self, variable):
'''This method needs some serious reworking: Variables should not be permitted '''This method needs some serious reworking: Variables should not be denied
to be parents because of their value range. Instead it should be evaluated to be parents because of their value range. Instead it should be evaluated
if they can yield parameters for this distribution that are permitted. This if they can yield parameters for this distribution that are permitted. This
can in any case happen under bad influence coefficients''' can in any case happen under bad influence coefficients'''
...@@ -30,15 +33,23 @@ class LinearExponential(Density): ...@@ -30,15 +33,23 @@ class LinearExponential(Density):
self.b[variable]=0.0 self.b[variable]=0.0
def get_probability(self, value, node_value_pairs):
    '''Evaluate the displaced exponential density at value.

    The exponential density (rate lambda0) is shifted by the linear offset
    b0 + sum of b[node] * node_value; values below that offset have density 0.

    :param value: point at which to evaluate the density
    :param node_value_pairs: iterable of (parent node, parent value) pairs
    :returns: the density at value, or 0 below the displaced support
    '''
    # Compute the offset for the density and displace the value accordingly.
    # BUG FIX: the loop variable used to be named `value`, shadowing the
    # argument and corrupting the evaluation point after the loop.
    x = self.b0
    for node, node_value in node_value_pairs:
        x = x + self.b[node] * node_value
    value = value - x
    # Evaluate the displaced density at value.
    if value < 0:
        return 0
    return self.lambda0 * math.exp(-self.lambda0 * value)
def _compute_offset_given_parents(self, state):
    '''Linear offset b0 + sum of influence weight * parent value, taken over
    those parents that already have a value in state.'''
    offset = self.b0
    for node, weight in self.b.items():
        if node in state:
            offset = offset + weight * state[node]
    return offset
def sample_global(self, state):
    '''Draw a sample: exponential noise with rate lambda0, displaced by the
    parent-dependent linear offset.

    :param state: dict mapping already-sampled nodes to their values
    :returns: a sample from the displaced exponential distribution
    '''
    return random.expovariate(self.lambda0) + self._compute_offset_given_parents(state)
...@@ -58,6 +58,13 @@ class LinearGauss(Density): ...@@ -58,6 +58,13 @@ class LinearGauss(Density):
def set_var(self, var):
    '''Set the variance of the Gaussian noise term of this density.'''
    self.var = var
def _compute_offset_given_parents(self, state):
    '''Linear offset b0 + sum of b[node] * state[node], taken over those
    parents that already have a value in state.'''
    x = self.b0
    for node in self.b.keys():
        if node in state:
            x = x + self.b[node] * state[node]
    return x

def sample_global(self, state):
    '''Draw a Gaussian sample whose mean is displaced by the parent-dependent
    offset; standard deviation is sqrt(var).'''
    return random.normalvariate(self._compute_offset_given_parents(state), self.var ** 0.5)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment