Commit 6b07f5bc authored by Manuel Baum
Fixed several bugs related to evidence. Hopefully included LinearBeta, still needs testing though. Changed sample_global and sample_local once again in ContinuousNode
parent 282527ea
@@ -20,22 +20,22 @@ bn = BayesNet()
 cnf=ContinuousNodeFactory()
 age = cnf.createLinearExponentialNode("Plant_age")
 height = cnf.createLinearGaussNode("Plant_height")
-#diameter = cnf.createLinearBetaNode("Plant_diameter")
+diameter = cnf.createLinearBetaNode("Plant_diameter")
 bn.add_node(age)
 bn.add_node(height)
-#bn.add_node(diameter)
+bn.add_node(diameter)
 bn.add_edge(age,height)
-#bn.add_edge(age,diameter)
+bn.add_edge(age,diameter)
 #parameterization
-age_parameters=LinearExponentialParameters(4,{})
+age_parameters=LinearExponentialParameters(1.0,{},2.0)
 age.set_density_parameters(age_parameters)
 height_parameters=LinearGaussParameters(0.1,{age:1},0.3)
 height.set_density_parameters(height_parameters)
-#diameter_parameters=LinearGaussParameters(0.1,{age:-0.2},0.1)
-#diameter.set_density_parameters(diameter_parameters)
+diameter_parameters=LinearBetaParameters(0.01,{age:0.2},2,5)
+diameter.set_density_parameters(diameter_parameters)
 mcmc_ask=MCMC(bn)
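Read together with the density changes further down in this commit, the new parameterization means each node follows a standard distribution displaced by a linear function of its parents: Plant_age ~ 1.0 + Exp(2.0), Plant_height ~ Normal(0.1 + 1.0*age, var 0.3), Plant_diameter ~ 0.01 + 0.2*age + Beta(2, 5). A minimal sketch of one forward sample under that reading, in plain Python and independent of the primo API:

    import random

    age = 1.0 + random.expovariate(2.0)                      # LinearExponential: b0 + Exp(lambda0)
    height = random.normalvariate(0.1 + 1.0*age, 0.3**0.5)   # LinearGauss: Normal(b0 + b*age, var)
    diameter = 0.01 + 0.2*age + random.betavariate(2, 5)     # LinearBeta: b0 + b*age + Beta(alpha, beta)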
@@ -20,27 +20,24 @@ class ContinuousNode(Node):
         return self.cpd.sample_proposal(x)
 
-    def sample_local(self, x, evidence=None):
+    def sample_local(self, x, evidence):
         '''This is the simplest and most naive implementation of the method. It
         uses bogo-search to find a sample that fits the evidence. You could
         reimplement it by integrating the normal density over the intervals
         allowed by the evidence and then generating a sample directly.
         Currently this method is O(inf).'''
         v=random.normalvariate(x,1.0)
-        if evidence != None:
-            while not evidence.is_compatible(v):
+        if self in evidence.keys():
+            while not evidence[self].is_compatible(v):
                 v=random.normalvariate(x,1.0)
         return v
 
-    def sample_global(self, evidence=None):
+    def sample_global(self, state):
         '''Simple, stupid and O(inf). For an improvement idea see the comment on sample_local().'''
-        if evidence==None:
-            return self.cpd.sample_global()
-        else:
-            proposal=self.cpd.sample_global()
-            while not evidence.is_compatible(proposal):
-                proposal=self.cpd.sample_global()
-            return proposal
+        proposal=self.cpd.sample_global(state)
+        #while not evidence.is_compatible(proposal):
+        #    proposal=self.cpd.sample_global(evidence)
+        return proposal
 
     def get_probability(self, value, node_value_pairs):
         return self.cpd.get_probability(value, node_value_pairs)
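The improvement suggested in the sample_local docstring amounts to drawing directly from a normal distribution truncated to the region the evidence allows, instead of rejection looping. A minimal sketch, assuming the evidence can be reduced to one closed interval [lo, hi]; interval accessors like that are an assumption, not part of the Evidence API shown here:

    from scipy.stats import truncnorm

    def sample_local_truncated(x, lo, hi, sigma=1.0):
        # Standardize the interval bounds, then let scipy invert the truncated CDF,
        # giving one exact draw from N(x, sigma^2) restricted to [lo, hi].
        a, b = (lo - x) / sigma, (hi - x) / sigma
        return truncnorm.rvs(a, b, loc=x, scale=sigma)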
@@ -28,23 +28,23 @@ class DiscreteNode(RandomNode):
         return self.cpd.is_normalized_as_cpt(self)
 
     def sample_global(self, evidence=None):
-        if evidence==None:
+        if evidence==None or not self in evidence.keys():
            compatibles=self.value_range
        else:
            compatibles=[]
            for v in self.value_range:
-                if evidence.is_compatible(v):
+                if evidence[self].is_compatible(v):
                    compatibles.append(v)
        return random.choice(compatibles)
 
    def sample_local(self, x, evidence=None):
-        if evidence==None:
+        if evidence==None or not self in evidence.keys():
            compatibles=self.value_range
        else:
            compatibles=[]
            for v in self.value_range:
-                if evidence.is_compatible(v):
+                if evidence[self].is_compatible(v):
                    compatibles.append(v)
        return random.choice(compatibles)
@@ -74,15 +74,17 @@ class MCMC(object):
         return probability_of_evidence
 
     def _generateInitialStateWithEvidence(self, evidence):
-        state=[]
-        for var in self.bn.get_nodes([]):
+        return self.forward_sample(evidence)
+
+    def forward_sample(self, evidence):
+        state={}
+        for var in self.bn.get_nodes_in_topological_sort():
             if var in evidence.keys():
-                unambigous=evidence[var].get_unambigous_value()
-                if unambigous == None:
-                    state.append((var,var.sample_global(evidence[var])))
-                else:
-                    state.append((var,unambigous))
+                value=evidence[var].get_unambigous_value()
+                if value == None:
+                    value=var.sample_global(state)
+                state[var]=value
             else:
-                state.append((var,var.sample_global()))
-        return dict(state)
+                state[var]=var.sample_global(state)
+        return state
@@ -11,9 +11,10 @@ class GibbsTransitionModel(object):
     def __init__(self):
         pass
 
-    def transition(self, network, state, constant_nodes):
+    def transition(self, network, state, extern_evidence):
         nodes = network.get_nodes([])
-        nodes_to_resample=list(set(nodes)-set(constant_nodes))
+        nodes_to_resample=[n for n in nodes if not n in extern_evidence.keys() or extern_evidence[n].get_unambigous_value() == None]
+        #print nodes_to_resample
         for node in nodes_to_resample:
             parents=network.get_parents(node)
             if parents:
@@ -69,13 +70,13 @@ class MetropolisHastingsTransitionModel(object):
             p = p * child.get_probability(state[child],evidence)
         return p
 
-    def transition(self, network, state, constant_nodes):
+    def transition(self, network, state, extern_evidence):
         nodes = network.get_nodes([])
-        nodes_to_resample=list(set(nodes)-set(constant_nodes))
+        nodes_to_resample=[n for n in nodes if not n in extern_evidence.keys() or extern_evidence[n].get_unambigous_value() == None]
         for node in nodes_to_resample:
             #propose a new value for this variable:
             current_value = state[node]
-            proposed_value = node.sample_local(current_value)
+            proposed_value = node.sample_local(current_value, extern_evidence)
 
             p_of_proposal_given_mb = self._compute_p_of_value_given_mb(network, state, node, proposed_value)
             p_of_current_given_mb = self._compute_p_of_value_given_mb(network, state, node, current_value)
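The acceptance step that uses these two Markov-blanket probabilities lies outside this hunk. For reference, a standard Metropolis acceptance on top of them would look roughly like this; a sketch of the textbook rule, not necessarily the exact code in this file:

    import random

    # Assumes p_of_current_given_mb > 0; accept the proposal with
    # probability min(1, p_proposed / p_current), otherwise keep the old value.
    acceptance_prob = min(1.0, p_of_proposal_given_mb / p_of_current_given_mb)
    if random.random() < acceptance_prob:
        state[node] = proposed_value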
@@ -98,27 +99,29 @@ class MarkovChainSampler(object):
     def set_convergence_test(self, test):
         self.convergence_test=test
 
-    def generateMarkovChain(self, network, time_steps, transition_model, initial_state, evidence=[], variables_of_interest=[]):
+    def generateMarkovChain(self, network, time_steps, transition_model, initial_state, evidence={}, variables_of_interest=[]):
         state=initial_state
         if evidence:
             for node in evidence.keys():
                 if not evidence[node].is_compatible(state[node]):
                     raise Exception("The evidence given does not fit the initial_state specified")
-            constant_nodes = evidence.keys()
-        else:
-            constant_nodes=[]
+#            constant_nodes = evidence.keys()
+#        else:
+#            constant_nodes=[]
 
         #let the distribution converge to the target distribution
         while not self.convergence_test.has_converged(state):
-            state=transition_model.transition(network, state, constant_nodes)
+            #print state
+            state=transition_model.transition(network, state, evidence)
 
         #finally sample from the target distribution
         for t in xrange(time_steps):
+            #print state
             if variables_of_interest:
                 yield self._reduce_state_to_variables_of_interest(state, variables_of_interest)
             else:
                 yield state
-            state=transition_model.transition(network, state, constant_nodes)
+            state=transition_model.transition(network, state, evidence)
 
     def _reduce_state_to_variables_of_interest(self, state, variables_of_interest):
         return dict((k,v) for (k,v) in state.iteritems() if k in variables_of_interest)
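A usage sketch for the new evidence-dict interface, with bn, mcmc_ask and height as in the example script above; the sampler instance (mcs), transition model and convergence-test setup are assumed here and are not shown in this commit:

    initial_state = mcmc_ask.forward_sample(evidence)   # evidence: dict {node: Evidence object}
    chain = mcs.generateMarkovChain(bn, 5000, transition_model,
                                    initial_state, evidence,
                                    variables_of_interest=[height])
    samples = [state[height] for state in chain]
    estimated_mean_height = sum(samples) / float(len(samples))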
 from primo.reasoning.density import Density
+from scipy.stats import beta
+import random
 
 class LinearBetaParameters():
-    def __init__(self, p0, p, q0, q):
-        self.p0=p0
-        self.p=p
-        self.q0=q0
-        self.q=q
+    def __init__(self, b0, b, alpha, beta):
+        self.b0=b0
+        self.b=b
+        self.alpha=alpha
+        self.beta=beta
 
 class LinearBeta(Density):
     def __init__(self, node):
-        pass
+        self.b={}
+        self.b0=0
+        self.alpha=1
+        self.beta=1
+        self.node=node
+
+    def set_parameters(self,parameters):
+        self.b=parameters.b
+        self.b0=parameters.b0
+        self.alpha=parameters.alpha
+        self.beta=parameters.beta
+
+    def add_variable(self, variable):
+        '''This method needs some serious reworking: Variables should not be denied
+        to be parents because of their value range. Instead it should be evaluated
+        whether they can yield parameters for this distribution that are permitted. This
+        can in any case happen under bad influence coefficients.'''
+        if( not variable.get_value_range() == (0,float("Inf"))):
+            raise Exception("Tried to add Variable into LinearBeta density, but variable had wrong value-range")
+        self.b[variable]=0.0
+
+    def get_probability(self,value, node_value_pairs):
+        #Compute the offset for the density and displace the value accordingly
+        x = self.b0
+        for node,node_value in node_value_pairs:
+            x = x + self.b[node]*node_value
+        value=value-x
+        #Evaluate the displaced density at value
+        if value<0 or value>1:
+            return 0
+        return beta(self.alpha, self.beta).pdf(value)
+
+    def _compute_offset_given_parents(self, state):
+        x = self.b0
+        for node in self.b.keys():
+            if node in state.keys():
+                x = x + self.b[node]*state[node]
+        return x
+
+    def sample_global(self, state):
+        return random.betavariate(self.alpha,self.beta)+self._compute_offset_given_parents(state)
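Concretely, the displaced Beta works like this: the parents contribute an offset x = b0 + sum(b[node]*value), samples are Beta(alpha, beta) draws shifted by x, and the density of a value is the Beta pdf evaluated at value - x. A tiny standalone check of that relationship, using the parameters from the example script; this is a sketch, not primo code, and the parent value is assumed:

    import random
    from scipy.stats import beta

    b0, coeff, alpha_p, beta_p = 0.01, 0.2, 2, 5            # LinearBetaParameters(0.01, {age: 0.2}, 2, 5)
    age_value = 1.5                                          # assumed value of the parent Plant_age
    offset = b0 + coeff*age_value                            # _compute_offset_given_parents
    sample = random.betavariate(alpha_p, beta_p) + offset    # sample_global
    density = beta(alpha_p, beta_p).pdf(sample - offset)     # get_probability at the sample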
@@ -4,9 +4,10 @@ import random
 import math
 
 class LinearExponentialParameters(object):
-    def __init__(self, b0, b):
+    def __init__(self, b0, b, lambda0):
         self.b0=b0
         self.b=b
+        self.lambda0=lambda0
@@ -14,14 +15,16 @@ class LinearExponential(Density):
     def __init__(self, node):
         self.b={}
         self.b0=0
+        self.lambda0=1
         self.node=node
 
     def set_parameters(self,parameters):
         self.b=parameters.b
         self.b0=parameters.b0
+        self.lambda0=parameters.lambda0
 
     def add_variable(self, variable):
-        '''This method needs some serious reworking: Variables should not be permitted
+        '''This method needs some serious reworking: Variables should not be denied
         to be parents because of their value range. Instead it should be evaluated
         whether they can yield parameters for this distribution that are permitted. This
         can in any case happen under bad influence coefficients.'''
@@ -30,15 +33,23 @@ class LinearExponential(Density):
         self.b[variable]=0.0
 
     def get_probability(self,value, node_value_pairs):
-        if value<=0:
-            return 0
-        lambda_parameter = self.b0
-        for node,value in node_value_pairs:
-            lambda_parameter = lambda_parameter + self.b[node]*value
-        #print scipy.stats.expon(lambda_parameter).pdf(value)
-        return lambda_parameter*math.exp(-lambda_parameter*value)
+        #Compute the offset for the density and displace the value accordingly
+        x = self.b0
+        for node,node_value in node_value_pairs:
+            x = x + self.b[node]*node_value
+        value=value-x
+        #Evaluate the displaced density at value
+        if value<0:
+            return 0
+        return self.lambda0*math.exp(-self.lambda0*value)
+
+    def _compute_offset_given_parents(self, state):
+        x = self.b0
+        for node in self.b.keys():
+            if node in state.keys():
+                x = x + self.b[node]*state[node]
+        return x
 
-    def sample_global(self):
-        print self.b0
-        return random.expovariate(self.b0)
+    def sample_global(self,state):
+        return random.expovariate(self.lambda0)+self._compute_offset_given_parents(state)
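With the example parameters (b0 = 1.0, no parents, lambda0 = 2.0), the reworked density is therefore p(v) = 2*exp(-2*(v - 1)) for v >= 1 and 0 otherwise, and sample_global returns Exp(2) + 1. A quick standalone consistency check, not primo code:

    import math, random

    lambda0, b0 = 2.0, 1.0
    v = random.expovariate(lambda0) + b0     # sample_global with no parents
    p = lambda0*math.exp(-lambda0*(v - b0))  # get_probability at the sampled value
    assert v >= b0 and p > 0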
@@ -58,6 +58,13 @@ class LinearGauss(Density):
     def set_var(self, var):
         self.var=var
 
-    def sample_global(self):
-        return random.normalvariate(self.b0,self.var**0.5)
+    def _compute_offset_given_parents(self, state):
+        x = self.b0
+        for node in self.b.keys():
+            if node in state.keys():
+                x = x + self.b[node]*state[node]
+        return x
+
+    def sample_global(self,state):
+        return random.normalvariate(self._compute_offset_given_parents(state),self.var**0.5)
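LinearGauss now follows the same linear-offset-over-parents pattern as LinearExponential and LinearBeta above. A sketch of one possible shared helper; a refactoring suggestion, not part of this commit:

    def linear_offset(b0, b, state):
        # b0: intercept, b: dict {parent_node: coefficient},
        # state: dict {node: current value}; parents missing from state contribute nothing.
        x = b0
        for node, coeff in b.items():
            if node in state:
                x += coeff*state[node]
        return x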