From 6b07f5bc626bf39b82a2277d6e8904b259d8a458 Mon Sep 17 00:00:00 2001
From: Manuel Baum <mbaum@techfak.uni-bielefeld.de>
Date: Wed, 3 Jul 2013 18:57:46 +0200
Subject: [PATCH] Fixed several bugs related to evidence. Hopefully included
 LinearBeta, still needs testing though. Changed sample_global and
 sample_local once again in ContinuousNode

---
 examples/MCMC_Continuous.py                  | 12 ++--
 primo/reasoning/ContinuousNode.py            | 19 +++----
 primo/reasoning/DiscreteNode.py              |  8 +--
 primo/reasoning/MCMC.py                      | 20 ++++---
 primo/reasoning/MarkovChainSampler.py        | 27 +++++----
 primo/reasoning/density/LinearBeta.py        | 60 ++++++++++++++++++--
 primo/reasoning/density/LinearExponential.py | 33 +++++++----
 primo/reasoning/density/LinearGauss.py       | 11 +++-
 8 files changed, 129 insertions(+), 61 deletions(-)

diff --git a/examples/MCMC_Continuous.py b/examples/MCMC_Continuous.py
index 7bf4af8..dad8cd1 100644
--- a/examples/MCMC_Continuous.py
+++ b/examples/MCMC_Continuous.py
@@ -20,22 +20,22 @@ bn = BayesNet()
 cnf=ContinuousNodeFactory()
 age = cnf.createLinearExponentialNode("Plant_age")
 height = cnf.createLinearGaussNode("Plant_height")
-#diameter = cnf.createLinearBetaNode("Plant_diameter")
+diameter = cnf.createLinearBetaNode("Plant_diameter")
 bn.add_node(age)
 bn.add_node(height)
-#bn.add_node(diameter)
+bn.add_node(diameter)
 bn.add_edge(age,height)
-#bn.add_edge(age,diameter)
+bn.add_edge(age,diameter)
 
 #parameterization
-age_parameters=LinearExponentialParameters(4,{})
+age_parameters=LinearExponentialParameters(1.0,{},2.0)
 age.set_density_parameters(age_parameters)
 
 height_parameters=LinearGaussParameters(0.1,{age:1},0.3)
 height.set_density_parameters(height_parameters)
 
-#diameter_parameters=LinearGaussParameters(0.1,{age:-0.2},0.1)
-#diameter.set_density_parameters(diameter_parameters)
+diameter_parameters=LinearBetaParameters(0.01,{age:0.2},2,5)
+diameter.set_density_parameters(diameter_parameters)
 
 
 mcmc_ask=MCMC(bn)
diff --git a/primo/reasoning/ContinuousNode.py b/primo/reasoning/ContinuousNode.py
index b7c1e42..5700580 100644
--- a/primo/reasoning/ContinuousNode.py
+++ b/primo/reasoning/ContinuousNode.py
@@ -20,27 +20,24 @@ class ContinuousNode(Node):
         return self.cpd.sample_proposal(x)
         
         
-    def sample_local(self, x, evidence=None):
+    def sample_local(self, x, evidence):
         '''This is the most simple and stupid implementation of the method. It
         uses bogo-search to find a sample that fits the evidence. You could
         reimplement it by constructing the integral over the normalvariate in the
         intervalls allowed by the evidence and then generate a sample directly.
         Currently this method has O(inf).'''
         v=random.normalvariate(x,1.0)
-        if evidence != None:
-            while not evidence.is_compatible(v):
+        if self in evidence.keys():
+            while not evidence[self].is_compatible(v):
                 v=random.normalvariate(x,1.0)
         return v
         
-    def sample_global(self, evidence=None):
+    def sample_global(self, state):
         '''Simple, Stupid and O(inf). Improvement idea see comment on sample_local()'''
-        if evidence==None:
-            return self.cpd.sample_global()
-        else:
-            proposal=self.cpd.sample_global()
-            while not evidence.is_compatible(proposal):
-                proposal=self.cpd.sample_global()
-            return proposal
+        proposal=self.cpd.sample_global(state)
+        #while not evidence.is_compatible(proposal):
+        #    proposal=self.cpd.sample_global(evidence)
+        return proposal
         
     def get_probability(self, value, node_value_pairs):
         return self.cpd.get_probability(value, node_value_pairs)
diff --git a/primo/reasoning/DiscreteNode.py b/primo/reasoning/DiscreteNode.py
index eed1390..90bf85f 100644
--- a/primo/reasoning/DiscreteNode.py
+++ b/primo/reasoning/DiscreteNode.py
@@ -28,23 +28,23 @@ class DiscreteNode(RandomNode):
         return self.cpd.is_normalized_as_cpt(self)
         
     def sample_global(self, evidence=None):
-        if evidence==None:
+        if evidence==None or not self in evidence.keys():
             compatibles=self.value_range
         else:
             compatibles=[]
             for v in self.value_range:
-                if evidence.is_compatible(v):
+                if evidence[self].is_compatible(v):
                     compatibles.append(v)
         
         return random.choice(compatibles)
         
     def sample_local(self, x, evidence=None):
-        if evidence==None:
+        if evidence==None or not self in evidence.keys():
             compatibles=self.value_range
         else:
             compatibles=[]
             for v in self.value_range:
-                if evidence.is_compatible(v):
+                if evidence[self].is_compatible(v):
                     compatibles.append(v)
         
         return random.choice(compatibles)
diff --git a/primo/reasoning/MCMC.py b/primo/reasoning/MCMC.py
index fe7fd1b..bc04b1a 100644
--- a/primo/reasoning/MCMC.py
+++ b/primo/reasoning/MCMC.py
@@ -74,15 +74,17 @@ class MCMC(object):
         return probability_of_evidence
         
     def _generateInitialStateWithEvidence(self, evidence):
-        state=[]
-        for var in self.bn.get_nodes([]):
+        return self.forward_sample(evidence)
+        
+    def forward_sample(self, evidence):
+        state={}
+        for var in self.bn.get_nodes_in_topological_sort():
             if var in evidence.keys():
-                unambigous=evidence[var].get_unambigous_value()
-                if unambigous == None:
-                    state.append((var,var.sample_global(evidence[var])))
-                else:
-                    state.append((var,unambigous))
+                value=evidence[var].get_unambigous_value()
+                if value == None:
+                    value=var.sample_global(state)
+                state[var]=value
             else:
-                state.append((var,var.sample_global()))
-        return dict(state)
+                state[var]=var.sample_global(state)
+        return state
 
diff --git a/primo/reasoning/MarkovChainSampler.py b/primo/reasoning/MarkovChainSampler.py
index 0ff1ec6..12504b5 100644
--- a/primo/reasoning/MarkovChainSampler.py
+++ b/primo/reasoning/MarkovChainSampler.py
@@ -11,9 +11,10 @@ class GibbsTransitionModel(object):
     def __init__(self):
         pass
         
-    def transition(self, network, state, constant_nodes):
+    def transition(self, network, state, extern_evidence):
         nodes = network.get_nodes([])
-        nodes_to_resample=list(set(nodes)-set(constant_nodes))
+        nodes_to_resample=[n for n in nodes if not n in extern_evidence.keys() or extern_evidence[n].get_unambigous_value() == None]
+        # resample every node whose value is not pinned to a single value by external evidence
         for node in nodes_to_resample:
             parents=network.get_parents(node)
             if parents:
@@ -69,13 +70,13 @@ class MetropolisHastingsTransitionModel(object):
             p = p * child.get_probability(state[child],evidence)
         return p
         
-    def transition(self, network, state, constant_nodes):
+    def transition(self, network, state, extern_evidence):
         nodes = network.get_nodes([])
-        nodes_to_resample=list(set(nodes)-set(constant_nodes))
+        nodes_to_resample=[n for n in nodes if not n in extern_evidence.keys() or extern_evidence[n].get_unambigous_value() == None]
         for node in nodes_to_resample:
             #propose a new value for this variable:
             current_value = state[node]
-            proposed_value = node.sample_local(current_value)
+            proposed_value = node.sample_local(current_value, extern_evidence)
             
             p_of_proposal_given_mb = self._compute_p_of_value_given_mb(network, state, node, proposed_value)
             p_of_current_given_mb = self._compute_p_of_value_given_mb(network, state, node, current_value)
@@ -98,27 +99,29 @@ class MarkovChainSampler(object):
     def set_convergence_test(self, test):
         self.convergence_test=test
         
-    def generateMarkovChain(self, network, time_steps, transition_model, initial_state, evidence=[], variables_of_interest=[]):
+    def generateMarkovChain(self, network, time_steps, transition_model, initial_state, evidence={}, variables_of_interest=[]):
         state=initial_state
+        
         if evidence:
             for node in evidence.keys():
                 if  not evidence[node].is_compatible(state[node]):
                     raise Exception("The evidence given does not fit to the initial_state specified")
-            constant_nodes = evidence.keys()
-        else:
-            constant_nodes=[]
+#            constant_nodes = evidence.keys()
+#        else:
+#            constant_nodes=[]
             
         #let the distribution converge to the target distribution
         while not self.convergence_test.has_converged(state):
-            state=transition_model.transition(network, state, constant_nodes)
+            # burn-in: keep transitioning until the chain has converged
+            state=transition_model.transition(network, state, evidence)
         #finally sample from the target distribution
         for t in xrange(time_steps):
-            #print state
+            
             if variables_of_interest:
                 yield self._reduce_state_to_variables_of_interest(state, variables_of_interest)
             else:
                 yield state
-            state=transition_model.transition(network, state, constant_nodes)
+            state=transition_model.transition(network, state, evidence)
             
     def _reduce_state_to_variables_of_interest(self, state, variables_of_interest):
         return dict((k,v) for (k,v) in state.iteritems() if k in variables_of_interest)
diff --git a/primo/reasoning/density/LinearBeta.py b/primo/reasoning/density/LinearBeta.py
index 786c920..38ab0ff 100644
--- a/primo/reasoning/density/LinearBeta.py
+++ b/primo/reasoning/density/LinearBeta.py
@@ -1,14 +1,62 @@
 from primo.reasoning.density import Density
+from scipy.stats import beta
 import random
 
 class LinearBetaParameters():
-    def __init__(self, p0, p, q0, q):
-        self.p0=p0
-        self.p=p
-        self.q0=q0
-        self.q=q
+    def __init__(self, b0, b, alpha, beta):
+        self.b0=b0
+        self.b=b
+        self.alpha=alpha
+        self.beta=beta
 
 class LinearBeta(Density):
     def __init__(self, node):
-        pass
+        self.b={}
+        self.b0=0
+        self.alpha=1
+        self.beta=1
+        self.node=node
+        
+    def set_parameters(self,parameters):
+        self.b=parameters.b
+        self.b0=parameters.b0
+        self.alpha=parameters.alpha
+        self.beta=parameters.beta
+        
+    def add_variable(self, variable):
+        '''This method needs some serious reworking: Variables should not be denied
+        to be parents because of their value range. Instead it should be evaluated
+        if they can yield parameters for this distribution that are permitted. This 
+        can in any case happen under bad influence coefficients'''
+        if( not variable.get_value_range() == (0,float("Inf"))):
+            raise Exception("Tried to add Variable into Beta density, but variable had wrong value-range")
+        self.b[variable]=0.0
+        
+    def get_probability(self,value, node_value_pairs):
+        #print "Probability to compute for value:"+str(value)
+        #Compute the offset for the density and displace the value accordingly
+        x = self.b0
+        #print "X:"+str(x)
+        for node,node_value in node_value_pairs:
+            x = x + self.b[node]*node_value
+            #print "X:"+str(x)
+        #print value
+        value=value-x
+        #Evaluate the displaced density at value
+        #print str(value)
+        if value<0 or value>1:
+            return 0
+        return beta(self.alpha, self.beta).pdf(value)
+        
+    def _compute_offset_given_parents(self, state):
+        x = self.b0
+        for node in self.b.keys():
+            if node in state.keys():
+                x = x + self.b[node]*state[node]
+        return x
 
+
+    def sample_global(self, state):
+        value=random.betavariate(self.alpha,self.beta)+self._compute_offset_given_parents(state)
+        #print "Sampled:"+str(value)
+        return value
diff --git a/primo/reasoning/density/LinearExponential.py b/primo/reasoning/density/LinearExponential.py
index 1fcab91..2751d5e 100644
--- a/primo/reasoning/density/LinearExponential.py
+++ b/primo/reasoning/density/LinearExponential.py
@@ -4,9 +4,10 @@ import random
 import math
 
 class LinearExponentialParameters(object):
-    def __init__(self, b0, b):
+    def __init__(self, b0, b, lambda0):
         self.b0=b0
         self.b=b
+        self.lambda0=lambda0
 
         
 
@@ -14,14 +15,16 @@ class LinearExponential(Density):
     def __init__(self, node):
         self.b={}
         self.b0=0
+        self.lambda0=1
         self.node=node
         
     def set_parameters(self,parameters):
         self.b=parameters.b
         self.b0=parameters.b0
+        self.lambda0=parameters.lambda0
         
     def add_variable(self, variable):
-        '''This method needs some serious reworking: Variables should not be permitted
+        '''This method needs some serious reworking: Variables should not be denied
         to be parents because of their value range. Instead it should be evaluated
         if they can yield parameters for this distribution that are permitted. This 
         can in any case happen under bad influence coefficients'''
@@ -30,15 +33,23 @@ class LinearExponential(Density):
         self.b[variable]=0.0
         
     def get_probability(self,value, node_value_pairs):
-        if value<=0:
-            return 0
-        lambda_parameter = self.b0
+        
+        #Compute the offset for the density and displace the value accordingly
+        x = self.b0
-        for node,value in node_value_pairs:
-            lambda_parameter = lambda_parameter + self.b[node]*value
-        #print scipy.stats.expon(lambda_parameter).pdf(value)
-        return lambda_parameter*math.exp(-lambda_parameter*value)
+        for node,node_value in node_value_pairs:
+            x = x + self.b[node]*node_value
+        value=value-x
+        #Evaluate the displaced density at value
+        if value<0:
+            return 0
+        return self.lambda0*math.exp(-self.lambda0*value)
 
+    def _compute_offset_given_parents(self, state):
+        x = self.b0
+        for node in self.b.keys():
+            if node in state.keys():
+                x = x + self.b[node]*state[node]
+        return x
 
-    def sample_global(self):
-        print self.b0
-        return random.expovariate(self.b0)
+    def sample_global(self,state):
+        return random.expovariate(self.lambda0)+self._compute_offset_given_parents(state)
diff --git a/primo/reasoning/density/LinearGauss.py b/primo/reasoning/density/LinearGauss.py
index 92ca3eb..f1ec5fb 100644
--- a/primo/reasoning/density/LinearGauss.py
+++ b/primo/reasoning/density/LinearGauss.py
@@ -58,6 +58,13 @@ class LinearGauss(Density):
     def set_var(self, var):
         self.var=var
         
-    def sample_global(self):
-        return random.normalvariate(self.b0,self.var**0.5)
+    def _compute_offset_given_parents(self, state):
+        x = self.b0
+        for node in self.b.keys():
+            if node in state.keys():
+                x = x + self.b[node]*state[node]
+        return x
+        
+    def sample_global(self,state):
+        return random.normalvariate(self._compute_offset_given_parents(state),self.var**0.5)
             
-- 
GitLab