Commit f68746f8 authored by Hendrik Buschmeier

Removed trailing spaces.

parent 33ec713f
......@@ -14,7 +14,7 @@ class Density(object):
def __init__(self):
super(Density, self).__init__()
def add_variables(self, variables):
for v in variables:
self.add_variable(v)
......@@ -57,7 +57,7 @@ class GaussParameters(object):
self.b=b
#the variance
self.var=var
class NDGaussParameters(object):
def __init__(self, mu, cov):
......@@ -71,7 +71,7 @@ class Beta(Density):
This class represents a beta probability density. Unfortunately this
is currently a little bulky to use, as the parameters for the dependency are
not very transparent. This is how the dependency works:
The parameters p,q for the beta distribution are computed analogously
to the activation of a perceptron with sigmoid activation function:
output_scale * sigmoid(input_scale* (b0 + b'state)) where b'state means the dot product between b (a vector
......@@ -79,24 +79,24 @@ class Beta(Density):
density depends on). Here: sigmoid=1/(1+exp(-x))
The parameters output_scale and input_scale can be used to stretch or compress
the sigmoid.
The reason for this is that the parameters are required to be >0, and with
linear dependencies on the parents this could in no way be guaranteed.
Why the sigmoid function:
I had to guarantee that the parameters are > 0. As I did not want to
impose any restrictions on the value range of the parents, it was necessary
to map the support of the parents' values to a valid support for the parameters. In
other (and maybe more correct) words: the dependency function to compute
p and q needed to be of the form R^n -> ]0,inf[.
The first function that came to my mind was a
weighted sum of the parents' values put into an exponential function. This
caused problems due to the fast growth of the exponential.
For that reason I switched to the sigmoid function, which guarantees 0<p,q<1.
And because p,q<1 is not very practical, output_scale has been introduced
to scale from ]0,1[ to ]0,output_scale[.
input_scale can be used to stretch the sigmoid in input direction.
'''
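# A minimal standalone sketch of the mapping described above, using the
# default scales set in __init__ below: for any real activation x the
# result lies in ]0, output_scale[, the guarantee needed for p and q.
import math
def squash(x, input_scale=0.1, output_scale=5.0):
    # weighted-sum activation x squashed through a scaled sigmoid
    return output_scale / (1.0 + math.exp(-x * input_scale))
# squash(0.0) == 2.5; squash(-1e3) is close to 0; squash(1e3) is close to 5.0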
def __init__(self, node):
......@@ -105,37 +105,37 @@ class Beta(Density):
self.p={}
self.q={}
self.node=node
self.input_scale=0.1
self.output_scale=5.0
def set_parameters(self,parameters):
self.p0=parameters.p0
self.q0=parameters.q0
self.p=parameters.p
self.q=parameters.q
def add_variable(self, variable):
if( not isinstance(variable,ContinuousNode.ContinuousNode)):
raise Exception("Tried to add Variable as parent, but is not a ContinuousNode")
self.p[variable]=0.0
self.q[variable]=0.0
def get_probability(self,value, node_value_pairs):
p=self._compute_p_given_parents(dict(node_value_pairs))
q=self._compute_q_given_parents(dict(node_value_pairs))
probability = scipy.stats.beta(p, q).pdf(value)
return probability
def _compute_p_given_parents(self, state):
x = self.p0
for node in self.p.keys():
if node in state.keys():
x = x + self.p[node]*state[node]
return self.output_scale*1.0/(1.0+math.exp(-x*self.input_scale))
def _compute_q_given_parents(self, state):
x = self.q0
for node in self.q.keys():
......@@ -145,7 +145,7 @@ class Beta(Density):
def sample_global(self, state, lower_limit, upper_limit):
'''This method can be used to sample from this distribution. It is necessary that
a value for each parent is specified and it is possible to constrain the
value that is being sampled to some interval.
@param state: A dict (node->value) that specifies a value for each variable
......@@ -160,21 +160,21 @@ class Beta(Density):
q=self._compute_q_given_parents(state)
distribution=scipy.stats.beta(p,q)
lower_cdf=distribution.cdf(lower_limit)
upper_cdf=distribution.cdf(upper_limit)
sample_in_integral=random.uniform(lower_cdf, upper_cdf)
sample=distribution.ppf(sample_in_integral)
return sample
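# Standalone sketch of the truncation trick used in sample_global above:
# a uniform sample mapped through the inverse cdf (ppf), restricted to
# [cdf(lower), cdf(upper)], is a sample from the distribution conditioned
# on the interval [lower, upper]. The p, q values here are hypothetical.
import random
import scipy.stats
dist = scipy.stats.beta(2.0, 5.0)
lower_cdf, upper_cdf = dist.cdf(0.1), dist.cdf(0.4)
sample = dist.ppf(random.uniform(lower_cdf, upper_cdf))  # always in [0.1, 0.4]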
class Exponential(Density):
'''
This class represents an exponential probability density. Unfortunately this
is currently a little bulky to use, as the parameters for the dependency are
not very transparent. This is how the dependency works:
The parameter lambda for the exponential distribution is computed analogously
to the activation of a perceptron with sigmoid activation function:
output_scale * sigmoid(input_scale* (b0 + b'state)) where b'state means the dot product between b (a vector
......@@ -182,24 +182,24 @@ class Exponential(Density):
density depends on). Here: sigmoid=1/(1+exp(-x))
The parameters output_scale and input_scale can be used to stretch or compress
the sigmoid.
The reason for this is that the parameter lambda is required to be >0, and with
linear dependencies on the parents this could in no way be guaranteed.
Why the sigmoid function:
I had to guarantee that the parameter lambda is > 0. As I did not want to
impose any restrictions on the value range of the parents, it was necessary
to map the support of the parents' values to a valid support for lambda. In
other (and maybe more correct) words: the dependency function to compute
lambda needed to be of the form R^n -> ]0,inf[.
The first function that came to my mind was a
weighted sum of the parents' values put into an exponential function. This
caused problems due to the fast growth of the exponential.
For that reason I switched to the sigmoid function, which guarantees 0<lambda<1.
And because lambda<1 is not very practical, output_scale has been introduced
to scale from ]0,1[ to ]0,output_scale[.
input_scale can be used to stretch the sigmoid in input direction.
'''
def __init__(self, node):
......@@ -213,22 +213,22 @@ class Exponential(Density):
self.input_scale=1.0
#scaling coefficient to stretch or compress the sigmoid in output-direction
self.output_scale=4.0
def set_parameters(self,parameters):
self.b=parameters.b
self.b0=parameters.b0
def add_variable(self, variable):
'''This method needs some serious reworking: variables should not be denied
to be parents because of their value range. Instead it should be evaluated
whether they can yield parameters for this distribution that are permitted. This
can in any case happen under bad influence coefficients.'''
if( not isinstance(variable,ContinuousNode.ContinuousNode)):
raise Exception("Tried to add Variable as parent, but is not a ContinuousNode")
self.b[variable]=0.0
def get_probability(self,value, node_value_pairs):
#Compute the offset for the density and displace the value accordingly
_lambda = self._compute_lambda_given_parents(dict(node_value_pairs))
#Evaluate the displaced density at value
......@@ -243,7 +243,7 @@ class Exponential(Density):
return _lambda
def sample_global(self,state, lower_limit, upper_limit):
'''This method can be used to sample from this distribution. It is necessary that
a value for each parent is specified and it is possible to constrain the
value that is being sampled to some interval.
@param state: A dict (node->value) that specifies a value for each variable
......@@ -256,14 +256,14 @@ class Exponential(Density):
'''
_lambda=self._compute_lambda_given_parents(state)
distribution=scipy.stats.expon(loc=0,scale=1.0/_lambda)
lower_cdf=distribution.cdf(lower_limit)
upper_cdf=distribution.cdf(upper_limit)
sample_in_integral=random.uniform(lower_cdf, upper_cdf)
sample=distribution.ppf(sample_in_integral)
#sample=random.expovariate(_lambda)
#print "EXPO-SAMPLE: "+str(sample)+" at lambda: "+str(_lambda)
return sample
......@@ -277,7 +277,7 @@ class ProbabilityTable(Density):
def get_neutral_multiplication_PT():
pt = ProbabilityTable()
pt.set_probability_table(numpy.array(1.0),[])
return pt
......@@ -301,7 +301,7 @@ class ProbabilityTable(Density):
ax = self.table.ndim
self.table=numpy.expand_dims(self.table,ax)
self.table=numpy.repeat(self.table,len(variable.value_range),axis = ax)
def set_probability_table(self, table, nodes):
......@@ -328,7 +328,7 @@ class ProbabilityTable(Density):
self.table[index] = self.table[index] + 1
return self.normalize_as_jpt()
def get_most_probable_instantiation(self):
'''
This method returns a list of (node,value)-pairs for which this density
......@@ -342,17 +342,17 @@ class ProbabilityTable(Density):
def set_probability(self, value, node_value_pairs):
index = self.get_cpt_index(node_value_pairs)
self.table[index]=value
def get_probability(self, node_value_pairs):
index = self.get_cpt_index(node_value_pairs)
return self.table[index]
def sample_global(self, global_state, variable, allowed_values):
'''
This method can be used to sample from the density according to a global
state containing values for all other variables that this node belongs to.
@param global_state: A Dict (node -> value) that must hold a value for
all variables that this density depends on, except for the variable
that it is directly associated with.
@param variable: The variable that this density is directly associated
with.
......@@ -381,16 +381,16 @@ class ProbabilityTable(Density):
value = values[index_in_values_list]
index.append(node.value_range.index(value))
return tuple(index)
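# A standalone sketch of the same indexing scheme on a plain numpy array,
# with hypothetical variables A in {t,f} (axis 0) and B in {lo,mid,hi}
# (axis 1): each value is translated to its position in the value_range.
import numpy
table = numpy.zeros((2, 3))
a_range, b_range = ["t", "f"], ["lo", "mid", "hi"]
index = (a_range.index("f"), b_range.index("mid"))
table[index] = 0.25  # sets the entry for (A='f', B='mid')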
def get_node_value_pairs(self, index):
'''
Can be used to determine the node-value combination that belongs to some
index for the conditional probability table. That state needs to be fully
specified, i.e. all variables this density depends on need to be specified.
This method should probably only be used internally.
@param index: A tuple
@returns: a list of (node,value) pairs
'''
nv_pairs=[]
......@@ -431,7 +431,7 @@ class ProbabilityTable(Density):
retInstance.table = retInstance.table * 1.0 / numpy.sum(retInstance.table)
retInstance.variables = copy.copy(self.variables)
return retInstance
#this is the old code:
#return self.table * 1.0 / numpy.sum(self.table)
......@@ -544,14 +544,14 @@ class ProbabilityTable(Density):
ev.table[pos_value] = tmpCpd[pos_value]
return ev
def copy(self):
'''Returns a copied version of this probabilityTable'''
ev = ProbabilityTable()
ev.variables = copy.copy(self.variables)
ev.table = copy.copy(self.table)
return ev
......@@ -580,56 +580,56 @@ class Gauss(Density):
def __init__(self,variable):
super(Gauss, self).__init__()
self.b0=0#numpy.array([0.0])
self.b={}
self.var=1.0
def set_parameters(self,parameters):
self.set_b0(parameters.b0)
self.set_b(parameters.b)
self.set_var(parameters.var)
def add_variable(self, variable):
if not isinstance(variable, primo.nodes.ContinuousNode):
raise Exception("Tried to add Variable into Gaussian densitiy, but variable is not continuous")
self.b[variable]=0.0
def get_probability(self, x, node_value_pairs):
reduced_mu = self.b0
for node,value in node_value_pairs:
reduced_mu = reduced_mu + self.b[node]*value
return scipy.stats.norm(reduced_mu, numpy.sqrt(self.var)).pdf(x)
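# What get_probability evaluates is a conditional linear Gaussian:
# X | parents ~ N(b0 + sum_i b_i * parent_i, var). A standalone sketch
# with made-up coefficients:
import scipy.stats
b0, b1, var = 1.0, 0.5, 2.0
mu = b0 + b1 * 4.0  # reduced mean given a single parent with value 4.0
density_at_3 = scipy.stats.norm(mu, var ** 0.5).pdf(3.0)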
def set_b(self, variable, b):
if not variable in b.keys():
raise Exception("Tried to set dependency-variable b for a variable that has not yet been added to this variable's dependencies")
self.b[variable]=b
#Note: this definition shadows the two-argument set_b defined above.
def set_b(self, b):
if not set(self.b.keys())==set(b.keys()):
raise Exception("The variables given in the new b do not match the old dependencies of this density")
self.b=b
def set_b0(self, b0):
self.b0=b0
def set_var(self, var):
self.var=var
def _compute_offset_given_parents(self, state):
x = self.b0
for node in self.b.keys():
if node in state.keys():
x = x + self.b[node]*state[node]
return x
def sample_global(self,state,lower_limit,upper_limit):
'''This method can be used to sample from this distribution. It is necessary that
a value for each parent is specified and it is possible to constrain the
value that is being sampled to some interval.
@param state: A dict (node->value) that specifies a value for each variable
......@@ -640,17 +640,17 @@ class Gauss(Density):
sampled as value.
@returns: The sampled value. A real number.
'''
distribution=scipy.stats.norm(self._compute_offset_given_parents(state), self.var**0.5)
lower_cdf=distribution.cdf(lower_limit)
upper_cdf=distribution.cdf(upper_limit)
sample_in_integral=random.uniform(lower_cdf, upper_cdf)
sample=distribution.ppf(sample_in_integral)
return sample
......@@ -669,44 +669,44 @@ class NDGauss(Density):
def __init__(self):
super(NDGauss, self).__init__()
self.mu=numpy.array([0.0])
self.C=numpy.array([[1.0]])
self.variables=[]
def add_variable(self, variable):
v_min,v_max=variable.get_value_range()
if not (v_min>= -float("Inf") and v_max <=float("Inf")):
raise Exception("Tried to add Variable into Gaussian densitiy, but variable had wrong value-range")
self.variables.append(variable)
m=len(self.variables)
self.mu.resize([m,1])
self.C.resize((m,m))
self.C[m-1,m-1]=1.0
def set_parameters(self,parameters):
self.set_mu(parameters.mu)
self.set_cov(parameters.cov)
def set_mu(self, mu):
self.mu=mu
def set_cov(self, C):
self.C=C
def sample(self):
return numpy.random.multivariate_normal(self.mu,self.C)
def parametrize_from_states(self, samples, number_of_samples):
'''This method uses a list of variable-instantiations to change this node's parametrization
to represent a Gaussian constructed from the given samples.
The Argument samples is a list of pairs (RandomNode, value).'''
X=numpy.empty((number_of_samples, len(self.variables)))
for i,state in enumerate(samples):
for j,variable in enumerate(self.variables):
......@@ -715,10 +715,10 @@ class NDGauss(Density):
self.mu=numpy.mean(X,axis=0)
self.C=numpy.cov(X.transpose())
return self
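# The estimate above is the standard maximum-likelihood fit of a
# multivariate Gaussian: sample mean and sample covariance of the data
# matrix. A standalone sketch on random data:
import numpy
X = numpy.random.randn(100, 3)  # 100 samples of 3 variables
mu = numpy.mean(X, axis=0)      # shape (3,)
C = numpy.cov(X.transpose())    # shape (3, 3)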
def get_most_probable_instantiation(self):
return self.mu
def __str__(self):
ret= "Gauss(\nmu="+str(self.mu)+"\nC="+str(self.C)+")"
return ret
......@@ -5,42 +5,42 @@ class Evidence(object):
'''
A generic class for evidence. Cannot be used on its own. Look for its
subclasses.
'''
def __init__(self):
pass
def is_compatible(self, value):
'''
This method can be used to check if a value is consistent with some
evidence.
'''
raise Exception("Not defined for this kind of Evidence")
def get_unique_value(self):
'''
Sometimes only one value of some domain is compatible with the evidence.
This is obviously the case for EvidenceEqual. It is then possible to
use this value to speed up computations.
@return: The only value compatible with the evidence or else None.
'''
return None
class EvidenceEqual(Evidence):
'''
This class can be used to specify evidence that a variable has taken some
specified value.
e.g. a=5
'''
def __init__(self, value):
self.value=value
def is_compatible(self, value):
return self.value==value
def get_unique_value(self):
return self.value
class EvidenceInterval(Evidence):
'''
This class can be used to specify evidence that a variable has taken on
......@@ -50,13 +50,13 @@ class EvidenceInterval(Evidence):
def __init__(self,min_val,max_val):
self.min_val=min_val
self.max_val=max_val
def is_compatible(self, value):
return self.min_val <= value and value<=self.max_val
def get_interval(self):
return self.min_val,self.max_val
class EvidenceLower(EvidenceInterval):
'''
......@@ -67,7 +67,7 @@ class EvidenceLower(EvidenceInterval):
def __init__(self,limit):
super(EvidenceLower, self).__init__(float("-inf"),limit)
class EvidenceHigher(EvidenceInterval):
'''
This class can be used to specify evidence that a variable has taken on
......
......@@ -13,12 +13,12 @@ class UtilityTable(object):
self.variables -- list of the parent nodes
self.table -- utility table which contains the utility
'''
def __init__(self):
super(UtilityTable, self).__init__()
self.table = np.array(0)
self.variables = []
def add_variable(self, variable):
self.variables.append(variable)
......@@ -33,8 +33,8 @@ class UtilityTable(object):
index_in_values_list = nodes.index(node)
value = values[index_in_values_list]
index.append(node.value_range.index(value))
return tuple(index)
def set_utility_table(self, table, nodes):
if not set(nodes) == set(self.variables):
raise Exception("The list which should define the ordering of the variables does not match"
......@@ -47,17 +47,17 @@ class UtilityTable(object):
self.table = table
self.variables = nodes
def set_utility(self, value, node_value_pairs):
index = self.get_ut_index(node_value_pairs)
self.table[index]=value
def get_utility_table(self):
return self.table
def get_variables(self):
return self.variables
def get_utility(self, node_value_pairs):
index = self.get_ut_index(node_value_pairs)
return self.table[index]
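# A hedged usage sketch, assuming the DiscreteNode class defined elsewhere
# in this library; the node name and values here are hypothetical.
import numpy as np
import primo.nodes
weather = primo.nodes.DiscreteNode("weather", ["sun", "rain"])
ut = UtilityTable()
ut.add_variable(weather)
ut.set_utility_table(np.array([10.0, -2.0]), [weather])
assert ut.get_utility([(weather, "rain")]) == -2.0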
......@@ -69,59 +69,59 @@ class MakeDecision(object):
"""
Calculates a Decision on a given Bayesian Decision Network
"""
def __init__(self, bdn = None):
"""
Constructor
Keyword arguments:
bdn -- Bayesian Decision Network (default None)
"""
super(MakeDecision, self).__init__()
self.bdn = bdn
def set_bdn(self, bdn):
"""
Sets the Bayesian Decision Network
Keyword arguments:
bdn -- Bayesian Decision Network
"""
self.bdn = bdn
def get_bdn(self):
"""
Getter for the Bayesian Decision Network
"""
return self.bdn
def max_sum(self, decisionNode):
"""Implementation of the max sum Algorithm to get the best Decision (according to the MEU principle).
maximize over decisions and summing over RandomNodes.
This function sets the state of provided DecisionNode, so later decisions can't affect that Node
Keyword arguments:
decisionNode -- Decision Node on which the decision should be made
"""
if self.bdn == None:
raise Exception("Bayesian Decision Network was not set!")
partialOrder = self.bdn.get_partialOrdering()
utility_nodes = self.bdn.get_all_utility_nodes()
if not partialOrder:
raise Exception("No partial Order was set!")
if decisionNode not in partialOrder:
raise Exception("Decision Node is not in the partial Order!")
if not self.bdn.is_valid():
raise Exception("The Bayesian Decision Network is not valid!")
#Check if the Decision Nodes that are ordered before the provided Decision Node have a state
for node in partialOrder:
if isinstance(node, primo.nodes.DecisionNode):
......@@ -130,9 +130,9 @@ class MakeDecision(object):
raise Exception("Decision Nodes that are ordered before the provided Decision Node must have a state!")
else:
break
'''Run through the partialOrder in reverse. Get the last two Nodes, reduce the Random Nodes with the Decision Node
parent and with the decisions already made. Then multiply the cpts of the Random Nodes. Multiply the probability values
with the sum of the utility values and calculate the best decision (which has the MEU).
'''
randomNodes = self.bdn.get_all_nodes()
......@@ -142,12 +142,12 @@ class MakeDecision(object):
max_utility = []
#for every decision value of the decision node
for decValue in partialOrder[i-1].get_value_range():
#if the decision already has a value then abort. The decision has already been made.
if not partialOrder[i-1].get_state() == None:
finish = True
break
cpts = []
#reduce Random Nodes with a Decision value
for rNode in randomNodes:
......@@ -155,18 +155,18 @@ class MakeDecision(object):
cpts.append(rNode.get_cpd_reduced([(partialOrder[i-1], decValue)]))
else:
cpts.append(rNode.get_cpd())
#reduce the cpts with the future_best_decisions
for j in range(0,len(cpts)):
for node,value in future_best_decisions:
if node in cpts[j].get_variables():
cpts[j] = cpts[j].reduction([(node,value)])
#multiply the cpts
jointCPT = cpts[0]
for j in range(1,len(cpts)):
jointCPT = jointCPT.multiplication(cpts[j])
#calculate Utility
table = jointCPT.get_table()
value_range_list = []
......@@ -175,17 +175,17 @@ class MakeDecision(object):
tupleList=[]
for value in var.get_value_range():
tupleList.append((var,value))
if tupleList:
value_range_list.append(tupleList)
#get all possible assignments
permutationList = []
if len(value_range_list) >= 2:
permutationList = list(itertools.product(*value_range_list))
else:
permutationList = value_range_list
#save the result of each probability value and the corresponding sum of utilities
result = []
if len(permutationList) > 1:
......@@ -198,31 +198,31 @@ class MakeDecision(object):
index = jointCPT.get_cpt_index([perm])
value = table[index]
result.append(value * self.calculate_utility([perm], (partialOrder[i-1],decValue), future_best_decisions))
#end result for this decision
max_utility.append((decValue,sum(result)))
#nothing more to do since the decision has already been made
if finish:
break
zippedList = zip(*max_utility)
val = max(zippedList[1])
ind = zippedList[1].index(val)
#Best Decision
best_decision = zippedList[0][ind]
future_best_decisions.append((partialOrder[i-1],best_decision))
#the last one is the decision that we want to know about
return future_best_decisions[len(future_best_decisions)-1]
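# The core of the loop above is the MEU rule: for each candidate decision d,
# EU(d) = sum over assignments x of P(x | d) * U(x, d), and the decision
# with maximal EU wins. A tiny numeric sketch with made-up numbers:
candidates = [("stay", 0.7 * 10 + 0.3 * -5), ("go", 0.4 * 10 + 0.6 * -5)]
best_decision, best_eu = max(candidates, key=lambda pair: pair[1])
# -> ("stay", 5.5)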
def calculate_utility(self, assignment, currentDecision, list_of_best_decision):
"""
Sums up the utility values
Keyword arguments:
assignment -- the assignment of the variables
currentDecision -- the current decision that we want to calculate
list_of_best_decision -- list of the decisions that are lying in the future
......@@ -239,13 +239,13 @@ class MakeDecision(object):
if node in zippedAssignment[0]:
index = zippedAssignment[0].index(node)
tempList.append((node,zippedAssignment[1][index]))
elif zippedDecisions:
if node in zippedDecisions[0]:
index = zippedDecisions[0].index(node)
tempList.append((node,zippedDecisions[1][index]))
else:
tempList.append(currentDecision)
else:
tempList.append(currentDecision)
utilityList.append(uNode.get_utility(tempList))
return sum(utilityList)
......@@ -44,279 +44,279 @@ class FactorTree(object):
edges. Thus, the first query is expensive and all following ones are easily calculated.
The speed of the first message calculation depends on how the tree was built.
Literature: Modeling and Reasoning with Bayesian Networks - Adnan Darwiche
Chapter 7
'''
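# A minimal sketch of the projection step the message passing below relies
# on: a factor is projected onto a separator by summing out (marginalizing)
# every variable that is not in the separator. Hypothetical 2x2 factor:
import numpy
phi = numpy.array([[0.2, 0.3], [0.1, 0.4]])  # factor over (A, B)
message_over_A = phi.sum(axis=1)             # sum out B -> array([0.5, 0.5])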
def __init__(self,graph,rootNode):
self.graph = graph
self.rootNode = rootNode
def calculate_PoE(self):
'''Calculates the probability of evidence with the set evidence'''
if not self.graph.graph['messagesValid']:
self.calculate_messages()
cpd = self.calculate_marginal_forOne(self.rootNode)
for v in cpd.get_variables()[:]:
cpd = cpd.marginalization(v)
return cpd
def calculate_marginal(self,variables):
''' If evidence is set, then this method calculates the posterior marginal.
With an empty evidence this is automatically the prior marginal.'''
if not self.graph.graph['messagesValid']:
self.calculate_messages()
resPT = primo.densities.ProbabilityTable.get_neutral_multiplication_PT()
for f in self.graph.nodes():
if f.get_node() in variables:
resPT = resPT.multiplication(self.calculate_marginal_forOne(f))
resPT = resPT.normalize_as_jpt()
return resPT
def calculate_marginal_forOne(self,factor):
curCPD = factor.get_calculation_CDP().copy()
for p in self.graph.predecessors(factor):
tmpCPD = self.graph[p][factor]['msgRightWay']
curCPD = curCPD.multiplication(tmpCPD)
for p in self.graph.neighbors(factor):
tmpCPD = self.graph[factor][p]['msgAgainstWay']
curCPD = curCPD.multiplication(tmpCPD)
for v in curCPD.get_variables()[:]:
if v != factor.get_node():
curCPD = curCPD.marginalization(v)
return curCPD
def draw(self):
'''Draws the FactorTree'''
import matplotlib.pyplot as plt
nx.draw_circular(self.graph)
plt.show()
def calculate_messages(self):
''' Calculates the messages and stores the intermediate results.'''
self.pull_phase(self.rootNode,self.graph)
self.push_phase(self.rootNode,self.graph,primo.densities.ProbabilityTable.get_neutral_multiplication_PT())
self.graph.graph['messagesValid'] = True
def set_evidences(self,evidences):
self.graph.graph['messagesValid'] = False
evNodes = zip(*evidences)
for factor in self.graph.nodes():
if factor.get_node() in evNodes[0]:
idx = evNodes[0].index(factor.get_node())
factor.set_evidence(evidences[idx])
def pull_phase(self,factor,graph):
calCPD = factor.get_calculation_CDP()
#calculate the messages of the children
for child in graph.neighbors(factor):
tmpInput = self.pull_phase(child,graph)
#project each factor on the specific separator
separator = graph[factor][child]['separator']
for var in tmpInput.variables[:]:
if var not in separator:
tmpInput = tmpInput.marginalization(var)
#save message on edge: it's the opposite of the direction of the edge
graph[factor][child]['msgAgainstWay'] = tmpInput
#calculate the new message
calCPD = calCPD.multiplication(tmpInput)
return calCPD
def push_phase(self,factor,graph,inCPD):
for child in graph.neighbors(factor):
tmpCPD = inCPD.multiplication(factor.get_calculation_CDP())
for child2 in graph.neighbors(factor):
if (child != child2):
tmpCPD = tmpCPD.multiplication(graph[factor][child2]['msgAgainstWay'])
separator = graph[factor][child]['separator']
#project on outgoing edge separator
for var in tmpCPD.variables:
if var not in separator:
tmpCPD = tmpCPD.marginalization(var)
#add setOut to outgoing vars from child
#Message with the direction of the edge
graph[factor][child]['msgRightWay'] = tmpCPD
self.push_phase(child,graph,tmpCPD)
class FactorTreeFactory(object):
'''The FactorTreeFactory creates the FactorTree out of a BayesNet.'''
def create_random_factortree(self,bayesNet):
''' Creates a randomly structured FactorTree. This method is useful for testing
if reasoning works for arbitrary trees.'''
allNodes = bayesNet.get_all_nodes()
if len(allNodes) == 0:
raise Exception("createRandomFactorTree: No nodes in given BayesNet")
tn = allNodes.pop()
rootFactor = Factor(tn)
graph = nx.DiGraph(messagesValid=False)
graph.add_node(rootFactor)
usedNodes = [rootFactor]
for n in allNodes[:]:
parentNode = choice(usedNodes[:])
newFactor = Factor(n)
graph.add_edge(parentNode,newFactor, inVars=set(),outVars=set())
usedNodes.append(newFactor)
self.calculate_seperators_pull(rootFactor,graph)
self.calculate_seperators_push(rootFactor,graph,set())
self.intersect_seperators(graph)
self.calculate_clusters(rootFactor,graph,set())
return FactorTree(graph,rootFactor)
def create_greedy_factortree(self,bayesNet):
'''This method creates a factor tree after the following algorithm:
1. Sort factors by the number of contained variables (descending).
2. For each node in the sorted list, insert it at its best position.
The best position is the node with the most joint variables.'''
allNodes = bayesNet.get_all_nodes()
if len(allNodes) == 0:
raise Exception("createRandomFactorTree: No nodes in given BayesNet")
sortNodeList = []
for n in allNodes:
sortNodeList.append((len(n.get_cpd().get_variables()),n))
#sort node list
sortNodeList = sorted(sortNodeList,key=itemgetter(0),reverse=True)
sortNodeList = zip(*sortNodeList)
sortNodeList = list(sortNodeList[1])
#root node with the most variables
rootFactor = Factor(sortNodeList.pop(0))
#create new graph for factor tree
graph = nx.DiGraph(messagesValid=False)
graph.add_node(rootFactor)
#All nodes are added
for nd in sortNodeList[:]:
(ct,insFactor) = self.find_best_node_for_insertion(graph,rootFactor,set(nd.get_cpd().get_variables()))
nFactor = Factor(nd)
graph.add_edge(insFactor,nFactor, inVars=set(),outVars=set())
#For the later calculation the separators are needed
self.calculate_seperators_pull(rootFactor,graph)
self.calculate_seperators_push(rootFactor,graph,set())
self.intersect_seperators(graph)
#the clusters are not necessarily needed but indicate how well the calculation of messages performs
self.calculate_clusters(rootFactor,graph,set())
return FactorTree(graph,rootFactor)
def find_best_node_for_insertion(self,graph,factor,nodeSet):
'''Finds the node in the graph with the most variables in common with the given node.'''
curJointCount = len(set(factor.get_variables()) & nodeSet)
curInsertFactor = factor
for nbs in graph.neighbors(factor):
(count,retFactor) = self.find_best_node_for_insertion(graph,nbs,nodeSet)
if count >= curJointCount:
curJointCount = count
curInsertFactor = retFactor
return (curJointCount,curInsertFactor)
def calculate_seperators_pull(self,factor,graph):
s = set()
pullSet = set(factor.get_variables())
#find all variables in outgoing edges for factor
for child in graph.neighbors(factor):
s = self.calculate_seperators_pull(child,graph)
graph[factor][child]['inVars'] = s
pullSet = s | pullSet
return pullSet
def calculate_seperators_push(self,factor,graph,setOut):
#add local vars to set
setOut = set(factor.get_variables()) | setOut
for child in graph.neighbors(factor):
tmpSet = copy.copy(setOut)
for child2 in graph.neighbors(factor):
if (child != child2):
tmpSet = tmpSet | graph[factor][child2]['inVars']
#add setOut to outgoing variables from the child
tmp = graph[factor][child]['outVars']
graph[factor][child]['outVars'] = tmp | tmpSet
self.calculate_seperators_push(child,graph,tmpSet)
def intersect_seperators(self,graph):
for n,nbrs in graph.adjacency_iter():
for nbr,eattr in nbrs.items():
eattr['separator'] = eattr['inVars'] & eattr['outVars']
def calculate_clusters(self,factor,graph,parent_seperator):
localCluster = parent_seperator | set(factor.get_variables())
for n in graph.neighbors(factor):
tmpSeparator = graph[factor][n]['separator']
localCluster = localCluster | tmpSeparator
self.calculate_clusters(n,graph,tmpSeparator)
factor.set_cluster(localCluster)
......@@ -325,86 +325,86 @@ class EasiestFactorElimination(object):
1. Needed evidences are set (optional).
2. All nodes are multiplied.
3. The redundant variables are summed out
Literature: Modeling and Reasoning with Bayesian Networks - Adnan Darwiche
Chapter 6-7
'''
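# A standalone numeric sketch of steps 2 and 3 on a two-node chain A -> B
# with made-up tables: multiply all cpds into the joint, then sum out
# everything that is not queried.
import numpy
p_a = numpy.array([0.6, 0.4])                        # P(A)
p_b_given_a = numpy.array([[0.9, 0.1], [0.2, 0.8]])  # P(B|A), rows index A
joint = p_a[:, None] * p_b_given_a                   # P(A, B)
p_b = joint.sum(axis=0)                              # P(B) -> [0.62, 0.38]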
def __init__(self,bayesNet):
self.bn= bayesNet
def calculate_PriorMarginal(self,variables):
'''Calculates the prior marginal for the given variables. The resulting
CPD is returned.'''
nodes = self.bn.get_all_nodes()
finCpd = nodes.pop().get_cpd()
for n in nodes:
finCpd = finCpd.multiplication(n.get_cpd())
for v in finCpd.get_variables():
if v not in variables:
finCpd = finCpd.marginalization(v)
return finCpd
def calculate_PosteriorMarginal(self,variables,evidence):
'''Calculates the posterior marginal for given variables and evidence.
It returns the resulting cpd.'''
nodes = self.bn.get_all_nodes()
#List of evidences
ev_list = zip(*evidence)
# Special Case: First Node
node1 = nodes.pop()
if node1 in ev_list[0]:
ind = ev_list[0].index(node1)
finCpd = node1.get_cpd().set_evidence(evidence[ind])
else:
finCpd = node1.get_cpd()
# For all other nodes
for n in nodes:
if n in ev_list[0]:
#Set evidence and multiply
ind = ev_list[0].index(n)
nCPD = n.get_cpd().set_evidence(evidence[ind])
finCpd = finCpd.multiplication(nCPD)
else:
#only multiply
finCpd = finCpd.multiplication(n.get_cpd())
for v in finCpd.get_variables():
if v not in variables:
finCpd = finCpd.marginalization(v)
finCpd = finCpd.normalize_as_jpt()
return finCpd
def calculate_PoE(self,evidence):
''' Calculates the probability of evidence for the given evidence and returns the result.'''
nodes = self.bn.get_all_nodes()
unzipped_list = zip(*evidence)
node1 = nodes.pop()
if node1 in unzipped_list[0]:
ind = unzipped_list[0].index(node1)
finCpd = node1.get_cpd().set_evidence(evidence[ind])
else:
finCpd = node1.get_cpd()
for n in nodes:
if n in unzipped_list[0]:
ind = unzipped_list[0].index(n)
......@@ -412,10 +412,10 @@ class EasiestFactorElimination(object):
finCpd = finCpd.multiplication(nCPD)
else:
finCpd = finCpd.multiplication(n.get_cpd())
for v in finCpd.get_variables():
finCpd = finCpd.marginalization(v)
return finCpd
......@@ -19,7 +19,7 @@ class MCMC(object):
'''
self.bn=bn
self.times=times
if transition_model == None:
transition_model = MetropolisHastingsTransitionModel()
......@@ -30,18 +30,18 @@ class MCMC(object):
def calculate_PriorMarginal(self,variables,AssumedDensity):
'''
Calculate the marginal over some variables.
@param variables: A list containing the variables over which
the prior marginal shall be defined.
@param AssumedDensity: A class from primo.reasoning.density . This
parameter is used to define the class of density for the return value.
@returns: An object of the class AssumedDensity.
'''
return self.calculate_PosteriorMarginal(variables,dict(),AssumedDensity)
def calculate_MAP(self, variables_of_interest, evidence, AssumedDensity):
'''
Calculate the maximum a posteriori hypothesis given some evidence.
@param variables_of_interest: A list containing the variables for which
the map-hypothesis is wanted.
@param evidence: A Dict from Node to Evidence.
......@@ -51,10 +51,10 @@ class MCMC(object):
You could pass a Gauss in a continuous setting here, for example.
@returns: The most probable instantiation given the evidence in the form:
List of pairs (Node, Value).
'''
initial_state=self._generateInitialStateWithEvidence(evidence)
chain = self.mcs.generateMarkovChain(self.bn, self.times, initial_state, evidence, variables_of_interest)
density = AssumedDensity()
density.add_variables(variables_of_interest)
density = density.parametrize_from_states(chain,self.times)
......@@ -63,25 +63,25 @@ class MCMC(object):
def calculate_PosteriorMarginal(self,variables_of_interest,evidence,AssumedDensity):
'''
Calculate some posterior marginal.
@param variables_of_interest: A list containing the variables over which
the posterior marginal shall be defined.
@param evidence: A Dict from Node to Evidence.
@param AssumedDensity: A class from primo.reasoning.density . This
parameter is used to define the class of density for the return value.
@returns: An object of the class AssumedDensity.
'''
initial_state=self._generateInitialStateWithEvidence(evidence)
chain = self.mcs.generateMarkovChain(self.bn, self.times, initial_state, evidence, variables_of_interest)
density = AssumedDensity()
density.add_variables(variables_of_interest)
density = density.parametrize_from_states(chain,self.times)
return density
def calculate_PoE(self,evidence):
'''
Calculate Probability of Evidence.
......@@ -95,27 +95,27 @@ class MCMC(object):
for state in chain:
compatible = True
for node,node_evidence in evidence.items():
if not node_evidence.is_compatible(state[node]):
compatible = False
break
if compatible:
compatible_count = compatible_count + 1
number_of_samples = number_of_samples + 1
probability_of_evidence = float(compatible_count)/float(number_of_samples)
return probability_of_evidence
def _generateInitialStateWithEvidence(self, evidence):
return self.forward_sample(evidence)
def forward_sample(self, evidence):
'''
Generate a sample from the distribution defined by the given BayesNet by
forward sampling.
@param evidence: A Dict from Node to Evidence.
'''
state={}
......@@ -140,21 +140,21 @@ class GibbsTransitionModel(object):
'''
Implements Gibbs sampling. Can be used to construct a Markov chain and is
mainly used by MarkovChainSampler. This transition model can only be used
if the product of each variable and the variables in its Markov blanket
can be computed in closed form. This is currently only the case for discrete
variables / ProbabilityTables, but could possibly be extended to the continuous
setting by assuming Gaussian forms for the products or using only classes of
pdfs for which closed forms are computable.
If the pdf-classes used cannot offer this kind of computation you should
use the MetropolisHastingsTransitionModel, as it only requires computing
a single probability, which is much easier to obtain.
Implemented after "Probabilistic Graphical Models, Daphne Koller and Nir Friedman"(p.506)
'''
def __init__(self):
pass
def transition(self, network, state, extern_evidence):
'''
Does one single state transition.
......@@ -174,11 +174,11 @@ class GibbsTransitionModel(object):
reduced_cpd = node.get_cpd_reduced(evidence)
else:
reduced_cpd = node.get_cpd()
#reduce the children's cpds
children = network.get_children(node)
for child in children:
#reduce this child's cpd
parents=network.get_parents(child)
evidence=[(parent,state[parent]) for parent in parents if parent != node]
......@@ -186,39 +186,39 @@ class GibbsTransitionModel(object):
reduced_child_cpd = child.get_cpd_reduced(evidence)
reduced_cpd = reduced_cpd.multiplication(reduced_child_cpd)
new_state=weighted_random(reduced_cpd.get_table())
state[node]=node.get_value_range()[new_state]
return state
class MetropolisHastingsTransitionModel(object):
'''
Implements the Metropolis-Hastings algorithm. Can be used to construct a Markov chain.
After "Probabilistic Graphical Models, Daphne Koller and Nir Friedman"(p.644)
'''
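# A minimal sketch of the acceptance rule used in transition() below, with
# made-up numbers: a proposal is accepted with probability
# min(1, p(proposal | mb) / p(current | mb) * cdf_ratio).
import random
p_proposed, p_current, cdf_ratio = 0.03, 0.05, 1.0
acceptance_probability = min(1.0, p_proposed / p_current * cdf_ratio)  # 0.6
accept = random.random() <= acceptance_probability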
def __init__(self):
pass
def _compute_p_of_value_given_mb(self, network, state, node, value):
parents=network.get_parents(node)
if parents:
evidence=[(parent,state[parent]) for parent in parents]
else:
evidence=[]
p = node.get_probability(value,evidence)
children = network.get_children(node)
for child in children:
#reduce this child's cpd
parents=network.get_parents(child)
evidence=[(parent,state[parent]) for parent in parents if parent != node]
evidence.append((node,value))
p = p * child.get_probability(state[child],evidence)
return p
def transition(self, network, state, extern_evidence):
'''
Does one single state transition.
......@@ -235,16 +235,16 @@ class MetropolisHastingsTransitionModel(object):
current_value = state[node]
#print node.sample_local(current_value, extern_evidence)
proposed_value, cdf_ratio = node.sample_local(current_value, extern_evidence)
p_of_proposal_given_mb = self._compute_p_of_value_given_mb(network, state, node, proposed_value)
p_of_current_given_mb = self._compute_p_of_value_given_mb(network, state, node, current_value)
#print "acceptance_probability = min(1.0, "+str(p_of_proposal_given_mb)+" / "+str(p_of_current_given_mb) + " * "+str(cdf_ratio)
acceptance_probability = min(1.0, p_of_proposal_given_mb/p_of_current_given_mb * cdf_ratio * 1.0/1.0)
if random.random() <= acceptance_probability:
state[node]=proposed_value
return state
class MarkovChainSampler(object):
'''
......@@ -261,17 +261,17 @@ class MarkovChainSampler(object):
'''
self.transition_model=transition_model
self.convergence_test=convergence_test
def set_convergence_test(self, test):
self.convergence_test=test
def generateMarkovChain(self, network, time_steps, initial_state, evidence={}, variables_of_interest=[]):
'''
This function generates a Markov chain by sampling from a Bayesian network.
It is possible to use different transition functions.
After "Probabilistic Graphical Models, Daphne Koller and Nir Friedman"(p.509)
@param network: A BayesNet.
@param time_steps: Integer specifying how long the chain shall be.
@param initial_state: The state from which transition will start.
......@@ -279,13 +279,13 @@ class MarkovChainSampler(object):
@param variables_of_interest: If not all variable instantiations are needed
this List of RandomNode objects can be used to select which Nodes
are mentioned in the return object.
@returns: A Generator-object for a List of States. Each state is a Dict
from RandomNode to Value
'''
self.convergence_test.reset()
state=initial_state
if evidence:
for node in evidence.keys():
if not evidence[node].is_compatible(state[node]):
......@@ -293,22 +293,22 @@ class MarkovChainSampler(object):
# constant_nodes = evidence.keys()
# else:
# constant_nodes=[]
#let the distribution converge to the target distribution
while not self.convergence_test.has_converged(state):
state=self.transition_model.transition(network, state, evidence)
#finally sample from the target distribution
for t in xrange(time_steps):
if variables_of_interest:
yield self._reduce_state_to_variables_of_interest(state, variables_of_interest)
else:
yield state
state=self.transition_model.transition(network, state, evidence)
def _reduce_state_to_variables_of_interest(self, state, variables_of_interest):
return dict((k,v) for (k,v) in state.iteritems() if k in variables_of_interest)
class ConvergenceTestSimpleCounting(object):
def __init__(self, limit):
......
......@@ -7,33 +7,33 @@ import time
import primo.networks
class Particle(object):
'''
This is the basic particle class used by the DBN particle filter.
Inherit this class for more functionality.
'''
def __init__(self):
self.state = None
def set_state(self, state):
'''
Set the state of this particle and call the update() function.
Keyword arguments:
state -- new state of this particle
'''
self.state = state
self.update()
def get_state(self):
'''
Get the state of this particle.
Returns the state of this particle.
'''
return self.state
def update(self):
'''
Implement this method to update the particle as required.
'''
pass
......@@ -47,11 +47,11 @@ def weighted_random(weights):
def wighted_sample_with_replacement(samples = [], weights = [], N = 0):
'''
The population is resampled to generate a new population of N samples.
Each new sample is selected from the current population; the probability
that a particular sample is selected is proportional to its weight.
See "Artificial Intelligence: A Modern Approach (Third edition)" by
Stuart Russell and Peter Norvig (p. 596 ff.)
Keyword arguments:
......@@ -76,10 +76,10 @@ def weighted_sample(network, evidence = {}):
'''
Each nonevidence variable is sampled according to the conditional
distribution given the values already sampled for the variable's parents,
while a weight is accumulated based on the likelihood for each evidence
variable.
See "Artificial Intelligence: A Modern Approach (Third edition)" by
See "Artificial Intelligence: A Modern Approach (Third edition)" by
Stuart Russell and Peter Norvig (p. 534)
Keyword arguments:
......@@ -92,7 +92,7 @@ def weighted_sample(network, evidence = {}):
state = {}
if not isinstance(network, primo.networks.BayesianNetwork):
raise Exception("The given network is not an instance of BayesianNetwork.")
nodes = network.get_nodes_in_topological_sort()
for node in nodes:
#reduce this node's cpd
......@@ -102,7 +102,7 @@ def weighted_sample(network, evidence = {}):
reduced_cpd = node.get_cpd_reduced(evidence_tmp)
else:
reduced_cpd = node.get_cpd()
# (re-)calculate weight
if node in evidence:
w *= reduced_cpd.get_table()[node.get_value_range().index(evidence[node])]
......@@ -117,8 +117,8 @@ def weighted_sample(network, evidence = {}):
def particle_filtering_DBN(network, N, T, get_evidence_function, particle_class = Particle, interval = 0):
'''
Create N samples for the given network with T time slices.
See "Artificial Intelligence: A Modern Approach (Third edition)" by
See "Artificial Intelligence: A Modern Approach (Third edition)" by
Stuart Russell and Peter Norvig (p. 596 ff.)
Keyword arguments:
......@@ -139,7 +139,7 @@ def particle_filtering_DBN(network, N, T, get_evidence_function, particle_class
# Sample from initial distribution
samples = sample_from_inital_distribution(network, get_evidence_function(), N, particle_class)
# Sample time slices
initial_samples = True
if T == -1:
......@@ -180,9 +180,9 @@ def sample_from_inital_distribution(network, evidence, N, particle_class = Parti
samples.append(particle_class())
samples[n].set_state(copy.copy(state))
weights.append(w)
weights = normalize_weights(weights)
# weighted sample with replacement
return wighted_sample_with_replacement(samples, weights, N)
......@@ -196,7 +196,7 @@ def sample_one_time_slice(network, samples, evidence, initial_samples = False):
samples -- a dict of samples (sampled from initial distribution at the beginning or a previous time slice)
evidence -- dict with the following structure: {node1:evidence1, node2:evidence2, ...}
initial_samples -- is true if the given samples were sampled from the initial distribution
Returns a list of N new samples
'''
weights = []
......@@ -209,12 +209,12 @@ def sample_one_time_slice(network, samples, evidence, initial_samples = False):
samples[n].set_state(copy.copy(state))
weights.append(w)
weights = normalize_weights(weights)
# weighted sample with replacement
return wighted_sample_with_replacement(samples, weights, N)
def normalize_weights(weights=[]):
'''
Normalize the given weights.
......
......@@ -97,8 +97,8 @@ class XMLBIF(object):
tag_for.appendChild(txt_for)
tag_def.appendChild(tag_for)
# It's not guaranteed that the own node is at dimension zero in
# the probability table. But for the function the order of the
# variables is important
for parent in reversed(current_node.get_cpd().get_variables()):
tag_par = minidom.Element("GIVEN")
......
......@@ -138,17 +138,17 @@ class BayesianNetwork(object):
class BayesianDecisionNetwork(BayesianNetwork):
def __init__(self):
super(BayesianDecisionNetwork, self).__init__()
self.partialOrdering = []
self.random_nodes = []
self.decision_nodes = []
self.utility_nodes = []
def is_valid(self):
'''Check if graph structure is valid.
Returns true if the graph is directed, acyclic and if there is a path that connects every decision node (consistency check),
false otherwise'''
if self.graph.number_of_selfloops() > 0:
......@@ -157,14 +157,14 @@ class BayesianDecisionNetwork(BayesianNetwork):
for node in self.graph.nodes():
if self.has_loop(node):
return False
decisionNodeList = []
for node in self.get_all_nodes():
if isinstance(node, DecisionNode):
decisionNodeList.append(node)
return all([nx.has_path(self.graph, x, y) == True for x in decisionNodeList for y in decisionNodeList])
def add_node(self, node):
if isinstance(node, Node):
if node.name in self.node_lookup.keys():
......@@ -184,19 +184,19 @@ class BayesianDecisionNetwork(BayesianNetwork):
def get_all_nodes(self):
'''Returns all RandomNodes'''
return self.random_nodes
def get_all_decision_nodes(self):
return self.decision_nodes
def get_all_utility_nodes(self):
return self.utility_nodes
def add_edge(self, node_from, node_to):
"""
Adds an edge between two nodes. It is impossible to create an edge between two decision nodes or between two
utility nodes.
keyword arguments:
node_from -- Node from where the edge shall begin
node_to -- Node where the edge shall end
......@@ -210,24 +210,24 @@ class BayesianDecisionNetwork(BayesianNetwork):
node_to.announce_parent(node_from)
else:
raise Exception("Tried to add an Edge between two Nodes of which at least one was not contained in the Bayesnet")
def reset_Decisions(self):
"""
Resets all decisions in the Bayesian Decision Network
"""
for node in self.decision_nodes:
node.set_state(None)
def get_partialOrdering(self):
"""
Getter for the partial ordering
"""
return self.partialOrdering
def set_partialOrdering(self, partialOrder):
"""
Sets the partial ordering for this Bayesian Decision Network
partialOrder -- ordered list of RandomNodes and Decision Nodes
example: [decisionNode1, [randomNode1,randomNode2], decisionNode2, [randomNode3]]
"""
......@@ -314,12 +314,12 @@ class TwoTBN(BayesianNetwork):
'''
Set all initial nodes to the value of their corresponding nodes
in state (previous time slice).
Keyword arguments:
state -- Current state of the network (previous time slice).
initial -- Set initial to true if this will be the first time slice
and state only contains nodes of the initial distribution.
Returns this instance with all initial nodes set to their
new value.
'''
......@@ -340,7 +340,7 @@ class TwoTBN(BayesianNetwork):
def add_node(self, node, initial=False, node_t=None):
'''
Add a node to the TwoTBN.
Keyword arguments:
node -- Node to be added.
initial -- If true node is marked as initial node.
......@@ -349,11 +349,11 @@ class TwoTBN(BayesianNetwork):
super(TwoTBN, self).add_node(node)
if initial:
self.set_initial_node(node, node_t)
def set_initial_node(self, node_name, node_name_t):
'''
Mark a node as initial node.
Keyword arguments:
node_name -- Name of the initial node.
node_name_t -- Name of the corresponding node in the time slice.
......@@ -365,7 +365,7 @@ class TwoTBN(BayesianNetwork):
def has_initial_node_by_name(self, node_name):
'''
Check if this instance has an initial node with name node_name.
Returns true on success, false otherwise.
'''
for (node, node_t) in self.__initial_nodes:
......
......@@ -16,7 +16,7 @@ class Node(object):
def __init__(self, node_name):
# Remove all special characters and replace " " with "_"
name = re.sub(r"[^a-zA-Z_0-9 ]*", "", node_name)
self.name = name.replace(" ", "_")
# for visual illustration
self.pos = (0, 0)
......@@ -26,10 +26,10 @@ class Node(object):
class RandomNode(Node):
'''Represents a random variable. There should be subclasses of this for
different kinds of data. There are currently DiscreteNode for
discrete-valued random variables and ContinuousNode for random variables
with R or an interval in R as domain.
At a later point in time there may be structural nodes too.
'''
......@@ -38,13 +38,13 @@ class RandomNode(Node):
def __init__(self, name):
super(RandomNode, self).__init__(name)
#value_range defines the domain of this random variable
self.value_range=None
def set_cpd(self, cpd):
self.cpd = cpd
def get_cpd(self):
return self.cpd
......@@ -64,21 +64,21 @@ class RandomNode(Node):
def get_value_range(self):
return self.value_range
def sample_gobal(self, x, evidence=None):
'''
This method can be used to sample from this local distribution.
@param state: A Dict from Node-objects to values. You can specify the
values of this node's parents in this dict and the conditional
probability density will be adjusted accordingly.
'''
raise Exception("Called unimplemented Method")
def sample_local(self, x, evidence=None):
'''
This method can be used to do a random walk in the domain of this node.
@param x: The spot around which the next sample shall be generated.
@param evidence: Evidence which is to be concerned when new samples are
being generated. I am not entirely sure that this belongs here or is
......@@ -100,16 +100,16 @@ class DiscreteNode(RandomNode):
self.value_range = value_range
self.cpd = primo.densities.ProbabilityTable()
self.cpd.add_variable(self)
def __str__(self):
return self.name
def __repr__(self):
return "DiscreteNode("+self.name+")"
def set_probability(self, value, node_value_pairs):
self.cpd.set_probability(value, node_value_pairs)
def get_probability(self, value, node_value_pairs):
return self.cpd.get_probability([(self,value)] + node_value_pairs)
......@@ -118,7 +118,7 @@ class DiscreteNode(RandomNode):
def is_valid(self):
return self.cpd.is_normalized_as_cpt(self)
def sample_global(self, state, evidence):
if evidence==None or not self in evidence.keys():
compatibles=self.value_range
......@@ -127,9 +127,9 @@ class DiscreteNode(RandomNode):
for v in self.value_range:
if evidence[self].is_compatible(v):
compatibles.append(v)
return self.cpd.sample_global(state,self,compatibles)
def sample_local(self, x, evidence=None):
if evidence==None or not self in evidence.keys():
compatibles=self.value_range
......@@ -138,7 +138,7 @@ class DiscreteNode(RandomNode):
for v in self.value_range:
if evidence[self].is_compatible(v):
compatibles.append(v)
return random.choice(compatibles), 1.0
......@@ -150,34 +150,34 @@ class ContinuousNode(RandomNode):
'''
def __init__(self, name, value_range, DensityClass):
super(ContinuousNode, self).__init__(name)
#value_range is a 2-tuple that defines this variable's domain.
self.value_range = value_range
#the class density_class defines the class of function that is used
#for this ContinuousNode's pdf.
self.density_class = DensityClass
#cpd - ConditionalProbabilityDensity is the concrete density function
#cpd - ConditionalProbabilityDensity is the concrete density function
self.cpd = DensityClass(self)
def __str__(self):
return self.name
def __repr__(self):
return "str(ContinuousNode)"+self.name+")"
def set_density_parameters(self, density_parameters):
self.cpd.set_parameters(density_parameters)
def sample_local(self, x, evidence):
'''
This method can be used to do a random walk in the domain of this node.
@param x: The spot around which the next sample shall be generated.
@param evidence: Evidence which is to be concerned when new samples are
being generated. I am not entirely sure that this belongs here or is
correct in theory...
ATTENTION:
This is the most simple and stupid implementation of the method. It
uses bogo-search to find a sample that fits the evidence. You could
......@@ -185,7 +185,7 @@ class ContinuousNode(RandomNode):
intervals allowed by the evidence and then generate a sample directly.
Currently this method has O(inf).'''
std_walk=1.0
#intersect possible evidence-interval with value_range:
if self in evidence.keys():
evidence_range=evidence[self].get_interval()
......@@ -194,35 +194,35 @@ class ContinuousNode(RandomNode):
else:
lower_limit=self.value_range[0]
upper_limit=self.value_range[1]
if lower_limit==upper_limit:
v=lower_limit
if lower_limit>upper_limit:
raise Exception("Intersection of random variable's value_range and"
"allowed Interval for Evidence is empty - no sampling possible")
#generate the actual sample
distribution=scipy.stats.norm(x, std_walk)
lower_cdf=distribution.cdf(lower_limit)
upper_cdf=distribution.cdf(upper_limit)
sample_in_integral=random.uniform(lower_cdf, upper_cdf)
sample=distribution.ppf(sample_in_integral)
a=scipy.stats.norm(self.value_range[0], std_walk).cdf(x)
b=scipy.stats.norm(self.value_range[0], std_walk).cdf(sample)
cdf_ratio = a/b
return sample,cdf_ratio
def sample_global(self, state, evidence):
'''
This method can be used to sample from this local distribution.
@param state: A Dict from Node-objects to values. You can specify the
values of this node's parents in this dict and the conditional
probability density will be adjusted accordingly.
'''
#is there some evidence for this node?
......@@ -248,15 +248,15 @@ class ContinuousNode(RandomNode):
if lower_limit>upper_limit:
raise Exception("Intersection of random variable's value_range and"
"allowed Interval for Evidence is empty - no sampling possible")
proposal=self.cpd.sample_global(state,lower_limit,upper_limit)
return proposal
def get_probability(self, value, state):
'''
This method can be used to query the cpd for how probable a value is,
given this node's markov-blanket.
@param value: The value for this random-variable.
@param state: A Dict from Node-objects to values. Should at least contain
all variables from this nodes markov-blanket.
......@@ -268,52 +268,52 @@ class ContinuousNodeFactory(object):
'''This class offers methods for generating ContinuousNodes'''
def __init__(self):
pass
def createGaussNode(self, name):
'''
Create a LinearGaussNode with linear dependencies on parents.
@param name: The name of the node.
'''
return self.createContinuousNode(
name,
(-float("Inf"),
float("Inf")),
primo.densities.Gauss)
def createExponentialNode(self, name):
'''
Create a LinearExponentialNode with linear dependencies on parents.
@param name: The name of the node.
'''
return self.createContinuousNode(
name,
(0,float("Inf")),
primo.densities.Exponential)
def createBetaNode(self, name):
'''
Create a LinearBetaNode with linear dependencies on parents.
@param name: The name of the node.
'''
return self.createContinuousNode(
name,
(0, 1),
primo.densities.Beta)
def createContinuousNode(self,name,value_range,density_class):
'''
Create a ContinuousNode. This method should only be invoked from
outside this class if no specialized method is available.
@param name: The name of the node.
@param value_range: A 2-tuple which represents the interval that is the
domain of the variable.
@param DensityClass: A class from primo.reasoning.density that shall be
the node's pdf
'''
return ContinuousNode(
name,
value_range,
......@@ -322,117 +322,117 @@ class ContinuousNodeFactory(object):
class DecisionNode(Node):
"""Handles a DecisionNode which contains a list of actions and has a state"""
def __init__(self, name, value_range):
"""
Initialize a DecisionNode
Keyword arguments:
name -- Name of this DecisionNode
value_range -- A list of actions
"""
super(DecisionNode, self).__init__(name)
self.value_range = value_range
self.state = None
def get_value_range(self):
"""returns a list of actions"""
return self.value_range
def set_value_range(self, value_range):
"""
Sets the value range
Keyword arguments:
value_range -- List of actions
"""
self.value_range = value_range
def announce_parent(self, node):
pass
def set_state(self, decision):
"""
Sets the state of this Decision Node
Keyword arguments:
decision -- The decision that has been made
"""
if decision in self.value_range:
self.state = decision
else:
raise Exception("Could not set the state, given decision is not in value range")
def get_state(self):
"""
Getter for the state
"""
return self.state
def __str__(self):
return self.name + "\n" + str(self.value_range) + "\n" + str(self.state)
class UtilityNode(Node):
"""Handles an UtilityNode"""
def __init__(self, name):
"""
Constructor for the Utility Node
Keyword arguments:
name -- The name of this node
"""
super(UtilityNode, self).__init__(name)
self.ut = UtilityTable()
def announce_parent(self, node):
"""
Gets called automatically when this node gets a new parent
Keyword arguments:
node -- the parent node of this utility node
"""
self.ut.add_variable(node)
def set_utility_table(self, table, nodes):
"""
Sets the utility table
keyword arguments:
table -- the utility table
nodes -- a list of nodes which are the parents of this utility node
"""
self.ut.set_utility_table(table, nodes)
def set_utility(self, value, assignment):
"""
Sets one utility in the utility table of this node
keyword arguments:
value -- the utility value
assignment -- a list of assignments of node value pairs
"""
self.ut.set_utility(value, assignment)
def get_utility_table(self):
"""
Getter for the utility table
"""
return self.ut
def get_utility(self, node_value_pairs):
"""
Getter for the utility stored in the utility table
keyword arguments:
node_value_pairs -- list of node,value pairs
"""
return self.ut.get_utility(node_value_pairs)
def __str__(self):
return self.name + "\n" + str(self.ut)
# -*- coding: utf-8 -*-
import distutils.core
import sys
from distutils.core import setup
if sys.argv[-1] == 'setup.py':
print("To install, run 'python setup.py install'")
......@@ -11,12 +10,13 @@ if sys.version_info[:2] < (2, 6):
sys.version_info[:2])
sys.exit(-1)
setup(name='primo',
version='1.0',
description='PRobabilistic Inference MOdules',
long_description='This project contains probabilistic inference modules for Python. Our aim is to create a library which offers well known probabilistic (graphical) models like Bayesian or temporal networks. A variety of inference algorithms will be implemented soon.',
author='Manuel Baum, Hendrik Buschmeier, Denis John, Lukas Kettenbach, Max Koch',
url='https://github.com/mbaumBielefeld/PRIMO',
download_url='https://github.com/mbaumBielefeld/PRIMO/archive/develop.zip',
packages = ['primo', 'primo.core', 'primo.decision', 'primo.decision.make_decision', 'primo.reasoning', 'primo.reasoning.density', 'primo.reasoning.factorelemination', 'primo.reasoning.particlebased', 'primo.utils']
)
distutils.core.setup(
name='primo',
version='1.0',
description='PRobabilistic Inference MOdules',
long_description='This project contains probabilistic inference modules for Python. Our aim is to create a library which offers well known probabilistic (graphical) models like Bayesian or temporal networks. A variety of inference algorithms will be implemented soon.',
author='Manuel Baum, Hendrik Buschmeier, Denis John, Lukas Kettenbach, Max Koch',
url='https://github.com/mbaumBielefeld/PRIMO',
download_url='https://github.com/mbaumBielefeld/PRIMO/archive/develop.zip',
packages = ['primo', 'primo.inference']
)