-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathexperiment.py
More file actions
44 lines (35 loc) · 1.77 KB
/
experiment.py
File metadata and controls
44 lines (35 loc) · 1.77 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
# Definition of the experiment.
# Author: Ronald Albert
# Date: June 2023
from sampling_model import galton_watson, sample
from graph_mcmc import mcmc_walk
from optimization import optimized_distribution
import numpy as np
# -------------------------------------------------------------------------
# Calculate the Kullback-Leibler divergence between two distributions
# -------------------------------------------------------------------------
# p: Distribution parameter p
# q: Distribution parameter q
# -------------------------------------------------------------------------
# Returns: The Kullback-Leibler divergence between p and q
# -------------------------------------------------------------------------
def kl_divergence(p, q):
    """Return the Kullback-Leibler divergence D(p || q) in bits.

    Parameters
    ----------
    p : array-like of float
        The reference distribution.
    q : array-like of float
        The comparison distribution; must be positive wherever p is positive.

    Returns
    -------
    float
        sum_i p[i] * log2(p[i] / q[i]), with the standard convention that
        terms where p[i] == 0 contribute 0 (the original element-wise loop
        produced nan for those terms via 0 * log2(0)).
    """
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
    # Only positive-mass entries of p contribute; 0 * log2(0/q) == 0 by convention.
    support = p > 0
    return float(np.sum(p[support] * np.log2(p[support] / q[support])))
# -------------------------------------------------------------------------
# Run the experiment
# -------------------------------------------------------------------------
# offspring_distribution: The offspring distribution
# dist_g: Distribution for the MCMC algorithm
# W: Maximum value of the offspring distribution
# L: Maximum number of levels in the tree
# p: Probability of sampling a path from the Galton-Watson process
# n_steps: Number of steps in the walk
# -------------------------------------------------------------------------
# Returns: The estimated distribution
# -------------------------------------------------------------------------
def experiment(offspring_distribution, dist_g, W, L, p, n_steps):
    """Run one end-to-end estimation experiment.

    Pipeline: grow a Galton-Watson tree, sample observed paths from it,
    explore candidate graphs with an MCMC walk, fit a distribution to the
    walk's samples, and score the fit against the true offspring law.

    Parameters
    ----------
    offspring_distribution : array-like
        The true offspring distribution used to generate the tree.
    dist_g : array-like
        Proposal distribution driving the MCMC algorithm.
    W : int
        Maximum value of the offspring distribution.
    L : int
        Maximum number of levels in the tree.
    p : float
        Probability of sampling a path from the Galton-Watson process.
    n_steps : int
        Number of steps in the MCMC walk.

    Returns
    -------
    tuple
        (KL divergence between the estimate and the true distribution,
         the estimated distribution itself).
    """
    tree = galton_watson(offspring_distribution, L)
    observed_paths, node_count = sample(tree, p)
    walk_samples = mcmc_walk(tree, observed_paths, node_count,
                             W, L, dist_g, p, n_steps)
    estimate = optimized_distribution(walk_samples, dist_g)
    divergence = kl_divergence(estimate, offspring_distribution)
    return divergence, estimate