-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathrun.py
More file actions
47 lines (40 loc) · 1.76 KB
/
run.py
File metadata and controls
47 lines (40 loc) · 1.76 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
from utils import *
from voting_agent.baseline_agent import BaselineAgent
from training.trainer import learning_environment, baseline_learning_environment
from voting_agent.BQ import BranchingQN
from voting_agent.MultiLabelQ import MultiLabelDQ
import ipdb
import random
def experiment_part(params, experiment_name, voter_preferences, instance, profile, voter_ballot, device):
    """Run one part of a voting experiment.

    Builds the environment for the given election instance, sizes and
    constructs one learning agent per voter, trains the agents via
    ``learning_environment``, and persists the resulting data.

    Args:
        params: experiment configuration dict. Reads/writes ``"env_config"``,
            writes ``"voter_config"``, reads ``"training_config"`` (for the
            ``'algorithm'`` agent class) and ``"exp_part"``.
        experiment_name: label forwarded to the trainer and the result saver.
        voter_preferences: mapping of voter id -> preference data; its keys
            define the set of voters.
        instance: election instance; ``instance.meta`` must provide
            ``'vote_type'`` and, for cumulative votes, ``'unit'``.
        profile: ballot profile forwarded to ``env_creator``.
        voter_ballot: ballots stored into the env config.
        device: device handed to each agent constructor.

    Raises:
        ValueError: if the vote type / unit combination is not recognized
            (previously this fell through and crashed later with a
            ``NameError`` on the unbound action-dimension variable).
    """
    voter_ids = list(voter_preferences)
    params["env_config"]['first_instance'] = instance
    params["env_config"]['voter_preferences'] = voter_preferences
    params["env_config"]["voter_ballot"] = voter_ballot
    env = env_creator(params["env_config"], profile)

    # ------------ Agent initialization ----------------
    ACTION_SPACE = env.action_space(voter_ids[0])
    observations, infos = env.reset()

    # Number of "tokens" an agent may distribute: every project for approval
    # ballots, a fixed per-city budget for cumulative ballots.
    if instance.meta['vote_type'] == 'approval':
        action_dimension = len(instance)
    elif instance.meta['vote_type'] == 'cumulative':
        if instance.meta['unit'] == 'Toulouse':
            action_dimension = 7
        # NOTE(review): 'Aaurau' looks like a typo for the Swiss city
        # 'Aarau' — confirm against the dataset's meta field before fixing.
        elif instance.meta['unit'] == 'Aaurau':
            action_dimension = 10
        else:
            raise ValueError(
                f"Unknown cumulative unit: {instance.meta['unit']!r}"
            )
    else:
        raise ValueError(
            f"Unknown vote type: {instance.meta['vote_type']!r}"
        )

    # Second reset kept for its side effects on the env (the original code
    # called it here), but its unused return values are no longer bound.
    env.reset()

    params["voter_config"] = {
        "number_of_tokens": action_dimension,
        "input_dims": len(observations[voter_ids[0]].flatten()),
        "action_space": ACTION_SPACE.n,
    }

    # One independent learning agent per environment agent.
    agents = {}
    for agent_id in env.agents:
        agents[agent_id] = params['training_config']['algorithm'](
            params['voter_config']['input_dims'],
            params['voter_config']['number_of_tokens'],
            params['voter_config']['action_space'],
            params,
            device=device,
        )

    data = learning_environment(params, env, agents, experiment_name)
    save_object(data, experiment_name, params['exp_part'])