Skip to content

Commit e5e436c

Browse files
committed
Merge branch 'ramondev' into dev
2 parents ffc6b95 + 6024a86 commit e5e436c

4 files changed

Lines changed: 161 additions & 0 deletions

File tree

test/imports_worst_params.py

Lines changed: 104 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,104 @@
1+
# Fixture module: a captured `cv_results_` dictionary from a scikit-learn
# grid search over three classifier families (LogisticRegression,
# RandomForestClassifier, SVC). The values are recorded verbatim so that
# tests importing this module (e.g. test_worst_params.py) are deterministic.
from numpy import array
from numpy.ma import masked_array
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC


# NOTE(review): keys/shape follow sklearn's GridSearchCV.cv_results_ layout:
# 12 candidates x 10 CV splits; masked entries ('--') mark parameters that do
# not apply to a given candidate.
clf_cv_results = {
    'mean_fit_time': array([0.01718318, 0.0028271, 0.38156233, 0.33234448, 0.35453713,
                            0.00439863, 0.00284553, 0.00274587, 0.00266459, 0.00200651,
                            0.00236692, 0.00278957]),
    'std_fit_time': array([0.00516329, 0.00163186, 0.09469229, 0.07785508, 0.06154388,
                           0.00176376, 0.00057058, 0.00091433, 0.00144808, 0.00063374,
                           0.00082487, 0.00075851]),
    'mean_score_time': array([0.0018924, 0.00069876, 0.03394792, 0.03130136, 0.03214581,
                              0.00179503, 0.00121999, 0.000912, 0.00106797, 0.00100076,
                              0.00091968, 0.00097802]),
    'std_score_time': array([0.00290424, 0.00045828, 0.01646534, 0.01045479, 0.00829282,
                             0.00059888, 0.00042194, 0.00030658, 0.00033866, 0.00044725,
                             0.00073592, 0.0003762]),
    'param_classifier': masked_array(data=[LogisticRegression(max_iter=1000, solver='liblinear'),
                                           LogisticRegression(max_iter=1000, solver='liblinear'),
                                           RandomForestClassifier(), RandomForestClassifier(),
                                           RandomForestClassifier(), SVC(), SVC(), SVC(), SVC(),
                                           SVC(), SVC(), SVC()],
                                     mask=[False, False, False, False, False, False, False, False,
                                           False, False, False, False],
                                     fill_value='?',
                                     dtype=object),
    'param_classifier__penalty': masked_array(data=['l1', 'l2', '--', '--', '--', '--', '--', '--',
                                                    '--', '--', '--', '--'],
                                              mask=[False, False, True, True, True, True, True, True,
                                                    True, True, True, True],
                                              fill_value='?',
                                              dtype=object),
    'param_classifier__max_features': masked_array(data=['--', '--', 1, 2, 3, '--', '--', '--',
                                                         '--', '--', '--', '--'],
                                                   mask=[True, True, False, False, False, True, True,
                                                         True, True, True, True, True],
                                                   fill_value='?',
                                                   dtype=object),
    'param_classifier__C': masked_array(data=['--', '--', '--', '--', '--', 0.001, 0.1, 0.5, 1, 5,
                                              10, 100],
                                        mask=[True, True, True, True, True, False, False, False,
                                              False, False, False, False],
                                        fill_value='?',
                                        dtype=object),
    'params': [{'classifier': LogisticRegression(max_iter=1000, solver='liblinear'),
                'classifier__penalty': 'l1'},
               {'classifier': LogisticRegression(max_iter=1000, solver='liblinear'),
                'classifier__penalty': 'l2'},
               {'classifier': RandomForestClassifier(), 'classifier__max_features': 1},
               {'classifier': RandomForestClassifier(), 'classifier__max_features': 2},
               {'classifier': RandomForestClassifier(), 'classifier__max_features': 3},
               {'classifier': SVC(), 'classifier__C': 0.001},
               {'classifier': SVC(), 'classifier__C': 0.1},
               {'classifier': SVC(), 'classifier__C': 0.5},
               {'classifier': SVC(), 'classifier__C': 1},
               {'classifier': SVC(), 'classifier__C': 5},
               {'classifier': SVC(), 'classifier__C': 10},
               {'classifier': SVC(), 'classifier__C': 100}],
    'split0_test_score': array([0.91666667, 0.91666667, 1., 1., 1.,
                                0.33333333, 0.91666667, 0.91666667, 1., 1.,
                                1., 1.]),
    'split1_test_score': array([0.91666667, 1., 0.83333333, 0.83333333, 0.83333333,
                                0.33333333, 0.91666667, 0.91666667, 0.91666667, 0.91666667,
                                0.91666667, 1.]),
    'split2_test_score': array([1., 1., 0.91666667, 0.91666667, 0.91666667,
                                0.58333333, 0.91666667, 0.91666667, 1., 1.,
                                1., 1.]),
    'split3_test_score': array([1., 1., 1., 1., 1.,
                                0.58333333, 0.91666667, 1., 1., 1.,
                                1., 1.]),
    'split4_test_score': array([1., 1., 1., 1., 1.,
                                0.66666667, 1., 1., 1., 1.,
                                1., 1.]),
    'split5_test_score': array([1., 1., 1., 1., 1.,
                                0.66666667, 1., 1., 1., 1.,
                                1., 1.]),
    'split6_test_score': array([1., 1., 1., 1., 1.,
                                0.41666667, 0.75, 0.91666667, 1., 1.,
                                1., 1.]),
    'split7_test_score': array([0.66666667, 0.75, 0.83333333, 0.83333333, 0.83333333,
                                0.58333333, 0.91666667, 0.83333333, 0.83333333, 0.83333333,
                                0.83333333, 0.83333333]),
    'split8_test_score': array([1., 1., 1., 1., 1.,
                                0.33333333, 1., 1., 1., 1.,
                                1., 0.91666667]),
    'split9_test_score': array([1., 1., 0.91666667, 1., 1.,
                                0.33333333, 1., 1., 0.91666667, 0.91666667,
                                0.91666667, 0.91666667]),
    'mean_test_score': array([0.95, 0.96666667, 0.95, 0.95833333, 0.95833333,
                              0.48333333, 0.93333333, 0.95, 0.96666667, 0.96666667,
                              0.96666667, 0.96666667]),
    'std_test_score': array([0.1, 0.07637626, 0.06666667, 0.06718548, 0.06718548,
                             0.13844373, 0.07264832, 0.05527708, 0.05527708, 0.05527708,
                             0.05527708, 0.05527708]),
    'rank_test_score': array([8, 1, 8, 6, 6, 12, 11, 8, 1, 1, 1, 1]),
}
94+
95+
96+
# Minimal hand-written stand-in for a `cv_results_`-style dictionary, used to
# exercise worst-parameter selection on a small, fully known dataset.
# Index 4 is the only entry with a distinct parameter set ('kernel': 's'); its
# mean_test_score (0.50) is NOT the minimum, while rank_test_score gives it
# the worst rank (6) — kept verbatim from the recorded fixture.
df = {'rank_test_score': array([0, 2, 3, 4, 6, 5, 2]),
      'mean_test_score': array([0.2, 0.13, 0.9, 0.60, 0.50, 0.2, 0.03]),
      'params': [{'C': 100, 'degree': 1, 'gamma': 'scale', 'kernel': 'sigmoid'},
                 {'C': 100, 'degree': 1, 'gamma': 'scale', 'kernel': 'sigmoid'},
                 {'C': 100, 'degree': 1, 'gamma': 'scale', 'kernel': 'sigmoid'},
                 {'C': 100, 'degree': 1, 'gamma': 'scale', 'kernel': 'sigmoid'},
                 {'C': 5, 'degree': 3, 'gamma': 'auto', 'kernel': 's'},
                 {'C': 100, 'degree': 1, 'gamma': 'scale', 'kernel': 'sigmoid'},
                 {'C': 100, 'degree': 1, 'gamma': 'scale', 'kernel': 'sigmoid'}]}

test/test_add_previous.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
from add_previous import add_previous
import pandas as pd


# Fixture: two teams' goals in alternating rows (6 matches per team).
df = pd.DataFrame(data={'Goals': [0, 2, 1, 4, 5, 2, 1, 0, 2, 0, 1, 1],
                        'Teams': ['Team 1', 'Team 2', 'Team 1', 'Team 2', 'Team 1', 'Team 2',
                                  'Team 1', 'Team 2', 'Team 1', 'Team 2', 'Team 1', 'Team 2']})


def test_add_previous():
    """add_previous(df, 3, 'Teams', 'Goals') should grow df from 2 to 5 columns.

    NOTE(review): presumably the three new columns are per-team lagged 'Goals'
    features — confirm against add_previous's implementation. The test mutates
    the module-level frame in place.
    """
    add_previous(df, 3, 'Teams', 'Goals')
    list_features = df.columns.to_list()
    print(list_features)  # aids debugging when the assertion fails
    length = len(list_features)

    assert length == 5

test/test_winner_loser.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
from winner_loser import winner_loser
import pandas as pd
import pytest

# Define test data: alternating rows are the two teams' goals for one match,
# so rows (0,1), (2,3), (4,5) pair up — giving a loss/win, loss/win and draw.
df = pd.DataFrame(data={'Goals': [0, 2, 1, 4, 2, 2],
                        'Teams': ['Team 1', 'Team 2', 'Team 1', 'Team 2', 'Team 1', 'Team 2']})

# Run tests


@pytest.mark.parametrize(
    "input_a, input_b, input_c, expected",
    [
        (0, df, 'Goals', 'Loss'),
        (1, df, 'Goals', 'Victory'),
        (2, df, 'Goals', 'Loss'),
        (3, df, 'Goals', 'Victory'),
        (4, df, 'Goals', 'Draw'),
        (5, df, 'Goals', 'Draw'),
    ],
)
def test_winner_loser_multi(input_a, input_b, input_c, expected):
    """winner_loser(row_index, frame, goals_column) labels each row's outcome."""
    assert winner_loser(input_a, input_b, input_c) == expected

test/test_worst_params.py

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
from worst_params import worst_params
import pytest
import numpy as np
from imports_worst_params import clf_cv_results, df
from sklearn.svm import SVC
import json

# NOTE(review): `np`, `SVC` and `json` are not used at runtime here — SVC()
# appears only inside the expected *string* below. Kept to avoid changing
# module behavior; consider removing in a follow-up.


@pytest.mark.parametrize(
    "input_a, expected",
    [
        # Expected value is (repr-string of the worst params dict, its mean score).
        (df, ("{'C': 5, 'degree': 3, 'gamma': 'auto', 'kernel': 's'}", 0.5)),
        (clf_cv_results, ("{'classifier': SVC(), 'classifier__C': 0.001}", 0.48333333)),
    ],
)
def test_worst_params_multi(input_a, expected):
    """worst_params(cv_results) returns the lowest-scoring parameter set and its score."""
    assert worst_params(input_a) == expected

0 commit comments

Comments
 (0)