# f1Predictions.py
import fastf1  # only needed for the optional real-data loader sketched below
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.inspection import permutation_importance
import matplotlib.pyplot as plt
import seaborn as sns
# -----------------------------
# Simulated example F1 dataset
# -----------------------------
np.random.seed(42)  # make the simulated data reproducible
data = {
    'driver': ['Verstappen', 'Hamilton', 'Leclerc', 'Russell', 'Norris'] * 20,
    'team': ['Red Bull', 'Mercedes', 'Ferrari', 'Mercedes', 'McLaren'] * 20,
    'circuit': ['Monza', 'Silverstone', 'Monaco', 'Spa', 'Bahrain'] * 20,
    'session': ['Q1', 'Q2', 'Q3', 'Race', 'Race'] * 20,
    'lap_number': np.random.randint(1, 60, 100),
    'track_temp': np.random.uniform(25, 45, 100),   # degrees C
    'air_temp': np.random.uniform(20, 35, 100),     # degrees C
    'lap_time': np.random.uniform(75, 100, 100)     # target, in seconds
}
df = pd.DataFrame(data)
print(df.head())
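# Optional: fetch real laps with FastF1 instead of the simulation above.
# A minimal sketch, assuming network access and a writable cache directory;
# the column names follow FastF1's Laps schema, but verify them against the
# installed version before relying on this.
def load_real_laps(year=2023, gp='Monza', session_name='R'):
    fastf1.Cache.enable_cache('f1_cache')  # cache downloads locally (directory must exist)
    session = fastf1.get_session(year, gp, session_name)
    session.load()  # downloads timing data on first call
    laps = session.laps[['Driver', 'Team', 'LapNumber', 'LapTime']].dropna()
    laps['lap_time'] = laps['LapTime'].dt.total_seconds()  # Timedelta -> seconds
    return laps
# df = load_real_laps()  # uncomment to train on real laps instead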
# -----------------------------
# Feature selection
# -----------------------------
features = ['driver', 'team', 'circuit', 'session', 'lap_number', 'track_temp', 'air_temp']
target = 'lap_time'
X = df[features]
y = df[target]
# -----------------------------
# Preprocessing
# -----------------------------
categorical_features = ['driver', 'team', 'circuit', 'session']
numerical_features = ['lap_number', 'track_temp', 'air_temp']
preprocessor = ColumnTransformer(
    transformers=[
        ('num', StandardScaler(), numerical_features),
        ('cat', OneHotEncoder(handle_unknown='ignore'), categorical_features)
    ]
)
# -----------------------------
# Gradient Boosting Model Pipeline
# -----------------------------
model = Pipeline(steps=[
    ('preprocessor', preprocessor),
    ('regressor', GradientBoostingRegressor(
        n_estimators=200,
        learning_rate=0.1,
        max_depth=4,
        random_state=42
    ))
])
# -----------------------------
# Split data & train
# -----------------------------
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
model.fit(X_train, y_train)
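# With only 100 simulated laps, a single train/test split gives a noisy
# error estimate; a quick 5-fold cross-validation check (a sketch, not part
# of the original flow) is more stable. cross_val_score clones and refits
# the whole pipeline on each fold.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(model, X, y, cv=5, scoring='neg_mean_absolute_error')
print(f"CV MAE: {-cv_scores.mean():.2f} ± {cv_scores.std():.2f} seconds")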
# Retrieve the fitted steps from the trained pipeline
preprocessor = model.named_steps['preprocessor']
regressor = model.named_steps['regressor']
# Get feature names from each transformer
numeric_names = numerical_features
categorical_names = preprocessor.named_transformers_['cat'].get_feature_names_out(categorical_features)
# Combine all feature names
all_feature_names = np.concatenate([numeric_names, categorical_names])
# Get impurity-based feature importances from the fitted regressor
importances = regressor.feature_importances_
importance_df = pd.DataFrame({
    'Feature': all_feature_names,
    'Importance': importances
})
# Normalize to percentage
importance_df['Importance (%)'] = 100 * importance_df['Importance'] / importance_df['Importance'].sum()
# Sort by importance
importance_df = importance_df.sort_values('Importance (%)', ascending=False)
# Display
print(importance_df.head(15)) # Show top 15 features
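# One-hot encoding spreads each categorical column across many dummy
# features; summing the dummies back to their source column (a sketch that
# relies on the 'name_' prefix convention of get_feature_names_out) gives a
# per-column view of the same importances.
grouped = {}
for name, imp in zip(all_feature_names, importances):
    source = next((c for c in categorical_features if name.startswith(c + '_')), name)
    grouped[source] = grouped.get(source, 0.0) + imp
for col, imp in sorted(grouped.items(), key=lambda kv: -kv[1]):
    print(f"{col}: {100 * imp:.1f}%")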
# -----------------------------
# Evaluation
# -----------------------------
y_pred = model.predict(X_test)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)
print(f"Mean Squared Error: {mse:.3f}")
print(f"R^2 Score: {r2:.3f}")
print(f"\n🔍 Model Error (MAE): {mean_absolute_error(y_test, y_pred):.2f} seconds")
# Inspect the transformed feature matrix produced by the fitted preprocessor
X_transformed = model.named_steps['preprocessor'].transform(X)
feature_names = all_feature_names  # same feature names assembled above
# Convert to DataFrame (with many more categories the transformer output
# could be sparse and would need .toarray() first; here it is dense)
X_df = pd.DataFrame(X_transformed, columns=feature_names)
# Compute correlation matrix
corr_matrix = X_df.corr().abs()
# Show heatmap of correlations
plt.figure(figsize=(12, 10))
sns.heatmap(corr_matrix, cmap="coolwarm", vmax=1.0, square=True)
plt.title("Feature Correlation Matrix")
plt.tight_layout()
plt.show()
# -------------------------------------------
# 🔍 Highly Correlated Feature Pairs
# -------------------------------------------
# Find highly correlated pairs
threshold = 0.9
upper_triangle = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
high_corr = [(col1, col2, upper_triangle.loc[col1, col2])
             for col1 in upper_triangle.columns
             for col2 in upper_triangle.columns
             if pd.notnull(upper_triangle.loc[col1, col2]) and upper_triangle.loc[col1, col2] > threshold]
# Display results
if high_corr:
    print("Highly correlated feature pairs (r > 0.9):")
    for f1, f2, score in high_corr:
        print(f"{f1} <-> {f2}: {score:.3f}")
else:
    print("No highly correlated features found above the threshold.")
# Permutation importance is computed on the raw input columns of X_test
# (the pipeline preprocesses internally), so there is one score per
# original column, labelled with the original column names.
result = permutation_importance(model, X_test, y_test, n_repeats=10, random_state=42)
importances = result.importances_mean
for i in importances.argsort()[::-1]:
    print(f"{X_test.columns[i]}: {importances[i]:.4f}")
# The impurity-based importance table (importance_df) was already computed
# above; reuse it for the summary and the plot below.
# Display top 15 features
print("\nTop Feature Importances:")
print(importance_df.head(15))
# Optional: Plot
plt.figure(figsize=(10, 6))
plt.barh(importance_df['Feature'].head(15), importance_df['Importance (%)'].head(15))
plt.gca().invert_yaxis()
plt.title("Top 15 Feature Importances (% Contribution)")
plt.xlabel("Importance (%)")
plt.tight_layout()
plt.show()
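# Persist the fitted pipeline so it can be reused without retraining.
# A minimal sketch, assuming joblib is available (scikit-learn depends on
# it); the filename is illustrative.
import joblib
joblib.dump(model, 'f1_lap_time_model.joblib')
# model = joblib.load('f1_lap_time_model.joblib')  # reload later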