-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: ml-service.py
More file actions
132 lines (113 loc) · 3.87 KB
/
ml-service.py
File metadata and controls
132 lines (113 loc) · 3.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
#!/usr/bin/env python3
"""
Simplified FastAPI ML Microservice for testing
"""
import os
import sys
from typing import List
from datetime import datetime
# FastAPI imports
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
import uvicorn
# Add ML services to Python path
ml_services_path = os.path.join(os.path.dirname(__file__), 'ml services')
sys.path.insert(0, ml_services_path)
# Import ML model
from student_risk_ml_python import StudentRiskMLModel
# Initialize FastAPI app
app = FastAPI(title="Student Risk ML Service", version="1.0.0")

# Global ML model instance; None signals "unavailable" and makes the
# endpoints answer 503 / unhealthy.
ml_model = None

# Load (or train) the model at import time so the service is ready before
# the first request arrives.
try:
    print("🤖 Loading ML model...")
    ml_model = StudentRiskMLModel()
    # FIX: anchor the model path to the script's own 'ml services' directory
    # (ml_services_path, derived from __file__) instead of the process CWD —
    # the original relative path broke when the service was launched from
    # any other working directory.
    models_dir = os.path.join(ml_services_path, 'models')
    model_path = os.path.join(models_dir, 'student_risk_model.pkl')
    if os.path.exists(model_path):
        ml_model.load_model(model_path)
        print("✅ ML model loaded successfully")
    else:
        # No saved model yet: train a fresh one and persist it for next start.
        print("⚠️ Training new model...")
        ml_model.train_model()
        os.makedirs(models_dir, exist_ok=True)
        ml_model.save_model(model_path)
        print("✅ New model trained and saved")
except Exception as e:
    # Best-effort startup: keep the service running even if the model fails
    # to load; request handlers check ml_model for None.
    print(f"❌ Model loading error: {e}")
    ml_model = None
# Request models
class StudentData(BaseModel):
    """Input features describing one student for risk prediction."""
    studentId: str
    attendance: float  # presumably a percentage — TODO confirm expected range
    gpa: float
    obtainedMarks: int
    totalMarks: int
    totalDue: float  # NOTE(review): fee fields — units/currency not shown here; verify against caller
    paid: float
    paidOnTime: bool
class PredictionRequest(BaseModel):
    """Batch prediction request body: one StudentData entry per student."""
    students: List[StudentData]
# Endpoints
@app.get("/")
async def root():
    """Service banner: identifies the service and reports model availability."""
    model_ready = ml_model is not None
    return {
        "service": "Student Risk ML Service",
        "status": "running",
        "model_loaded": model_ready,
        "timestamp": datetime.now().isoformat(),
    }
@app.get("/health")
async def health():
    """Health probe: 'healthy' only when the ML model is loaded."""
    payload = {"model_loaded": ml_model is not None}
    payload["status"] = "healthy" if ml_model else "unhealthy"
    payload["timestamp"] = datetime.now().isoformat()
    return {
        "status": payload["status"],
        "model_loaded": payload["model_loaded"],
        "timestamp": payload["timestamp"],
    }
@app.post("/predict")
async def predict_batch(request: PredictionRequest):
    """Score a batch of students.

    Per-student failures are swallowed deliberately: a bad record yields a
    Medium-risk fallback entry instead of failing the whole batch.
    Returns 503 when no model is loaded.
    """
    if not ml_model:
        raise HTTPException(status_code=503, detail="ML model not available")
    results = []
    for entry in request.students:
        try:
            outcome = ml_model.predict_student_risk(entry.dict())
            record = {
                "studentId": entry.studentId,
                "riskScore": float(outcome["riskScore"]),
                "riskLevel": outcome["riskLevel"],
                "riskFactors": outcome.get("riskFactors", [])
            }
        except Exception as e:
            # Best-effort fallback so one bad record never aborts the batch.
            print(f"Prediction error for {entry.studentId}: {e}")
            record = {
                "studentId": entry.studentId,
                "riskScore": 0.5,
                "riskLevel": "Medium",
                "riskFactors": [f"Prediction error: {str(e)}"]
            }
        results.append(record)
    return {
        "predictions": results,
        "processed_count": len(results),
        "timestamp": datetime.now().isoformat()
    }
@app.post("/predict-single")
async def predict_single(student: StudentData):
    """Score a single student.

    Returns 503 when no model is loaded and 500 when prediction fails.
    """
    if not ml_model:
        raise HTTPException(status_code=503, detail="ML model not available")
    try:
        outcome = ml_model.predict_student_risk(student.dict())
        return {
            "studentId": student.studentId,
            "riskScore": float(outcome["riskScore"]),
            "riskLevel": outcome["riskLevel"],
            "riskFactors": outcome.get("riskFactors", []),
            "timestamp": datetime.now().isoformat()
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Prediction failed: {str(e)}")
if __name__ == "__main__":
    # Dev entry point: run uvicorn in-process.
    # NOTE(review): banner says localhost while the bind is explicitly
    # 127.0.0.1 — equivalent in practice, but not reachable from other hosts.
    print("🚀 Starting ML Microservice on http://localhost:8000")
    uvicorn.run(app, host="127.0.0.1", port=8000, log_level="info")