-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathmain.py
More file actions
98 lines (77 loc) · 3.5 KB
/
main.py
File metadata and controls
98 lines (77 loc) · 3.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
import time
import json
import os
from script_executor import execute_script
from traceback_parser import parse_traceback_with_ai, extract_python_files_from_traceback
from file_handler import get_file_contents, update_script
from fix_suggester import suggest_fix_with_ai
def write_full_execution_log(log_data, log_file="full_execution_log.json"):
    """
    Write the complete execution log to a JSON file.

    Args:
        log_data: A list of dictionaries, where each dictionary represents
            the context of an iteration.
        log_file: The path to the log file.
    """
    # Explicit UTF-8 avoids platform-dependent default encodings, and
    # ensure_ascii=False keeps any non-ASCII traceback/output text in the
    # log human-readable instead of \uXXXX escapes.
    with open(log_file, "w", encoding="utf-8") as f:
        json.dump(log_data, f, indent=4, ensure_ascii=False)
    print(f"Full execution log saved to: {log_file}")
def main():
    """
    Run a self-healing execution loop over ``script.py``.

    Each iteration executes the script; on failure, the traceback is parsed
    with an AI model, the involved files are collected, and an AI-suggested
    corrected script is written back before retrying. Every iteration's
    context is appended to a cumulative JSON log.
    """
    script_path = "script.py"
    project_id = "your-gcp-project-id"  # Replace with your GCP project ID
    model_name = "gemini-1.5-flash-002"  # Or your preferred model
    max_iterations = 10
    iteration = 0
    # Defined before the loop so the post-loop check is safe even if the
    # loop body never runs.
    success = False
    full_log_data = []  # List to store the execution context of each iteration

    while iteration < max_iterations:
        iteration += 1
        print(f"\nIteration: {iteration}")

        success, output, stack_trace = execute_script(script_path)

        # Reset per-iteration context so the log never carries stale values
        # from a previous (failed) iteration.
        parsed_traceback = None
        ai_response = None
        file_contents = {}

        if not success:
            print("Script execution failed!")
            print("Stack Trace:\n", stack_trace)

            parsed_traceback = parse_traceback_with_ai(stack_trace, project_id, model_name)
            print("Parsed Traceback:\n", parsed_traceback)

            python_files = extract_python_files_from_traceback(parsed_traceback)
            print("Python files involved:\n", python_files)

            file_contents = get_file_contents(python_files)

            ai_response = suggest_fix_with_ai(script_path, stack_trace, file_contents, project_id, model_name)
            if ai_response:
                print("\nAI Suggested Fix:")
                print(json.dumps(ai_response, indent=2))
                if "full_corrected_script" in ai_response:
                    update_script(script_path, ai_response["full_corrected_script"])
                    print("Script updated with AI's suggestion. Retrying...")
                    time.sleep(2)
                    # We do NOT `continue` here: log this iteration first,
                    # then loop back naturally.
                else:
                    print("AI did not provide a 'full_corrected_script'. Cannot update script.")
            else:
                print("AI could not provide a fix.")
        else:
            print("Script executed successfully!")
            print("Output:\n", output)

        # Record this iteration's full context.
        iteration_context = {
            "iteration": iteration,
            "success": success,
            "output": output,
            "stack_trace": stack_trace,
            "parsed_traceback": parsed_traceback,
            "ai_response": ai_response,
            "file_contents": file_contents,
        }
        full_log_data.append(iteration_context)

        # Persist the full log after every iteration so a crash mid-run
        # still leaves a usable log on disk.
        write_full_execution_log(full_log_data)

        # Check for success AFTER logging.
        if success:
            break

    # BUG FIX: the original checked `iteration == max_iterations`, which also
    # fired when the script SUCCEEDED on the final iteration, falsely
    # reporting failure. Warn only when the loop ended without success
    # (which can only happen after exhausting max_iterations).
    if not success:
        print(f"\nReached maximum iterations ({max_iterations}). Script may not be fully corrected.")
# Entry point: run the self-healing loop only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()