-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsee_think_act_agent.py
More file actions
394 lines (323 loc) · 13.9 KB
/
see_think_act_agent.py
File metadata and controls
394 lines (323 loc) · 13.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
"""
See-Think-Act Agent for Windows 11
Autonomous agent that captures screenshots, analyzes with Qwen3-VL via Ollama, and performs actions
"""
import time
import json
import logging
from typing import Optional, Dict, Any, List
from pathlib import Path
from datetime import datetime
from utils.screenshot_capture import ScreenshotCapture
from utils.ollama_client import OllamaVisionClient
from utils.action_executor import ActionExecutor
from utils.agent_function_call import ComputerUse
class SeeThinkActAgent:
    """
    Autonomous AI Agent that can See, Think, and Act
    - See: Captures screenshots of the desktop
    - Think: Analyzes with Qwen3-VL model via Ollama
    - Act: Executes actions based on model's decisions
    """

    def __init__(self,
                 model: str = "qwen3-vl:235b-cloud",
                 max_iterations: int = 50,
                 save_screenshots: bool = True,
                 screenshot_dir: str = "screenshots",
                 log_level: str = "INFO"):
        """
        Initialize the See-Think-Act Agent

        Args:
            model: Ollama model name
            max_iterations: Maximum number of iterations before stopping
            save_screenshots: Whether to save screenshots
            screenshot_dir: Directory to save screenshots
            log_level: Logging level name (e.g. "INFO", "DEBUG")
        """
        # Setup logging
        logging.basicConfig(
            level=getattr(logging, log_level),
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger(__name__)

        # Initialize components
        self.screenshot_capture = ScreenshotCapture()
        self.ollama_client = OllamaVisionClient(model=model)

        # Size the action executor to the real screen so the model's
        # normalized (1000x1000) coordinates can be mapped back to pixels.
        screen_width, screen_height = self.screenshot_capture.get_screen_size()
        self.action_executor = ActionExecutor(
            screen_width=screen_width,
            screen_height=screen_height
        )

        # Configuration
        self.max_iterations = max_iterations
        self.save_screenshots = save_screenshots
        self.screenshot_dir = Path(screenshot_dir)

        # Create screenshot directory.
        # parents=True so nested paths like "runs/2024/screenshots" also work.
        if self.save_screenshots:
            self.screenshot_dir.mkdir(parents=True, exist_ok=True)

        # Initialize computer use tool; the model reasons in a fixed
        # 1000x1000 normalized coordinate space.
        self.computer_use = ComputerUse(
            cfg={
                "display_width_px": 1000,  # Normalized coordinates
                "display_height_px": 1000
            }
        )

        # State tracking (reset again at the start of each run())
        self.iteration_count = 0
        self.task_completed = False
        self.task_status = None
        self.conversation_history = []

        self.logger.info("Agent initialized with model: %s", model)
        self.logger.info("Screen size: %dx%d", screen_width, screen_height)

    def _get_system_prompt(self) -> str:
        """Get the system prompt for the agent"""
        return """You are a helpful AI assistant that can see, think, and act on a Windows 11 computer.
Your capabilities:
1. SEE: You receive screenshots of the current desktop state
2. THINK: You analyze what you see and plan the next action
3. ACT: You can control the mouse, keyboard, and perform various actions
When given a task:
1. First, look at the screenshot to understand the current state
2. Think about what needs to be done to accomplish the task
3. Take ONE action at a time
4. After each action, you'll get a new screenshot to see the result
5. Continue until the task is complete
Important guidelines:
- Be precise with mouse coordinates - aim for the center of buttons/links
- Wait after actions that might take time (opening apps, loading pages)
- If an action fails, try adjusting your approach
- When the task is complete, use the 'terminate' action with status 'success'
- If the task cannot be completed, use 'terminate' with status 'failure'
You have access to a 'computer' tool that allows you to:
- Click (left, right, double, middle)
- Type text
- Press keys
- Move mouse
- Scroll
- Wait for changes
- Terminate when done
"""

    def _prepare_tools(self) -> List[Dict[str, Any]]:
        """Prepare tool definitions for Ollama.

        Returns:
            A one-element list wrapping the ComputerUse function spec in
            Ollama's tool-calling format.
        """
        # Convert ComputerUse function to Ollama tool format
        computer_function = self.computer_use.function
        tool = {
            "type": "function",
            "function": {
                "name": computer_function["name"],
                "description": computer_function["description"],
                "parameters": computer_function["parameters"]
            }
        }
        return [tool]

    def _save_screenshot(self, image, prefix: str = "screenshot") -> str:
        """Save screenshot with timestamp.

        Args:
            image: PIL Image to save.
            prefix: Filename prefix.

        Returns:
            The saved file path as a string.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"{prefix}_{self.iteration_count:03d}_{timestamp}.png"
        filepath = self.screenshot_dir / filename
        image.save(filepath)
        self.logger.info("Screenshot saved: %s", filepath)
        return str(filepath)

    def _capture_current_state(self):
        """Capture current screenshot (and optionally persist it to disk)."""
        self.logger.info("Capturing screenshot...")
        screenshot = self.screenshot_capture.capture_screen()
        if self.save_screenshots:
            self._save_screenshot(screenshot)
        return screenshot

    def _think_and_decide(self, task: str, screenshot) -> Dict[str, Any]:
        """
        Send screenshot to model and get decision

        Args:
            task: The user's task
            screenshot: PIL Image of current screen

        Returns:
            Response from the model

        Raises:
            Exception: Propagates any model-inference failure after logging it.
        """
        self.logger.info("Thinking and deciding next action...")

        # Build the prompt; the first iteration frames the task, later
        # iterations ask for the next step given the previous action's result.
        if self.iteration_count == 0:
            user_prompt = f"Task: {task}\n\nThis is the current state of the desktop. What should I do first to accomplish this task?"
        else:
            user_prompt = f"Task: {task}\n\nThis is the current state after the previous action. What should I do next?"

        tools = self._prepare_tools()

        try:
            response = self.ollama_client.chat_with_image(
                user_query=user_prompt,
                image=screenshot,
                system_prompt=self._get_system_prompt(),
                tools=tools
            )
            self.logger.info("Model response: %s", response)
            return response
        except Exception as e:
            self.logger.error("Error in model inference: %s", e)
            raise

    def _execute_action(self, response: Dict[str, Any]) -> bool:
        """
        Execute the action decided by the model

        Args:
            response: Response from the model

        Returns:
            True if action executed successfully
        """
        # Parse the action from response
        action = self.ollama_client.parse_computer_use_action(response)

        if not action:
            self.logger.warning("No action found in response")
            # Check if the model just wants to observe
            if 'message' in response and 'content' in response['message']:
                content = response['message']['content']
                self.logger.info("Model response: %s", content)
                # Heuristic: treat a textual "terminate"/"complete" as success
                if 'terminate' in content.lower() or 'complete' in content.lower():
                    self.logger.info("Task appears to be complete (textual indication)")
                    self.task_completed = True
                    self.task_status = 'success'
                    return True
            return False

        # Extract action details; arguments may arrive as a JSON string
        # or an already-decoded dict depending on the model/client.
        if 'function' in action:
            action_details = action['function']
            action_name = action_details.get('name')
            raw_arguments = action_details.get('arguments')
            if isinstance(raw_arguments, str):
                # Model output is untrusted; malformed JSON must not crash the loop.
                try:
                    arguments = json.loads(raw_arguments)
                except json.JSONDecodeError:
                    self.logger.warning("Could not parse tool arguments: %r", raw_arguments)
                    return False
            else:
                arguments = raw_arguments if raw_arguments is not None else {}
        else:
            arguments = action.get('arguments', {})
            action_name = action.get('name', 'computer')

        # Check for termination
        if arguments.get('action') == 'terminate':
            self.task_completed = True
            self.task_status = arguments.get('status', 'success')
            self.logger.info("Task terminated with status: %s", self.task_status)
            return True

        # Check for answer action (just observation)
        if arguments.get('action') == 'answer':
            answer_text = arguments.get('text', '')
            self.logger.info("Model's answer: %s", answer_text)
            return True

        # Execute the action
        self.logger.info("Executing action: %s", arguments)
        success = self.action_executor.execute_computer_use_action({
            'name': action_name,
            'arguments': arguments
        })

        # Wait a bit after action for UI to update ('wait' already waits)
        if success and arguments.get('action') != 'wait':
            time.sleep(1)

        return success

    def run(self, task: str) -> Dict[str, Any]:
        """
        Run the agent to complete the given task

        Args:
            task: The task to complete

        Returns:
            Dictionary with results including success status and message
        """
        self.logger.info("=" * 80)
        self.logger.info("Starting task: %s", task)
        self.logger.info("=" * 80)

        # Reset state so the agent instance can be reused across tasks
        self.iteration_count = 0
        self.task_completed = False
        self.task_status = None
        self.conversation_history = []

        start_time = time.time()

        try:
            while self.iteration_count < self.max_iterations and not self.task_completed:
                self.iteration_count += 1
                self.logger.info("\n%s", "=" * 80)
                self.logger.info("Iteration %d/%d", self.iteration_count, self.max_iterations)
                self.logger.info("%s", "=" * 80)

                # Step 1: SEE - Capture screenshot
                screenshot = self._capture_current_state()

                # Step 2: THINK - Analyze and decide
                response = self._think_and_decide(task, screenshot)

                # Store in conversation history
                self.conversation_history.append({
                    'iteration': self.iteration_count,
                    'response': response
                })

                # Step 3: ACT - Execute action
                success = self._execute_action(response)
                if not success:
                    self.logger.warning("Action execution failed, but continuing...")

                # Small delay between iterations
                time.sleep(0.5)

            # Task completion
            elapsed_time = time.time() - start_time
            if self.task_completed:
                result = {
                    'success': self.task_status == 'success',
                    'status': self.task_status,
                    'message': f'Task completed in {self.iteration_count} iterations',
                    'iterations': self.iteration_count,
                    'elapsed_time': elapsed_time
                }
                self.logger.info("\n%s", "=" * 80)
                self.logger.info("TASK COMPLETED: %s", result['message'])
                self.logger.info("Status: %s", self.task_status)
                self.logger.info("Time: %.2f seconds", elapsed_time)
                self.logger.info("%s\n", "=" * 80)
            else:
                result = {
                    'success': False,
                    'status': 'timeout',
                    'message': f'Task did not complete within {self.max_iterations} iterations',
                    'iterations': self.iteration_count,
                    'elapsed_time': elapsed_time
                }
                self.logger.warning("\n%s", "=" * 80)
                self.logger.warning("TASK TIMEOUT: %s", result['message'])
                self.logger.warning("%s\n", "=" * 80)

            return result

        except KeyboardInterrupt:
            self.logger.info("\nTask interrupted by user")
            return {
                'success': False,
                'status': 'interrupted',
                'message': 'Task interrupted by user',
                'iterations': self.iteration_count,
                'elapsed_time': time.time() - start_time
            }
        except Exception as e:
            self.logger.error("Error during task execution: %s", e, exc_info=True)
            return {
                'success': False,
                'status': 'error',
                'message': f'Error: {str(e)}',
                'iterations': self.iteration_count,
                'elapsed_time': time.time() - start_time
            }
def main():
    """Example usage of the agent"""
    separator = "=" * 80

    # Build the agent with a short iteration budget for the demo
    demo_agent = SeeThinkActAgent(
        model="qwen3-vl:235b-cloud",
        max_iterations=30,
        save_screenshots=True
    )

    # Bail out early when the Ollama backend is unreachable
    if not demo_agent.ollama_client.test_connection():
        print("\n⚠️ WARNING: Could not connect to Ollama or model not found")
        print("Make sure Ollama is running and the model is pulled:")
        print("  ollama run qwen3-vl:235b-cloud")
        return

    # Example task
    demo_task = "Open Notepad and type 'Hello from AI Agent!'"
    print(f"\nTask: {demo_task}")
    print("Starting agent... (Press Ctrl+C to stop)\n")

    # Run the agent and report the outcome
    outcome = demo_agent.run(demo_task)
    print("\n" + separator)
    print("FINAL RESULT:")
    print(json.dumps(outcome, indent=2))
    print(separator)


if __name__ == "__main__":
    main()