Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions src/microbots/MicroBot.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,11 +28,11 @@
Don't add any chat or extra messages outside the json format. Because the system will parse only the json response.
The properties ( task_done, thoughts, command ) are mandatory on each response.

after each command, the system will execute the command respond you the output.
ensure to run only one command at a time.
after each command, the system will execute the command and respond to you with the output.
Ensure to run only one command at a time.
NEVER use 'ls -R', 'tree', or 'find' without -maxdepth on large repos - use targeted paths like 'ls drivers/block/' to avoid exceeding context limits.
Use specific patterns: 'find <path> -name "*.c" -maxdepth 2' instead of recursive exploration.
No human in involved in the task. So, don't seek human intervention.
No human is involved in the task. So, don't seek human intervention.
"""


Expand Down
12 changes: 6 additions & 6 deletions src/microbots/llm/ollama_local.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
# LOCAL_MODEL_PORT=11434
# ```
#
# To use with Microbot, define you Microbot as following
# To use with Microbot, define your Microbot as follows
# ```python
# bot = Microbot(
# model="ollama-local/codellama:latest",
Expand Down Expand Up @@ -53,14 +53,14 @@ def __init__(self, system_prompt, model_name=LOCAL_MODEL_NAME, model_port=LOCAL_
self.messages = [{"role": "system", "content": system_prompt}]

if not self.model_name or not self.model_port:
raise ValueError("LOCAL_MODEL_NAME and LOCAL_MODEL_PORT environment variables must be set for or passed as arguments OllamaLocal.")
raise ValueError("LOCAL_MODEL_NAME and LOCAL_MODEL_PORT environment variables must be set or passed as arguments to OllamaLocal.")

# Set these values here. This logic will be handled in the parent class.
self.max_retries = max_retries
self.retries = 0

def ask(self, message) -> LLMAskResponse:
self.retries = 0 # reset retries for each ask. Handled in parent class.
self.retries = 0 # reset retries for each ask. Handled in parent class.

self.messages.append({"role": "user", "content": message})

Expand Down Expand Up @@ -99,11 +99,11 @@ def _send_request_to_local_model(self, messages):
if response.status_code == 200:
response_json = response.json()
logger.debug(f"\nResponse JSON: {response_json}")
response_back = response_json.get("response", {})
response_back = response_json.get("response", "")

# However, as instructed, Ollama is not providing the response only in JSON.
# It adds some extra text above or below the json sometimes.
# So, this hack to extract the json part from the response.
# It adds some extra text above or below the JSON sometimes.
# So, this hack extracts the JSON part from the response.
try:
response_back = response_back.split("{", 1)[1]
response_back = "{" + response_back.rsplit("}", 1)[0] + "}"
Expand Down
2 changes: 1 addition & 1 deletion test/llm/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
import time
import requests
import shutil
from pathlib import Path


@pytest.fixture(scope="session")
Expand Down Expand Up @@ -128,6 +127,7 @@ def ollama_server(check_ollama_installed, ensure_ollama_model_pulled, ollama_mod
server_already_running = True
print(f"\nOllama server already running on port {ollama_model_port}")
except requests.exceptions.RequestException:
# If the request fails, assume the server is not running and proceed to start it.
pass

process = None
Expand Down
4 changes: 2 additions & 2 deletions test/llm/test_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def test_default_values(self):
response = LLMAskResponse()
assert response.task_done is False
assert response.command == ""
assert response.thoughts is None or response.thoughts == ""
assert response.thoughts == ""

def test_custom_values(self):
"""Test creating response with custom values"""
Expand All @@ -56,7 +56,7 @@ def test_partial_initialization(self):
response = LLMAskResponse(command="ls -la")
assert response.task_done is False
assert response.command == "ls -la"
assert response.thoughts is None or response.thoughts == ""
assert response.thoughts == ""

@pytest.mark.integration
class TestValidateLlmResponse:
Expand Down
5 changes: 2 additions & 3 deletions test/llm/test_ollama_local.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,7 @@
import pytest
import sys
import os
from unittest.mock import Mock, patch, MagicMock
import json
from unittest.mock import Mock, patch

# Add src to path for imports
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../src")))
Expand Down Expand Up @@ -313,7 +312,7 @@ def test_ask_resets_retries(self, mock_post):
}
mock_post.return_value = mock_response

result = ollama.ask("List files")
ollama.ask("List files")

assert ollama.retries == 0

Expand Down
4 changes: 2 additions & 2 deletions test/llm/test_openai_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def test_ask_successful_response(self):
mock_response.output_text = json.dumps({
"task_done": False,
"command": "echo 'hello'",
"thoughts": None
"thoughts": ""
})
api.ai_client.responses.create = Mock(return_value=mock_response)

Expand All @@ -105,7 +105,7 @@ def test_ask_successful_response(self):
assert isinstance(result, LLMAskResponse)
assert result.task_done is False
assert result.command == "echo 'hello'"
assert result.thoughts is None or result.thoughts == ""
assert result.thoughts == ""

# Verify retries was reset
assert api.retries == 0
Expand Down