Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,10 @@
.temp/
dataset/
test/
submission/
api_config.yaml
api_profiles.yaml
temp-share/

# Mac
.DS_Store
Expand Down Expand Up @@ -169,3 +172,5 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

.claude/
1 change: 0 additions & 1 deletion docs/src/data/modelData.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,6 @@ export const modelData: Data[] = [
{ name: 'RCA-Agent', model: 'Claude 3.5 Sonnet', org: 'Microsoft', correct: '11.34%', partial: '17.31%', date: '2025/1/23' },
{ name: 'RCA-Agent', model: 'GPT-4o', org: 'Microsoft', correct: '8.96%', partial: '17.91%', date: '2025/1/23' },
{ name: 'RCA-Agent', model: 'Gemini 1.5 Pro', org: 'Microsoft', correct: '2.69%', partial: '6.87%', date: '2025/1/23' },

// Closed Models - Balanced
{ name: 'Prompting (Balanced)', model: 'Claude 3.5 Sonnet', org: 'Microsoft', correct: '3.88%', partial: '18.81%', date: '2025/1/23' },
{ name: 'Prompting (Balanced)', model: 'GPT-4o', org: 'Microsoft', correct: '3.28%', partial: '14.33%', date: '2025/1/23' },
Expand Down
4 changes: 4 additions & 0 deletions rca/api_config.example.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
SOURCE: "Anthropic"
MODEL: "claude-sonnet-4-5-20250929"
API_KEY: "sk-ant-xxxxxxxxxxxxx"
API_BASE: ""
4 changes: 0 additions & 4 deletions rca/api_config.yaml

This file was deleted.

32 changes: 32 additions & 0 deletions rca/api_profiles.example.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
# API Profiles — copy a profile's fields into api_config.yaml or use CLI overrides
# Usage: python -m rca.run_agent_standard --dataset Bank --source OpenAI --model gpt-4o --api_key sk-...

anthropic-sonnet:
SOURCE: "Anthropic"
MODEL: "claude-sonnet-4-5-20250929"
API_KEY: "sk-ant-xxxxxxxxxxxxx"

anthropic-opus-4.5:
SOURCE: "Anthropic"
MODEL: "claude-opus-4-5-20251101"
API_KEY: "sk-ant-xxxxxxxxxxxxx"

anthropic-opus-4.6:
SOURCE: "Anthropic"
MODEL: "claude-opus-4-6-20260110"
API_KEY: "sk-ant-xxxxxxxxxxxxx"

openai-gpt5.2:
SOURCE: "OpenAI"
MODEL: "gpt-5.2-2025-12-11"
API_KEY: "sk-proj-xxxxxxxxxxxxx"

openai-o3:
SOURCE: "OpenAI"
MODEL: "o3"
API_KEY: "sk-proj-xxxxxxxxxxxxx"

google-gemini:
SOURCE: "Google"
MODEL: "gemini-3-pro-preview"
API_KEY: "AIza-xxxxxxxxxxxxx"
72 changes: 50 additions & 22 deletions rca/api_router.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,33 +23,50 @@ def OpenAI_chat_completion(messages, temperature):
).choices[0].message.content

def Google_chat_completion(messages, temperature):
    """Send a chat request to the Google Gemini API and return the reply text.

    Args:
        messages: OpenAI-style list of {"role", "content"} dicts. An optional
            leading "system" message is lifted out and passed as the Gemini
            system instruction rather than as a conversation turn.
        temperature: Sampling temperature forwarded to the model.

    Returns:
        The generated response text.
    """
    # Imported lazily so the other providers work without google-genai installed.
    from google import genai
    from google.genai import types

    # NOTE: HttpOptions.timeout is in milliseconds — this is a 120 s timeout.
    client = genai.Client(
        api_key=configs["API_KEY"],
        http_options=types.HttpOptions(timeout=120_000),
    )

    # Gemini takes the system prompt out-of-band, not as a message turn.
    system_instruction = None
    if messages and messages[0]["role"] == "system":
        system_instruction = messages[0]["content"]
        messages = messages[1:]

    # Map OpenAI roles onto Gemini roles: "assistant" -> "model", rest -> "user".
    contents = []
    for item in messages:
        role = "model" if item["role"] == "assistant" else "user"
        contents.append(
            types.Content(role=role, parts=[types.Part.from_text(text=item["content"])])
        )

    config = types.GenerateContentConfig(
        temperature=temperature,
        system_instruction=system_instruction,
    )
    response = client.models.generate_content(
        model=configs["MODEL"],
        contents=contents,
        config=config,
    )
    return response.text

def Anthropic_chat_completion(messages, temperature):
    """Send a chat request to the Anthropic Messages API and return the reply text.

    The response is streamed and accumulated client-side so that long
    generations do not trip HTTP read timeouts on a single blocking call.

    Args:
        messages: OpenAI-style list of {"role", "content"} dicts. An optional
            leading "system" message is lifted out and passed via the dedicated
            ``system`` parameter, as the Messages API requires.
        temperature: Sampling temperature forwarded to the model.

    Returns:
        The full generated response text.
    """
    # Imported lazily so the other providers work without anthropic installed.
    import anthropic
    client = anthropic.Anthropic(api_key=configs["API_KEY"])

    # Anthropic rejects "system" inside the messages list; pass it separately.
    system = None
    if messages and messages[0]["role"] == "system":
        system = messages[0]["content"]
        messages = messages[1:]

    kwargs = dict(
        model=configs["MODEL"],
        messages=messages,
        temperature=temperature,
        # Generous upper bound; individual models may enforce a lower cap.
        max_tokens=128000,
    )
    if system:
        kwargs["system"] = system

    # Accumulate streamed text chunks into the final reply.
    chunks = []
    with client.messages.stream(**kwargs) as stream:
        for chunk in stream.text_stream:
            chunks.append(chunk)
    return "".join(chunks)

# for 3-rd party API which is compatible with OpenAI API (with different 'API_BASE')
def AI_chat_completion(messages, temperature):
Expand Down Expand Up @@ -78,14 +95,25 @@ def send_request():
else:
raise ValueError("Invalid SOURCE in api_config file.")

for i in range(3):
max_retries = 60
for i in range(max_retries):
try:
return send_request()
except Exception as e:
print(e)
if '429' in str(e):
print("Rate limit exceeded. Waiting for 1 second.")
time.sleep(1)
if 'insufficient_quota' in str(e):
wait = 60
else:
wait = min(2 ** i, 30)
print(f"Rate limit exceeded. Waiting for {wait} seconds (attempt {i+1}/{max_retries}).")
time.sleep(wait)
continue
elif 'Connection' in type(e).__name__ or 'ConnectionError' in str(type(e)):
wait = min(2 ** i, 30)
print(f"Connection error. Waiting for {wait} seconds (attempt {i+1}/{max_retries}).")
time.sleep(wait)
continue
else:
raise e
raise e
raise RuntimeError(f"API request failed after {max_retries} retries due to rate limiting.")
34 changes: 31 additions & 3 deletions rca/baseline/rca_agent/controller.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,12 +81,23 @@ def control_loop(objective:str, plan:str, ap, bp, logger, max_step = 15, max_tur

note = [{'role': 'user', 'content': f"Continue your reasoning process for the target issue:\n\n{objective}\n\nFollow the rules during issue solving:\n\n{ap.rules}.\n\nResponse format:\n\n{format}"}]
attempt_actor = []
response_raw = ""
try:
response_raw = get_chat_completion(
messages=prompt + note,
)
if response_raw is None:
logger.error("API returned None response")
prompt.append({'role': 'user', 'content': "The API request failed. Please provide your analysis in requested JSON format."})
continue
if "```json" in response_raw:
response_raw = re.search(r"```json\n(.*)\n```", response_raw, re.S).group(1).strip()
m = re.search(r"```json\s*\n(.*?)\n\s*```", response_raw, re.S)
if m:
response_raw = m.group(1).strip()
else:
m2 = re.search(r"```json\s*(.*?)```", response_raw, re.S)
if m2:
response_raw = m2.group(1).strip()
logger.debug(f"Raw Response:\n{response_raw}")
if '"analysis":' not in response_raw or '"instruction":' not in response_raw or '"completed":' not in response_raw:
logger.warning("Invalid response format. Please provide a valid JSON response.")
Expand All @@ -107,10 +118,18 @@ def control_loop(objective:str, plan:str, ap, bp, logger, max_step = 15, max_tur
answer = get_chat_completion(
messages=prompt,
)
if answer is None:
answer = "API request failed. No root cause found."
logger.debug(f"Raw Final Answer:\n{answer}")
prompt.append({'role': 'assistant', 'content': answer})
if "```json" in answer:
answer = re.search(r"```json\n(.*)\n```", answer, re.S).group(1).strip()
m = re.search(r"```json\s*\n(.*?)\n\s*```", answer, re.S)
if m:
answer = m.group(1).strip()
else:
m2 = re.search(r"```json\s*(.*?)```", answer, re.S)
if m2:
answer = m2.group(1).strip()
return answer, trajectory, prompt

code, result, status, new_history = execute_act(instruction, bp.schema, history, attempt_actor, kernel, logger)
Expand Down Expand Up @@ -144,8 +163,17 @@ def control_loop(objective:str, plan:str, ap, bp, logger, max_step = 15, max_tur
answer = get_chat_completion(
messages=prompt,
)
if answer is None:
answer = "API request failed. No root cause found."
logger.debug(f"Raw Final Answer:\n{answer}")
prompt.append({'role': 'assistant', 'content': answer})
if "```json" in answer:
answer = re.search(r"```json\n(.*)\n```", answer, re.S).group(1).strip()
m = re.search(r"```json\s*\n(.*?)\n\s*```", answer, re.S)
if m:
answer = m.group(1).strip()
else:
# Fallback: try to extract JSON object directly after ```json
m2 = re.search(r"```json\s*(.*?)```", answer, re.S)
if m2:
answer = m2.group(1).strip()
return answer, trajectory, prompt
Loading