-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathagent.py
More file actions
195 lines (156 loc) · 5.48 KB
/
agent.py
File metadata and controls
195 lines (156 loc) · 5.48 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
import operator
from typing import Any, List, Annotated
from typing_extensions import TypedDict
from uuid import uuid4
from dotenv import load_dotenv
from langchain_aws import ChatBedrockConverse
from langchain_core.messages import HumanMessage, SystemMessage
from langgraph.graph import END, START, StateGraph
from langgraph.prebuilt import ToolNode
# ------------------------------
# Import custom tools
# ------------------------------
from tools.owasp import fetch_owasp_context
from tools.security_tools import (
check_authentication_security,
check_injection_points,
check_outdated_components,
check_script_integrity,
check_security_headers,
check_ssrf_risk,
check_tls_security,
scan_open_ports,
)
# ------------------------------
# Load environment
# ------------------------------
load_dotenv()
# ------------------------------
# Define graph state
# ------------------------------
class SecurityState(TypedDict, total=False):
    """Shared LangGraph state for the security-analysis agent.

    All keys are optional (``total=False``); nodes may return partial
    updates containing only the keys they change.
    """

    # Raw user request for this session (URL, code, or architecture text).
    input: str
    # Conversation history; the operator.add annotation makes LangGraph
    # concatenate each node's returned messages onto the existing list.
    messages: Annotated[List[Any], operator.add]
    # Plain-text summary of the model's latest reply.
    explanation: str
    # Mirror of the model's last content, when non-empty (set by sec_assistant).
    scan_results: Any
# ------------------------------
# Initialize model and tools
# ------------------------------
# Bedrock Converse model behind the agent.  temperature=0 keeps security
# findings deterministic.  The model_id is an account-specific inference
# profile ARN — NOTE(review): consider moving it to configuration.
llm = ChatBedrockConverse(
    model_id="arn:aws:bedrock:us-east-2:197348940135:inference-profile/us.amazon.nova-micro-v1:0",
    provider="amazon",
    temperature=0,
)
# Security scanning tools exposed to the model; also executed by the
# graph's ToolNode below.
a_tools = [
    check_security_headers,
    check_outdated_components,
    scan_open_ports,
    check_tls_security,
    check_ssrf_risk,
    check_authentication_security,
    check_injection_points,
    check_script_integrity,
    fetch_owasp_context,
]
# Model handle with tool schemas attached, so replies may carry tool_calls.
llm_tools = llm.bind_tools(a_tools)
# System prompt injected as the first message of every conversation by the
# agent node.  This text is runtime behavior sent to the model — edit with care.
SYSTEM_PROMPT = """
You are a Security Vulnerability Analyst specializing in OWASP Top 10 risks.
Your job is to analyze user-provided URLs, code, or architecture details for
potential vulnerabilities and provide simple, fact-based, and clear guidance.
Guidelines:
- Keep responses short, clear, and beginner-friendly.
- Use fetch_owasp_context for verified OWASP references.
- Do NOT show reasoning or internal steps.
- If unclear, ask a specific follow-up question.
- If tool outputs are available, summarize them clearly.
"""
# ------------------------------
# Main security agent node
# ------------------------------
def sec_assistant(state: SecurityState) -> SecurityState:
    """LLM agent node: invoke the tool-bound model over the conversation.

    Args:
        state: Current graph state; reads ``input`` and ``messages``.

    Returns:
        A partial state update.  Because ``messages`` uses an
        ``operator.add`` reducer, this node returns ONLY the messages it
        adds this turn — the original returned the entire history, which
        the reducer concatenated onto existing state and duplicated every
        message on each agent/tool cycle.
    """
    history: List[Any] = list(state.get("messages", []))

    # Build the prompt locally.  The system prompt is prepended for the model
    # call but never returned through the reducer, so it cannot be duplicated
    # or truncated away.
    prompt: List[Any] = []
    if not (history and isinstance(history[0], SystemMessage)):
        prompt.append(SystemMessage(content=SYSTEM_PROMPT))
    prompt.extend(history)

    # This node is re-entered after every tool execution (tools -> sec_agent
    # edge).  Only append the user's input if it is not already in history;
    # the original re-appended it on every pass, duplicating the prompt.
    user_input = state["input"]
    delta: List[Any] = []
    if not any(
        isinstance(m, HumanMessage) and m.content == user_input for m in history
    ):
        user_message = HumanMessage(content=user_input)
        prompt.append(user_message)
        delta.append(user_message)

    # Invoke the Bedrock model (tool-bound); reply may contain tool_calls.
    ai_message = llm_tools.invoke(prompt)
    delta.append(ai_message)

    new_state: SecurityState = {
        "input": user_input,
        "messages": delta,  # reducer appends these to the stored history
        "explanation": getattr(ai_message, "content", "") or "",
    }
    # Optional: mirror non-empty model content as structured scan results.
    if getattr(ai_message, "content", ""):
        new_state["scan_results"] = ai_message.content
    return new_state
# ------------------------------
# Graph setup
# ------------------------------
# Assemble the agent graph: the LLM node plus a prebuilt ToolNode that
# executes whatever tool_calls the model's last message requested.
Graph = StateGraph(SecurityState)
Graph.add_node("sec_agent", sec_assistant)
Graph.add_node("tools", ToolNode(a_tools))
Graph.add_edge(START, "sec_agent")
def _route_from_agent(state: SecurityState) -> str:
"""Routing function for tools."""
messages = state.get("messages", [])
if messages:
last_message = messages[-1]
tool_calls = getattr(last_message, "tool_calls", None)
if tool_calls:
return "tools"
return "end"
# After the agent speaks, either run the requested tools or finish.
Graph.add_conditional_edges(
    "sec_agent",
    _route_from_agent,
    {"tools": "tools", "end": END},
)
# Tool output always flows back to the agent so it can summarize results.
Graph.add_edge("tools", "sec_agent")
# Compiled runnable graph.  NOTE(review): no checkpointer is attached here,
# so the thread_id passed at invoke time does not persist state across calls.
graph = Graph.compile()
# ------------------------------
# Bedrock runtime entrypoint
# ------------------------------
from bedrock_agentcore.runtime import BedrockAgentCoreApp

app = BedrockAgentCoreApp()


def _extract_text(content: Any) -> str:
    """Flatten a LangChain message ``content`` field into plain text.

    ``content`` may be a plain string or a list of content blocks; blocks
    can be dicts (``{"type": "text", "text": ...}``) or bare strings
    depending on the provider.  The original code called ``.get`` on every
    list item and raised AttributeError on string blocks; non-dict,
    non-string blocks are now skipped.
    """
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        parts: List[str] = []
        for block in content:
            if isinstance(block, str):
                parts.append(block)
            elif isinstance(block, dict) and block.get("type") == "text":
                parts.append(block.get("text", ""))
        return "\n".join(parts).strip()
    return str(content)


@app.entrypoint
def agent_invocation(payload, context):
    """AgentCore entrypoint: run the security agent for one request.

    Args:
        payload: Request body; expects ``{"input": {"prompt": str}}`` and
            an optional ``"sessionId"`` used as the LangGraph thread id.
        context: Runtime context supplied by AgentCore (unused).

    Returns:
        The text of the last AI message, or a fallback status string when
        no readable output was produced.
    """
    input_data = payload.get("input", {})
    user_prompt = input_data.get("prompt", "No prompt provided. Please guide the user about available tools.")
    tmp_state = {
        "input": user_prompt,
        "messages": [HumanMessage(content=user_prompt)],
    }
    # Reuse the caller's session id when present so multi-turn requests can
    # share a thread; otherwise mint a fresh one.
    thread_id = payload.get("sessionId") or str(uuid4())
    tmp_output = graph.invoke(
        tmp_state,
        config={"configurable": {"thread_id": thread_id}},
    )
    # Extract the last readable AI message.
    messages_f = tmp_output.get("messages", [])
    if not messages_f:
        return "No messages returned by the agent."
    ai_messages = [
        m for m in messages_f
        if getattr(m, "type", None) == "ai" or getattr(m, "role", None) == "assistant"
    ]
    if not ai_messages:
        return "No AI response generated."
    text = _extract_text(getattr(ai_messages[-1], "content", ""))
    # Empty string content now also yields the fallback (the original
    # returned "" when a string-typed content was empty).
    return text or "No readable output."


if __name__ == "__main__":
    # Start the AgentCore HTTP runtime only when executed directly;
    # importing this module (e.g. in tests) must not start a server.
    app.run()