Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion .env.example
Original file line number Diff line number Diff line change
@@ -1 +1,10 @@
OPENAI_API_KEY=

# LLM model — strong models are required for reliable UI generation
# Recommended: gpt-5.4, gpt-5.4-pro, claude-opus-4-6, gemini-3.1-pro
LLM_MODEL=gpt-5.4-2026-03-05

# Rate limiting (per IP) — disabled by default
RATE_LIMIT_ENABLED=false
RATE_LIMIT_WINDOW_MS=60000
RATE_LIMIT_MAX=40
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -60,3 +60,6 @@ bun.lockb

# Demos
.demos

# References
.references
10 changes: 10 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,16 @@ make setup # Install deps + create .env template
make dev # Start all services
```

> **Strong models required.** Generative UI demands high-capability models that can produce complex, well-structured HTML/SVG in a single pass. Set `LLM_MODEL` in your `.env` to one of:
>
> | Model | Provider |
> |-------|----------|
> | `gpt-5.4` / `gpt-5.4-pro` | OpenAI |
> | `claude-opus-4-6` | Anthropic |
> | `gemini-3.1-pro` | Google |
>
> Smaller or weaker models will produce broken layouts, missing interactivity, or incomplete visualizations.

- **App**: http://localhost:3000
- **Agent**: http://localhost:8123

Expand Down
4 changes: 3 additions & 1 deletion apps/agent/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@
It defines the workflow graph, state, tools, nodes and edges.
"""

import os

from copilotkit import CopilotKitMiddleware
from langchain.agents import create_agent
from langchain_openai import ChatOpenAI
Expand All @@ -17,7 +19,7 @@
_skills_text = load_all_skills()

agent = create_agent(
model=ChatOpenAI(model="gpt-5.4-2026-03-05"),
model=ChatOpenAI(model=os.environ.get("LLM_MODEL", "gpt-5.4-2026-03-05")),
tools=[query_data, *todo_tools, generate_form, *template_tools],
middleware=[CopilotKitMiddleware()],
state_schema=AgentState,
Expand Down
60 changes: 52 additions & 8 deletions apps/app/src/app/api/copilotkit/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,28 +6,72 @@ import {
import { LangGraphAgent } from "@copilotkit/runtime/langgraph";
import { NextRequest } from "next/server";

// Simple in-memory sliding-window rate limiter, keyed by client IP.
// Opt in via RATE_LIMIT_ENABLED=true — disabled by default.
// For high-traffic deployments, prefer a Redis-backed limiter instead.
const RATE_LIMIT_ENABLED = process.env.RATE_LIMIT_ENABLED === "true";
const RATE_LIMIT_WINDOW_MS = Number(process.env.RATE_LIMIT_WINDOW_MS) || 60_000;
const RATE_LIMIT_MAX = Number(process.env.RATE_LIMIT_MAX) || 40;
const hits = new Map<string, number[]>();

/**
 * Reports whether `ip` has exceeded RATE_LIMIT_MAX requests inside the
 * current sliding window. The current request is recorded either way.
 * Always returns false when the limiter is disabled.
 */
function isRateLimited(ip: string): boolean {
  if (!RATE_LIMIT_ENABLED) return false;
  const now = Date.now();
  const windowStart = now - RATE_LIMIT_WINDOW_MS;
  const previous = hits.get(ip) ?? [];
  const recent = previous.filter((stamp) => stamp > windowStart);
  recent.push(now);
  hits.set(ip, recent);
  return recent.length > RATE_LIMIT_MAX;
}

// Prune stale entries every 5 min to prevent unbounded memory growth.
if (RATE_LIMIT_ENABLED) {
  const pruneTimer = setInterval(() => {
    const cutoff = Date.now() - RATE_LIMIT_WINDOW_MS;
    hits.forEach((timestamps, ip) => {
      const recent = timestamps.filter(t => t > cutoff);
      // Drop IPs with no activity inside the window; keep only recent hits.
      if (recent.length === 0) hits.delete(ip);
      else hits.set(ip, recent);
    });
  }, 300_000);
  // Fix: unref the timer so it doesn't keep the Node.js process alive and
  // block graceful shutdown. In non-Node runtimes setInterval returns a
  // number, which has no unref — guard before calling.
  if (typeof pruneTimer !== "number") {
    pruneTimer.unref?.();
  }
}

// Normalize Render's fromService hostport (bare host:port) into a full URL.
// Falls back to the local dev agent when no deployment URL is configured.
const raw = process.env.LANGGRAPH_DEPLOYMENT_URL;
const deploymentUrl = raw
  ? raw.startsWith("http")
    ? raw
    : `http://${raw}`
  : "http://localhost:8123";

// 1. Define the agent connection to LangGraph.
// Fix: the object literal declared `deploymentUrl` twice (the old inline
// env-var expression plus the new shorthand), which is a compile error.
// Keep only the shorthand — `deploymentUrl` above is already normalized
// from LANGGRAPH_DEPLOYMENT_URL with a localhost fallback.
const defaultAgent = new LangGraphAgent({
  deploymentUrl,
  graphId: "sample_agent",
  langsmithApiKey: process.env.LANGSMITH_API_KEY || "",
});

// 3. Define the route and CopilotRuntime for the agent
export const POST = async (req: NextRequest) => {
const ip = req.headers.get("x-forwarded-for")?.split(",")[0]?.trim() ?? "unknown";
if (isRateLimited(ip)) {
return new Response("Too many requests", { status: 429 });
}

const { handleRequest } = copilotRuntimeNextJSAppRouterEndpoint({
  endpoint: "/api/copilotkit",
  serviceAdapter: new ExperimentalEmptyAdapter(),
  runtime: new CopilotRuntime({
    agents: { default: defaultAgent },
    a2ui: { injectA2UITool: true },
    // Fix: `mcpApps` was configured twice — once unconditionally with a
    // hardcoded https://mcp.excalidraw.com fallback, and again inside this
    // env-gated spread that silently overrode it whenever MCP_SERVER_URL
    // was set. Keep the single env-gated registration so MCP apps attach
    // only when a server URL is explicitly configured.
    ...(process.env.MCP_SERVER_URL && {
      mcpApps: {
        servers: [{
          type: "http",
          url: process.env.MCP_SERVER_URL,
          serverId: "example_mcp_app",
        }],
      },
    }),
  }),
});

Expand Down
18 changes: 18 additions & 0 deletions docker/Dockerfile.agent
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
FROM langchain/langgraph-api:3.12

ADD apps/agent /deps/agent

RUN for dep in /deps/*; do \
echo "Installing $dep"; \
if [ -d "$dep" ]; then \
(cd "$dep" && PYTHONDONTWRITEBYTECODE=1 uv pip install --system --no-cache-dir -c /api/constraints.txt -e .); \
fi; \
done

ENV LANGSERVE_GRAPHS='{"sample_agent": "/deps/agent/main.py:graph"}'
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The langgraph-api base image expects the graph object to be importable, but there's no verification that it can actually resolve /deps/agent/main.py:graph at runtime. Worth a quick smoke test before deploying:

docker build -f docker/Dockerfile.agent . && docker run --rm <image> python -c "from main import graph; print(graph)"


RUN mkdir -p /api/langgraph_api /api/langgraph_runtime /api/langgraph_license \
&& touch /api/langgraph_api/__init__.py /api/langgraph_runtime/__init__.py /api/langgraph_license/__init__.py
RUN PYTHONDONTWRITEBYTECODE=1 uv pip install --system --no-cache-dir --no-deps -e /api

WORKDIR /deps/agent
47 changes: 47 additions & 0 deletions render.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
services:
# ── Agent (LangGraph Python) — private, not exposed to internet ──
- type: pserv
name: open-generative-ui-agent
runtime: docker
plan: starter
dockerfilePath: docker/Dockerfile.agent
healthCheckPath: /ok
envVars:
- key: OPENAI_API_KEY
sync: false
- key: LANGSMITH_API_KEY
sync: false
- key: LLM_MODEL
value: gpt-5.4-2026-03-05
buildFilter:
paths:
- apps/agent/**
- docker/Dockerfile.agent

# ── Frontend (Next.js) — public web service ──
- type: web
name: open-generative-ui-app
runtime: docker
plan: starter
dockerfilePath: docker/Dockerfile.app
envVars:
- key: LANGGRAPH_DEPLOYMENT_URL
fromService:
name: open-generative-ui-agent
type: pserv
property: hostport
- key: LANGSMITH_API_KEY
sync: false
- key: RATE_LIMIT_ENABLED
value: "false"
- key: RATE_LIMIT_WINDOW_MS
value: "60000"
- key: RATE_LIMIT_MAX
value: "40"
buildFilter:
paths:
- apps/app/**
- package.json
- pnpm-lock.yaml
- turbo.json
- docker/Dockerfile.app
Loading