diff --git a/README.md b/README.md index 551826a4..84c44332 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,16 @@ cd ~/projects/my-app agentic pull ``` +### Enable Web Research (Optional) + +For external web research capabilities with the `web-search-researcher` agent: + +1. Get a Perplexity API key from [https://www.perplexity.ai/settings/api](https://www.perplexity.ai/settings/api) +2. Set environment variable: `export PERPLEXITY_API_KEY="pplx-your-key"` +3. Install tool dependencies: `cd .opencode && bun install` + +Web research results are cached in `thoughts/docs/` to avoid duplicate API calls. + ### Development Workflow 1. Use the **ticket** command to work with the agent to build out ticket details diff --git a/agent/codebase-analyzer.md b/agent/codebase-analyzer.md index bdb6828e..423fff22 100644 --- a/agent/codebase-analyzer.md +++ b/agent/codebase-analyzer.md @@ -15,6 +15,8 @@ tools: todoread: false todowrite: false webfetch: false + query-complexity-analysis: false + perplexity-search: false --- You are a specialist at understanding HOW code works. Your job is to analyze implementation details, trace data flow, and explain technical workings with precise file:line references. diff --git a/agent/codebase-locator.md b/agent/codebase-locator.md index 34eeab6b..0c101e8d 100644 --- a/agent/codebase-locator.md +++ b/agent/codebase-locator.md @@ -15,6 +15,8 @@ tools: todoread: false todowrite: false webfetch: false + query-complexity-analysis: false + perplexity-search: false --- You are a specialist at finding WHERE code lives in a codebase. Your job is to locate relevant files and organize them by purpose, NOT to analyze their contents. 
diff --git a/agent/codebase-pattern-finder.md b/agent/codebase-pattern-finder.md index db8031d6..810c376f 100644 --- a/agent/codebase-pattern-finder.md +++ b/agent/codebase-pattern-finder.md @@ -15,6 +15,8 @@ tools: todoread: false todowrite: false webfetch: false + query-complexity-analysis: false + perplexity-search: false --- You are a specialist at finding code patterns and examples in the codebase. Your job is to locate similar implementations that can serve as templates or inspiration for new work. diff --git a/agent/thoughts-analyzer.md b/agent/thoughts-analyzer.md index 66be4dce..68c8eaaa 100644 --- a/agent/thoughts-analyzer.md +++ b/agent/thoughts-analyzer.md @@ -15,6 +15,8 @@ tools: todoread: false todowrite: false webfetch: false + query-complexity-analysis: false + perplexity-search: false --- You are a specialist at extracting HIGH-VALUE insights from thoughts documents. Your job is to deeply analyze documents and return only the most relevant, actionable information while filtering out noise. @@ -26,6 +28,8 @@ You are a specialist at extracting HIGH-VALUE insights from thoughts documents. - Find actionable recommendations - Note important constraints or requirements - Capture critical technical details + - Extract provenance and authoritative claims when analyzing cached external docs (thoughts/docs) + - If analyzing cached external docs (thoughts/docs), extract provenance (URL/DOI/version/license) and authoritative claims 2. **Filter Aggressively** - Skip tangential mentions diff --git a/agent/thoughts-locator.md b/agent/thoughts-locator.md index 81bd9256..291b1a51 100644 --- a/agent/thoughts-locator.md +++ b/agent/thoughts-locator.md @@ -15,6 +15,8 @@ tools: todoread: false todowrite: false webfetch: false + query-complexity-analysis: false + perplexity-search: false --- You are a specialist at finding documents in the thoughts/ directory. Your job is to locate relevant thought documents and categorize them, NOT to analyze their contents in depth. 
@@ -26,6 +28,7 @@ You are a specialist at finding documents in the thoughts/ directory. Your job i - Check thoughts/research/ for previous research - Check thoughts/plans/ for previous ipmlentation plans - Check thoughts/tickets/ for current tickets that are unstarted or in progress + - Check thoughts/docs/ for cached external research (web-search-researcher outputs) 2. **Categorize findings by type** - Architecture in architecture/ @@ -33,6 +36,7 @@ You are a specialist at finding documents in the thoughts/ directory. Your job i - Research in research/ - Implementation in plans/ - Reviews in reviews/ + - Web Search and External Sources in docs/ 3. **Return organized results** - Group by document type @@ -49,6 +53,7 @@ thoughts/tickets/ # Ticket documentation thoughts/research/ # Research documents thoughts/plans/ # Implementation plans thoughts/reviews/ # Code Reviews +thoughts/docs/ # Cached external research and web findings ### Search Patterns - Use grep for content searching @@ -69,7 +74,7 @@ Structure your findings like this: - `thoughts/tickets/eng_1234.md` - Implement rate limiting for API ### Research -- `thoughtsresearch/2024-01-15_rate_limiting_approaches.md` - Research on different rate limiting strategies +- `thoughts/research/2024-01-15_rate_limiting_approaches.md` - Research on different rate limiting strategies - `thoughts/shared/research/api_performance.md` - Contains section on rate limiting impact ### Implementation Plans @@ -82,7 +87,10 @@ Structure your findings like this: ### PR Descriptions - `thoughts/shared/prs/pr_456_rate_limiting.md` - PR that implemented basic rate limiting -Total: 8 relevant documents found +### Web Search and External Sources +- `thoughts/docs/2023-02-14_ai_coding_assistants_apis.md` - Web search on handling APIs in AI coding assistants + +Total: 9 relevant documents found ``` ## Search Tips @@ -101,6 +109,7 @@ Total: 8 relevant documents found - Ticket files often named `eng_XXXX.md` - Research files often dated 
`YYYY-MM-DD_topic.md` - Plan files often named `feature-name.md` + - Web searches often named `YYYY-MM-DD_topic.md` ## Important Guidelines diff --git a/agent/web-search-researcher.md b/agent/web-search-researcher.md index 44eeb5d2..bdcda4f9 100644 --- a/agent/web-search-researcher.md +++ b/agent/web-search-researcher.md @@ -10,40 +10,123 @@ tools: list: true bash: false edit: false - write: false + write: true patch: false todoread: false todowrite: false - webfetch: false + webfetch: true + query-complexity-analysis: true + perplexity-search: true --- -# TODO: This doesn't really work with opencode as we dont have search. So we need to determine -# how we want to do this. I think the search should run through perplexity, and then have it -# stripped down to size with something like Haiku or Flash, to then be cached locally in something -# like thoughts/docs +You are an expert web research specialist focused on finding accurate, relevant information from web sources. You intelligently select the appropriate research tool based on query characteristics, leveraging both direct URL fetching and advanced web search capabilities. -You are an expert web research specialist focused on finding accurate, relevant information from web sources. Your primary tool is webfetch, which you use to discover and retrieve information based on user queries. +## Pre-Research Protocol + +BEFORE conducting any new research: + +1. **Check Existing Research**: Follow the detailed search methodology below +2. **Leverage Prior Work**: If relevant research exists (within 90 days), summarize findings and ask user if new research is needed +3. 
**Avoid Duplication**: Only proceed with new research if explicitly requested or no recent relevant research exists + +### How to Check for Existing Research + +Execute this workflow BEFORE any new web research: + +**Step 1: Extract Search Terms** +- Identify 3-5 key terms from the user's query +- Include technical terms, concepts, and synonyms +- Example: Query "OAuth authentication best practices" → terms: ["oauth", "authentication", "auth", "authorization", "security"] + +**Step 2: Search thoughts/docs/ Directory** +1. Use `glob` to find all research files: `thoughts/docs/*.md` +2. Use `grep` with key terms to identify content matches across those files +3. Parse filenames to extract dates (YYYY-MM-DD format from start of filename) + +**Step 3: Filter by Recency and Relevance** +- Calculate file age from filename date (compare to today's date) +- Prioritize files within 90 days (3 months) as "recent" +- Rank candidates by: (number of grep matches × recency bonus) +- Select top 1-2 most promising files for analysis + +**Step 4: Analyze Top Candidates** +For the 1-2 most promising files: +1. Read the file using `read` tool +2. Extract these key sections: + - **Research Date**: When was this research conducted? + - **Research Method**: Which tool/model was used? + - **Summary**: What was discovered? (first paragraph of summary section) + - **Key Findings**: Main takeaways (first 2-3 items from detailed findings) +3. Assess relevance: Does this adequately answer the current query? + +**Step 5: Present Findings or Proceed** +- **IF** relevant recent research found (within 90 days AND answers query): + - Present concise summary (2-3 paragraphs maximum) + - Include: research date, key findings, notable source links if available + - Explicitly state: "Recent research exists from [date]. The findings suggest [1-2 sentence summary]. Would you like me to proceed with new research, or use this existing research?" 
+ - Wait for user response before proceeding +- **ELSE** (no relevant research OR >90 days old OR doesn't answer query): + - Briefly note: "No recent research found on this topic" OR "Existing research from [date] is outdated" + - Proceed directly to new web research without asking + +**Example Search Pattern**: +``` +# Step 1: Query is "best practices for OAuth 2.0 implementation" +Key terms: ["oauth", "oauth2", "authentication", "authorization", "implementation"] + +# Step 2: Find and search files +glob: thoughts/docs/*.md +grep: "(oauth|oauth2|authentication|authorization|implementation)" in matched files + +# Step 3: Filter results +Found: 2025-10-14_oauth_patterns.md → age = 0 days (very recent!) → 5 grep matches +Found: 2025-07-20_api_security.md → age = 86 days (recent) → 2 grep matches +Found: 2024-01-15_auth_methods.md → age = 273 days (too old, skip) + +# Step 4: Read top candidate (2025-10-14_oauth_patterns.md) +Extract: Research Date, Method, Summary, Key Findings + +# Step 5: Present to user or proceed +Present summary and ask if new research needed +``` + +**Search Efficiency Tips**: +- Use regex-capable grep patterns for flexible matching: `(term1|term2|term3)` +- Check both exact terms and common variations (e.g., "oauth" and "oauth2", "auth" and "authentication") +- Don't read every file—only read top 1-2 candidates after filtering +- If glob returns no results, thoughts/docs/ is empty—proceed directly to web research ## Core Responsibilities When you receive a research query, you will: 1. **Analyze the Query**: Break down the user's request to identify: - - Key search terms and concepts + - Whether a specific URL is provided for direct fetching + - Key search terms and concepts for web investigation + - Complexity level requiring simple facts vs. deep analysis - Types of sources likely to have answers (documentation, blogs, forums, academic papers) - - Multiple search angles to ensure comprehensive coverage -2. 
**Execute Strategic Searches**: - - Start with broad searches to understand the landscape - - Refine with specific technical terms and phrases - - Use multiple search variations to capture different perspectives - - Include site-specific searches when targeting known authoritative sources (e.g., "site:docs.stripe.com webhook signature") +2. **Select Appropriate Research Tool**: -3. **Fetch and Analyze Content**: - - Use WebFetch to retrieve full content from promising search results - - Prioritize official documentation, reputable technical blogs, and authoritative sources + **Decision Logic:** + + - IF query contains a single, specific URL: + - Use `webfetch` to retrieve and analyze that content directly + + - ELSE IF query requires broad web investigation: + - Execute `query-complexity-analysis` with the research query + - Extract the recommended Perplexity model from the analysis result + - Execute `perplexity-search` with the query and recommended model + - IF perplexity-search is unavailable or fails, immediately follow the Fallback Procedure (see Fallback Procedure section) + + **Frugality Principle**: Use the simplest tool capable of answering the query. Only escalate to more advanced models if initial results prove insufficient. + +3. **Execute Research and Analyze Content**: + - Retrieve content using the selected tool - Extract specific quotes and sections relevant to the query - Note publication dates to ensure currency of information + - Prioritize official documentation, reputable technical blogs, and authoritative sources + - Cross-reference multiple perspectives when available 4. 
**Synthesize Findings**: - Organize information by relevance and authority @@ -77,13 +160,36 @@ When you receive a research query, you will: - Find benchmarks and performance comparisons - Search for decision matrices or evaluation criteria -## Output Format +## Output Format and File Storage -Structure your findings as: +**ALL research must be saved to**: `thoughts/docs/YYYY-MM-DD_topic.md` + +**Naming Convention**: +- Date in kebab-case: `YYYY-MM-DD` (e.g., `2025-10-14`) +- Topic in snake_case: extracted from query (e.g., `oauth_authentication_patterns`) + +**Citation Mandate**: + +Every external source referenced in your research report MUST include a functional hyperlink to its origin when available. This applies to: +- Direct quotes +- Paraphrased information +- Data points and statistics +- Code examples +- Any fact or claim not considered common knowledge + +Always include source URLs for full traceability. If a URL is unavailable, explicitly note this limitation in the source attribution. + +**Required Document Structure**: + +```markdown +# [Research Topic Title] + +**Research Date**: YYYY-MM-DD +**Research Method**: [webfetch | Perplexity Sonar Pro | Perplexity Sonar Reasoning Pro | Perplexity Sonar Deep Research] +**Query**: [Original user query] -``` ## Summary -[Brief overview of key findings] +[2-4 sentence executive summary of key findings] ## Detailed Findings @@ -91,8 +197,9 @@ Structure your findings as: **Source**: [Name with link] **Relevance**: [Why this source is authoritative/useful] **Key Information**: -- Direct quote or finding (with link to specific section if possible) -- Another relevant point +- Direct quote or finding [with link to specific section when available] +- Another relevant point [with link when available] +**Details**: [2-3 sentences for additional details and main takeaways] ### [Topic/Source 2] [Continue pattern...] 
@@ -103,23 +210,143 @@ Structure your findings as: ## Gaps or Limitations [Note any information that couldn't be found or requires further investigation] + +## Research Metadata +- **Complexity Assessment**: [If query-complexity-analysis was used] +- **Model Recommendation**: [If perplexity-search was used, which model] +- **Research Duration**: [Approximate time spent] +- **Primary Tool**: [perplexity-search | webfetch | other] +- **Primary Tool Status**: [SUCCESS | UNAVAILABLE | FAILED - error reason] +- **Fallback Tier Activated**: [N/A | Tier 1 | Tier 2 | Tier 3] +- **Fallback Tool Used**: [N/A | tool name | webfetch | none] +- **Research Completeness**: [Comprehensive | Partial-SingleSource | Failed] ``` ## Quality Guidelines -- **Accuracy**: Always quote sources accurately and provide direct links +- **Accuracy**: Always quote sources accurately and provide direct links when available - **Relevance**: Focus on information that directly addresses the user's query - **Currency**: Note publication dates and version information when relevant - **Authority**: Prioritize official sources, recognized experts, and peer-reviewed content - **Completeness**: Search from multiple angles to ensure comprehensive coverage - **Transparency**: Clearly indicate when information is outdated, conflicting, or uncertain -## Search Efficiency +## Research Efficiency -- Start with 2-3 well-crafted searches before fetching content -- Fetch only the most promising 3-5 pages initially -- If initial results are insufficient, refine search terms and try again +- **Execute the Pre-Research Protocol** (see "How to Check for Existing Research" above) before any web research +- **For URL-specific tasks**: Use webfetch directly without complexity analysis +- **For web research**: + 1. Run `query-complexity-analysis` to determine appropriate Perplexity model + 2. Execute `perplexity-search` with the recommended model + 3. 
Start with the recommended model; only escalate to more advanced models if results prove insufficient +- **Be frugal but thorough**: Prioritize lightweight, fast approaches over exhaustive searches when appropriate +- **Document everything**: Save all research to `thoughts/docs/` with proper file-naming convention - Use search operators effectively: quotes for exact phrases, minus for exclusions, site: for specific domains -- Consider searching in different forms: tutorials, documentation, Q&A sites, and discussion forums -Remember: You are the user's expert guide to web information. Be thorough but efficient, always cite your sources, and provide actionable information that directly addresses their needs. Think deeply as you work. +## Tool Usage Notes + +### perplexity-search (Primary for Complex Queries) +- **Use when**: Broad web investigation is needed without a specific URL +- **Models**: + - `sonar-pro`: Simple factual queries + - `sonar-reasoning-pro`: Complex reasoning, comparisons, explanations + - `sonar-deep-research`: Comprehensive research, in-depth analysis +- **Returns**: Answer report with citations and source links +- **On Failure**: See Fallback Procedure section + +### query-complexity-analysis +- **Use when**: Need to determine the appropriate Perplexity model for web research +- **Returns**: Recommended model (sonar-pro, sonar-reasoning-pro, or sonar-deep-research) with reasoning +- **Factors considered**: Research keywords, complexity indicators, technical terms, temporal context + +### webfetch (Built-in, Always Available) +- **Use when**: + - Query provides a single, specific URL to analyze + - Fallback Tier 2: when no other web search tools available +- **Best for**: Extracting information from known documentation pages, blog posts, or specific articles +- **Limitation**: Cannot discover sources; requires URL to be provided +- **Guarantee**: This tool is built into Opencode and always available + +## Fallback Procedure + +**IMPORTANT**: 
This procedure activates when `perplexity-search` is unavailable or fails: +- **Unavailable**: Tool is deactivated in configuration (`perplexity-search: false` in YAML) +- **Failed**: Execution fails (API error, authentication failure, rate limit, timeout, or any other error) + +Execute this tiered fallback strategy to ensure research completion: + +### Tier 1: Discover and Use Alternative Web Search Tools + +**Objective**: Find ANY available tool capable of performing comprehensive web searches (beyond single-URL fetching). + +**Discovery Heuristics** - Look for tools that exhibit these characteristics: +- **Input**: Accepts a search query/question as a parameter (not just a URL) +- **Output**: Returns information from multiple web sources or search results +- **Capability**: More comprehensive than single-URL fetching + +**Common Tool Name Patterns** (not exhaustive): +- Names containing: `search`, `web-search`, `websearch`, `query`, `research` +- Names containing: `perplexity`, `playwright`, `tavily`, `exa`, `serper`, `brave-search`, `google` +- MCP server tools that perform web searches + +**Tool Selection Strategy**: +1. **Scan available tools**: Review what tools are currently accessible beyond the standard set +2. **Identify web search capabilities**: Look for tools matching the discovery heuristics above +3. **Prioritize by comprehensiveness**: + - **Tier 1A (Most Preferred)**: Tools that search + fetch full page content + - **Tier 1B (Acceptable)**: Tools that return search results/summaries (then use `webfetch` on top results) + +**Execution**: +- IF alternative web search tool(s) found: + - Select most comprehensive tool available + - Execute with the same query used for perplexity-search + - Process results and synthesize findings + - Document which tool was used as fallback +- IF no alternative web search tools found → Proceed to Tier 2 + +**Note**: This tier is **opportunistic** - availability depends on user's MCP server configuration. 
+ +### Tier 2: Single-URL Fallback with webfetch + +**Activate when**: No alternative web search tools available OR all Tier 1 tools failed + +**webfetch** is built into Opencode and always available, but requires a specific URL. + +**Procedure**: + +1. **Construct Target URL** from query (extract primary topic/technology): + - **Libraries/frameworks**: `https://[name].org/docs/` or `https://docs.[name].com` + - **APIs/services**: `https://docs.[service].com/` or `https://developers.[service].com/docs/` + - **Web standards**: `https://developer.mozilla.org/en-US/docs/Web/[topic]` + - **General concepts**: `https://en.wikipedia.org/wiki/[Concept]` + +2. **Execute webfetch** with constructed URL and extract relevant sections + +3. **Acknowledge Limitation** - Include in research output: + ``` + ⚠️ **Research Limitation Notice** + This research was completed using a single authoritative source due to primary + search tool unavailability. Results may not represent comprehensive coverage. + Recommendation: Cross-verify findings with additional sources manually. + ``` + +### Tier 3: Complete Failure + +**Activate when**: Even webfetch fails + +**Action**: Report comprehensive error log to user with: +- All attempted tools and failure reasons +- Possible causes (network, API keys, rate limits) +- Recommended actions (check connectivity, verify config, retry) +- Do NOT create empty research file + +### Logging Requirements + +For ANY fallback activation, document the following in the Research Metadata section (see Output Format section): +- **Primary Tool**: perplexity-search +- **Primary Tool Status**: [UNAVAILABLE - tool deactivated | FAILED - error reason] +- **Fallback Tier Activated**: [Tier 1 / Tier 2 / Tier 3] +- **Fallback Tool Used**: [tool name / webfetch / none] +- **Research Completeness**: [Comprehensive / Partial-SingleSource / Failed] + +Remember: You are the user's expert guide to web information. 
Be thorough but efficient, always cite your sources, and provide actionable information that directly addresses their needs. Think deeply as you work, and maintain a systematic approach to research that balances comprehensiveness with practicality. \ No newline at end of file diff --git a/command/plan.md b/command/plan.md index 3afb3208..01a149ff 100644 --- a/command/plan.md +++ b/command/plan.md @@ -24,6 +24,7 @@ You are tasked with creating detailed implementation plans through an interactiv - Use the **codebase-locator** task to find all files related to the files given by the user - Use the **codebase-analyzer** task to understand how the current implementation works - If relevant, use the **thoughts-locator** task to find any existing thoughts documents about this feature + - If relevant, use the **web-search-researcher** task to find additional external authoritative context and capture provenance These agents will: - Find relevant source files, configs, and tests @@ -83,6 +84,7 @@ After getting initial clarifications: **For historical context:** - **thoughts-locator** - To find any research, plans, or decisions about this area - **thoughts-analyzer** - To extract key insights from the most relevant documents + - **web-search-researcher** - To pull authoritative external references when needed Each agent knows how to: - Find the right files and code patterns @@ -230,6 +232,7 @@ After structure approval: - Original ticket: `thoughts/tickets/eng_XXXX.md` - Related research: `thoughts/research/[relevant].md` +- External sources: `thoughts/docs/YYYY-MM-DD_[relevant].md` - Similar implementation: `[file:line]` ``` diff --git a/command/research.md b/command/research.md index d9c8fc8f..4b30d494 100644 --- a/command/research.md +++ b/command/research.md @@ -30,6 +30,7 @@ The user will provide a ticket for you to read and begin researching. 
**Phase 1 - Locate (Codebase & Thoughts):** - Identify all topics/components/areas you need to locate - Group related topics into coherent batches + - Spawn **web-search-researcher** agents in parallel to gather external authoritative context and capture provenance - Spawn **codebase-locator** agents in parallel for each topic group to find WHERE files and components live - Simultaneously spawn **thoughts-locator** agents in parallel to discover relevant documents - **WAIT** for all locator agents to complete before proceeding @@ -117,6 +118,7 @@ Use the following metadata for the research document frontmatter: [Relevant insights from thoughts/ directory with references] - `thoughts/research/something.md` - Historical decision about X - `thoughts/plans/build-thing.md` - Past exploration of Y + - `thoughts/docs/YYYY-MM-DD_some-topic.md` - Past searches of Z topic ## Related Research [Links to other research documents in thoughts/shared/research/] diff --git a/dependencies/bun.opencode.lock b/dependencies/bun.opencode.lock new file mode 100644 index 00000000..392af71b --- /dev/null +++ b/dependencies/bun.opencode.lock @@ -0,0 +1,18 @@ +{ + "lockfileVersion": 1, + "workspaces": { + "": { + "name": "agentic-tools-dependencies", + "dependencies": { + "@opencode-ai/plugin": "^0.15.3", + }, + }, + }, + "packages": { + "@opencode-ai/plugin": ["@opencode-ai/plugin@0.15.3", "", { "dependencies": { "@opencode-ai/sdk": "0.15.3", "zod": "4.1.8" } }, "sha512-XVOV/pRlU1r2koBacLVxLqPQUmkUUgaD2oHYchMDq67n35SojilsqDKZ/TatZIa/PGWC94X5L7qXt0XYDDYrhA=="], + + "@opencode-ai/sdk": ["@opencode-ai/sdk@0.15.3", "", {}, "sha512-Ia6Qg5Duo8RexSGRCIXo7amOJkdIvwjV8B3b+5UWLjfZWbCKiqD9YTmyGni1B2KAYWfpXABUKJ9OH3M6tUiJIg=="], + + "zod": ["zod@4.1.8", "", {}, "sha512-5R1P+WwQqmmMIEACyzSvo4JXHY5WiAFHRMg+zBZKgKS+Q1viRa0C1hmUKtHltoIFKtIdki3pRxkmpP74jnNYHQ=="], + } +} diff --git a/dependencies/package.opencode.json b/dependencies/package.opencode.json new file mode 100644 index 00000000..fa8cb9b8 --- 
/dev/null +++ b/dependencies/package.opencode.json @@ -0,0 +1,13 @@ +{ + "name": "agentic-tools-dependencies", + "version": "1.0.0", + "description": "Dependencies for agentic OpenCode tools", + "type": "module", + "private": true, + "dependencies": { + "@opencode-ai/plugin": "^0.15.3" + }, + "engines": { + "node": ">=18.0.0" + } +} \ No newline at end of file diff --git a/docs/agentic.md b/docs/agentic.md index c6ad3824..5c3ff311 100644 --- a/docs/agentic.md +++ b/docs/agentic.md @@ -37,6 +37,8 @@ agentic pull --ignore-frontmatter **What it does:** - Creates `.opencode` directory if it doesn't exist - Copies all files from `agent/` and `command/` directories +- Copies custom tools from `tool/` directory +- Copies dependency files (package.json, bun.lock) for tool support - Preserves directory structure - Reports progress for each file copied - When `--ignore-frontmatter` is used: preserves existing frontmatter in target .md files @@ -44,14 +46,18 @@ agentic pull --ignore-frontmatter **Output:** ``` 📦 Pulling to: /home/user/projects/my-app/.opencode -📁 Including: agent, command +📁 Including: agent, command, tool ✓ Copied: agent/codebase-analyzer.md ✓ Copied: agent/codebase-locator.md ✓ Copied: command/research.md ✓ Copied: command/plan.md + ✓ Copied: tool/perplexity-search.ts + ✓ Updated: package.json -✅ Pulled 10 files +✅ Updated 14 files +📦 Updated 2 dependency files +💡 Run 'bun install' in the .opencode directory to install tool dependencies ``` ### `agentic status [project-path]` diff --git a/docs/agents.md b/docs/agents.md index 39eb38dd..a7421ddd 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -83,18 +83,35 @@ Agents are specialized AI assistants that perform focused tasks within the Agent ### Web Agent #### web-search-researcher -**Purpose**: Perform web searches and analyze content. +**Purpose**: Perform intelligent web research with automatic caching and fallback mechanisms. 
**Capabilities**: -- Fetches web pages -- Analyzes documentation -- Extracts relevant information -- Provides summaries +- Searches using Perplexity API with model selection based on query complexity +- Caches research in `thoughts/docs/` to avoid duplicate API calls (90-day freshness check) +- Automatically falls back to alternative search tools or webfetch if Perplexity unavailable +- Provides properly cited research with source URLs + +**Models Used**: +- `sonar-pro`: Simple factual queries (fastest, most cost-effective) +- `sonar-reasoning-pro`: Complex comparisons and reasoning tasks +- `sonar-deep-research`: Comprehensive research requiring in-depth analysis **Use Cases**: -- Researching external libraries -- Finding documentation -- Gathering best practices +- Researching external libraries and frameworks +- Finding best practices and design patterns +- Gathering documentation from authoritative sources +- Understanding industry standards and conventions + +**Requirements**: +- `PERPLEXITY_API_KEY` environment variable +- Tool dependencies installed (`bun install` in `.opencode/`) +- Custom tools enabled in OpenCode + +**Fallback Strategy**: +1. Primary: Perplexity API with intelligent model selection +2. Secondary: Alternative web search tools (from MCP servers) +3. Tertiary: Direct webfetch for single URLs +4. Reports which method was used in research metadata ## Agent Coordination diff --git a/docs/commands.md b/docs/commands.md index 5d530ba0..d27c36cc 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -35,7 +35,8 @@ Example: 2. Spawns codebase-locator agents for discovery 3. Spawns analyzer agents for deep dives 4. Searches thoughts/ for historical context -5. Synthesizes findings into research document +5. Optionally spawns web-search-researcher for external sources +6. 
Synthesizes findings into research document **Output**: `thoughts/research/YYYY-MM-DD_topic.md` diff --git a/docs/thoughts.md b/docs/thoughts.md index 7bd637ac..bfe35caa 100644 --- a/docs/thoughts.md +++ b/docs/thoughts.md @@ -13,6 +13,7 @@ thoughts/ ├── research/ # Analysis and findings ├── plans/ # Implementation specifications ├── reviews/ # Post-implementation validation +├── docs/ # External web research cache └── archive/ # Outdated documents (excluded from searches) ``` @@ -120,6 +121,29 @@ Any relevant background information - Documents implementation reality - Captures improvements for future +### docs/ + +**Purpose**: Cache external web research to avoid duplicate API calls and preserve sources. + +**File Format**: `YYYY-MM-DD_topic.md` + +**Content Structure**: +- Research metadata (date, method, query) +- Summary of findings +- Detailed information with citations +- Source URLs for verification + +**Usage**: +- Automatically populated by `web-search-researcher` agent +- Checked before new web searches (90-day freshness) +- Referenced during research and planning phases +- Provides authoritative external sources + +**Important Notes**: +- Created automatically when web research is performed +- Can use Perplexity API key to populate +- Falls under same search patterns as other thoughts directories + ### archive/ **Purpose**: Store outdated documents that are no longer relevant. diff --git a/docs/usage.md b/docs/usage.md index 7093fbb3..b7f94d78 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -73,6 +73,7 @@ This produces a research document in `thoughts/research/` with findings about: - Relevant files and components - Architecture patterns to follow - Integration points +- External best practices (when web research is enabled) ### 3. 
Planning Phase diff --git a/scripts/publish.ts b/scripts/publish.ts index db41f3d3..261d2afa 100755 --- a/scripts/publish.ts +++ b/scripts/publish.ts @@ -65,7 +65,7 @@ for (const platform of platforms) { ); // Copy agent and command directories to platform package - const dirsToCopy = ["agent", "command", "docs"]; + const dirsToCopy = ["agent", "command", "tool", "docs", "dependencies"]; for (const dir of dirsToCopy) { const srcDir = path.join(process.cwd(), dir); const destDir = path.join(pkgDir, dir); @@ -189,7 +189,7 @@ for (const file of filesToCopy) { } // Copy agent and command directories -const dirsToCopy = ["agent", "command", "docs"]; +const dirsToCopy = ["agent", "command", "tool", "docs", "dependencies"]; for (const dir of dirsToCopy) { const srcDir = path.join(process.cwd(), dir); const destDir = path.join(mainPkgDir, dir); @@ -270,11 +270,11 @@ if (previousReleaseTag) { } : {} } ); - + if (response.ok) { const data = await response.json() as { commits: Array<{ commit: { message: string } }> }; const commits = data.commits || []; - + const notes = commits .map(commit => { const msg = commit.commit.message.split('\n')[0]; // First line only @@ -283,13 +283,13 @@ if (previousReleaseTag) { .filter(msg => { const lower = msg.toLowerCase(); return !lower.includes("release:") && - !lower.includes("chore:") && - !lower.includes("ci:") && - !lower.includes("wip:") && - !lower.includes("docs:") && - !lower.includes("doc:"); + !lower.includes("chore:") && + !lower.includes("ci:") && + !lower.includes("wip:") && + !lower.includes("docs:") && + !lower.includes("doc:"); }); - + if (notes.length > 0) { releaseNotes += notes.join('\n'); } else { diff --git a/src/cli/init.ts b/src/cli/init.ts index 59947a89..50362547 100644 --- a/src/cli/init.ts +++ b/src/cli/init.ts @@ -13,20 +13,20 @@ interface AgenticConfig { export async function init(projectPath?: string, thoughtsDirOverride?: string): Promise { const isInteractive = !thoughtsDirOverride; const rl = isInteractive 
? readline.createInterface({ input, output }) : null; - + try { // Resolve the project path const targetPath = projectPath ? resolve(projectPath) : process.cwd(); const opencodeDir = join(targetPath, ".opencode"); const configPath = join(opencodeDir, "agentic.json"); - + // Check if already initialized if (existsSync(configPath)) { if (isInteractive && rl) { const overwrite = await rl.question( "Agentic is already initialized in this project. Do you want to reinitialize? (y/N): " ); - + if (overwrite.toLowerCase() !== "y") { console.log("Initialization cancelled."); return; @@ -35,15 +35,15 @@ export async function init(projectPath?: string, thoughtsDirOverride?: string): console.log("Agentic is already initialized. Reinitializing..."); } } - + console.log("\n🚀 Initializing Agentic for your project...\n"); - + // Create .opencode directory if it doesn't exist if (!existsSync(opencodeDir)) { mkdirSync(opencodeDir, { recursive: true }); console.log(`✅ Created .opencode directory`); } - + // Determine thoughts directory location let thoughtsDir: string; if (thoughtsDirOverride) { @@ -57,24 +57,25 @@ export async function init(projectPath?: string, thoughtsDirOverride?: string): } else { thoughtsDir = "thoughts"; } - + // Resolve thoughts directory path const thoughtsPath = join(targetPath, thoughtsDir); - + // Create thoughts directory structure const thoughtsSubDirs = [ "architecture", - "tickets", + "tickets", "research", "plans", - "reviews" + "reviews", + "docs" ]; - + if (!existsSync(thoughtsPath)) { mkdirSync(thoughtsPath, { recursive: true }); console.log(`✅ Created ${thoughtsDir} directory`); } - + for (const subDir of thoughtsSubDirs) { const subDirPath = join(thoughtsPath, subDir); if (!existsSync(subDirPath)) { @@ -82,7 +83,7 @@ export async function init(projectPath?: string, thoughtsDirOverride?: string): console.log(` ✅ Created ${thoughtsDir}/${subDir}`); } } - + // Create config object const config: AgenticConfig = { thoughts: thoughtsDir, @@ -90,11 
+91,11 @@ export async function init(projectPath?: string, thoughtsDirOverride?: string): model: "sonic-fast" } }; - + // Write config file writeFileSync(configPath, JSON.stringify(config, null, 2)); console.log(`\n✅ Created agentic.json configuration file`); - + // Create a README in thoughts directory const readmePath = join(thoughtsPath, "README.md"); if (!existsSync(readmePath)) { @@ -109,20 +110,26 @@ This directory contains structured documentation for your project: - **research/** - Research notes, investigations, and findings - **plans/** - Project plans, roadmaps, and implementation strategies - **reviews/** - Code reviews, retrospectives, and assessments +- **docs/** - External web research and documentation from authoritative sources ## Usage These directories are used by Agentic to organize and retrieve contextual information about your project. `; - + writeFileSync(readmePath, readmeContent); console.log(`✅ Created ${thoughtsDir}/README.md`); } - + console.log("\n🎉 Agentic initialization complete!"); console.log(`\nConfiguration saved to: ${configPath}`); console.log(`Thoughts directory created at: ${thoughtsPath}`); - + + console.log("\n💡 Next steps:"); + console.log(" 1. Run 'agentic pull' to deploy agents, commands, and tools"); + console.log(" 2. Navigate to .opencode/ and run 'bun install' to install tool dependencies"); + console.log(" 3. Set PERPLEXITY_API_KEY environment variable for web research features"); + } finally { if (rl) { rl.close(); diff --git a/src/cli/pull.ts b/src/cli/pull.ts index 3d2ec690..b8c99bec 100644 --- a/src/cli/pull.ts +++ b/src/cli/pull.ts @@ -112,4 +112,46 @@ export async function pull( } console.log(`\n✅ Updated ${filesToCopy.length} file${filesToCopy.length === 1 ? 
"" : "s"}`); + + // Copy dependency files for tool support + await copyDependencyFiles(sourceDir, targetBase); +} + +async function copyDependencyFiles(sourceDir: string, targetBase: string) { + const dependencyFiles = [ + { source: 'package.opencode.json', target: 'package.json' }, + { source: 'bun.opencode.lock', target: 'bun.lock' } + ]; + + let copiedCount = 0; + + for (const { source, target } of dependencyFiles) { + const sourceFile = join(sourceDir, 'dependencies', source); + const targetFile = join(targetBase, target); + + if (existsSync(sourceFile)) { + try { + // Check if target file exists and is different + let shouldCopy = true; + if (existsSync(targetFile)) { + const sourceContent = await Bun.file(sourceFile).text(); + const targetContent = await Bun.file(targetFile).text(); + shouldCopy = sourceContent !== targetContent; + } + + if (shouldCopy) { + await copyFile(sourceFile, targetFile); + console.log(` ✅ Updated: ${target}`); + copiedCount++; + } + } catch (error) { + console.log(` ⚠️ Warning: Could not copy ${source} to ${target}`); + } + } + } + + if (copiedCount > 0) { + console.log(`\n📦 Updated ${copiedCount} dependency file${copiedCount === 1 ? 
"" : "s"}`); + console.log("💡 Run 'bun install' or 'npm install' in the .opencode directory to install tool dependencies"); + } } diff --git a/src/cli/status.ts b/src/cli/status.ts index 6fafb8cf..aef1c34a 100644 --- a/src/cli/status.ts +++ b/src/cli/status.ts @@ -1,5 +1,5 @@ import { join } from "node:path"; -import { resolveProjectPath, findOutOfSyncFiles } from "./utils"; +import { resolveProjectPath, findOutOfSyncFiles, checkDependencyStatus } from "./utils"; export async function status( projectPath: string | undefined, @@ -28,11 +28,11 @@ export async function status( // Display files by status for (const file of syncStatus) { if (file.status === 'up-to-date') { - console.log(`✅ ${file.path}`); + console.log(` ✓ ${file.path}`); } else if (file.status === 'outdated') { - console.log(`⚠️ ${file.path} (outdated)`); + console.log(` ⚠️ ${file.path} (outdated)`); } else if (file.status === 'missing') { - console.log(`❌ ${file.path} (missing)`); + console.log(` ❌ ${file.path} (missing)`); } } @@ -49,4 +49,32 @@ export async function status( console.log(`\n⚠️ ${totalIssues} file${totalIssues === 1 ? "" : "s"} need${totalIssues === 1 ? "s" : ""} updating`); console.log("Run 'agentic pull' to sync the files"); } + + // Check dependency status if not using global config + if (!useGlobal) { + const depStatus = await checkDependencyStatus(resolvedProjectPath); + + console.log("\n🔧 Tool Dependencies:"); + console.log(` ${depStatus.packageJsonExists ? "✓" : "❌"} package.json`); + console.log(` ${depStatus.nodeModulesExists ? "✓" : "❌"} node_modules`); + console.log(` ${depStatus.pluginInstalled ? "✓" : "❌"} @opencode-ai/plugin`); + console.log(` ${depStatus.perplexityApiKeySet ? 
"✓" : "❌"} PERPLEXITY_API_KEY`); + + const depIssues = [ + !depStatus.packageJsonExists, + !depStatus.nodeModulesExists, + !depStatus.pluginInstalled, + !depStatus.perplexityApiKeySet, + ].filter(Boolean).length; + + if (depIssues > 0) { + console.log(`\n⚠️ ${depIssues} dependency issue${depIssues === 1 ? "" : "s"} found`); + if (!depStatus.nodeModulesExists) { + console.log("💡 Run 'bun install' or 'npm install' in the .opencode directory to install tool dependencies"); + } + if (!depStatus.perplexityApiKeySet) { + console.log("💡 Set PERPLEXITY_API_KEY environment variable for web research features"); + } + } + } } \ No newline at end of file diff --git a/src/cli/utils.ts b/src/cli/utils.ts index 8b8e0654..9e43c0d5 100644 --- a/src/cli/utils.ts +++ b/src/cli/utils.ts @@ -15,6 +15,27 @@ export interface FileSync { status: 'up-to-date' | 'outdated' | 'missing'; } +export interface DependencyStatus { + packageJsonExists: boolean; + nodeModulesExists: boolean; + pluginInstalled: boolean; + perplexityApiKeySet: boolean; +} + +export async function checkDependencyStatus(projectPath: string): Promise { + const opencodeDir = join(projectPath, ".opencode"); + const packageJsonPath = join(opencodeDir, "package.json"); + const nodeModulesPath = join(opencodeDir, "node_modules"); + const pluginPath = join(nodeModulesPath, "@opencode-ai", "plugin"); + + return { + packageJsonExists: existsSync(packageJsonPath), + nodeModulesExists: existsSync(nodeModulesPath), + pluginInstalled: existsSync(pluginPath), + perplexityApiKeySet: !!process.env.PERPLEXITY_API_KEY, + }; +} + async function* walkDir(dir: string): AsyncGenerator { const files = await readdir(dir, { withFileTypes: true }); for (const file of files) { @@ -151,18 +172,18 @@ export async function findOutOfSyncFiles( // Resolve the agent model with proper priority const resolvedModel = await resolveAgentModel(agentModel, resolvedProjectPath); - + // Directories to sync - const dirsToSync = ["agent", "command"]; - + const 
dirsToSync = ["agent", "command", "tool"]; + // Only check files from agentic source against target for (const dir of dirsToSync) { const sourceDirPath = join(sourceDir, dir); if (!existsSync(sourceDirPath)) continue; - + const stats = await stat(sourceDirPath); if (!stats.isDirectory()) continue; - + for await (const sourceFile of walkDir(sourceDirPath)) { const relativePath = sourceFile.slice(sourceDir.length + 1); const targetFile = join(targetPath, relativePath); @@ -207,61 +228,61 @@ export async function findOutOfSyncFiles( } } } - + return results; } export function resolveProjectPath(providedPath?: string, useGlobal: boolean = false): string { const home = homedir(); - + // If using global flag, return the global config directory if (useGlobal) { const globalDir = join(home, ".config", "opencode"); - + // Create the directory if it doesn't exist if (!existsSync(globalDir)) { mkdirSync(globalDir, { recursive: true }); } - + return globalDir; } - + if (providedPath) { // Path was provided, check if .opencode exists const resolvedPath = resolve(providedPath); const opencodeDir = join(resolvedPath, ".opencode"); - + if (!existsSync(opencodeDir)) { console.error(`Error: No .opencode directory found at ${opencodeDir}`); process.exit(1); } - + return resolvedPath; } - + // No path provided, start searching from current directory const cwd = process.cwd(); - + // Ensure we're in a subdirectory of $HOME if (!cwd.startsWith(home)) { console.error(`Error: Current directory is not within home directory (${home})`); console.error("Automatic project detection only works within your home directory"); process.exit(1); } - + // Search upward for .opencode directory let currentDir = cwd; - + while (currentDir !== home && currentDir !== "/") { const opencodeDir = join(currentDir, ".opencode"); - + if (existsSync(opencodeDir)) { return currentDir; } - + currentDir = dirname(currentDir); } - + // No .opencode found console.error("Error: No .opencode directory found in current 
directory or any parent directories"); console.error("Please run this command from a project directory or specify a path"); diff --git a/tool/perplexity-search.ts b/tool/perplexity-search.ts new file mode 100644 index 00000000..f8acc746 --- /dev/null +++ b/tool/perplexity-search.ts @@ -0,0 +1,92 @@ +import { tool } from '@opencode-ai/plugin'; + +export default tool({ + name: 'perplexity-search', + description: 'Search using Perplexity API with specified model.', + parameters: { + type: 'object', + properties: { + query: { + type: 'string', + minLength: 1, + maxLength: 2000, + description: 'The search query to send to Perplexity.' + }, + model: { + type: 'string', + enum: ['sonar-pro', 'sonar-reasoning-pro', 'sonar-deep-research'], + default: 'sonar-pro', + description: 'The specific Perplexity model to use for the search.' + } + }, + required: ['query'], + additionalProperties: false + }, + async execute(args) { + const { query, model } = args; + + // Model-specific configuration + const temperature = model === 'sonar-pro' ? 0.2 + : model === 'sonar-reasoning-pro' ? 0.3 + : 0.4; + + // Environment variable validation + const apiKey = process.env.PERPLEXITY_API_KEY; + if (!apiKey) { + throw new Error('PERPLEXITY_API_KEY required. Get one at https://www.perplexity.ai/settings/api'); + } + + if (!apiKey.startsWith('pplx-')) { + throw new Error('PERPLEXITY_API_KEY should start with "pplx-". 
Get a valid key at https://www.perplexity.ai/settings/api'); + } + + // API request to Perplexity + const response = await fetch('https://api.perplexity.ai/chat/completions', { + method: 'POST', + headers: { + 'Authorization': `Bearer ${apiKey}`, + 'Content-Type': 'application/json' + }, + body: JSON.stringify({ + model, + messages: [{ role: 'user', content: query }], + temperature, + search_mode: 'academic', + media_response: { + overrides: { + return_videos: false, + return_images: false + } + }, + ...(model === 'sonar-deep-research' && { reasoning_effort: 'high' }) + }) + }); + + // Handle HTTP status codes + if (!response.ok) { + if (response.status === 401) { + throw new Error('Authentication failed. Check your PERPLEXITY_API_KEY at https://www.perplexity.ai/settings/api'); + } else if (response.status === 429) { + throw new Error('Rate limit exceeded. Please wait before making another request.'); + } else if (response.status >= 500) { + throw new Error(`Perplexity server error: ${response.status} ${response.statusText}`); + } else { + throw new Error(`Perplexity API error: ${response.status} ${response.statusText}`); + } + } + + const data: any = await response.json(); + let content = data.choices[0].message.content; + + // Extract and format citations + if (data.search_results?.length > 0) { + content += "\n\nCitations:\n"; + data.search_results.forEach((result: any, i: number) => { + const citation = `[${i + 1}] ${result.title} - ${result.url}`; + content += citation + '\n'; + }); + } + + return content; + } +}); \ No newline at end of file diff --git a/tool/query-complexity-analysis.ts b/tool/query-complexity-analysis.ts new file mode 100644 index 00000000..e784f6bd --- /dev/null +++ b/tool/query-complexity-analysis.ts @@ -0,0 +1,82 @@ +import { tool } from '@opencode-ai/plugin'; + +export default tool({ + name: 'query-complexity-analysis', + description: 'Simple heuristic to recommend which Perplexity model to use based on query keywords.', + parameters: 
{ + type: 'object', + properties: { + query: { + type: 'string', + minLength: 1, + description: 'The user query to analyze for complexity and intent.' + } + }, + required: ['query'], + additionalProperties: false + }, + async execute(args) { + const { query } = args; + // Validate query input and provide clear error messages + if (!query?.trim()) { + throw new Error('Query cannot be empty'); + } + + const q = query.toLowerCase(); + + function buildPattern(keywords: string[]): RegExp { + const escaped = keywords.map(k => k.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')); + return new RegExp('\\b(' + escaped.join('|') + ')\\b', 'i'); + } + + const researchPattern = buildPattern([ + 'analyze', 'analysis', 'investigate', 'investigation', 'comprehensive', 'in-depth', 'detailed analysis', + 'trends in', 'future of', 'evolution of', 'state of', 'review of', + 'survey of', 'systematic', 'thorough', 'extensive', 'deep dive' + ]); + + const complexPattern = buildPattern([ + 'how', 'why', 'explain', 'compare', 'contrast', 'difference between', + 'pros and cons', 'advantage', 'advantages', 'disadvantage', 'disadvantages', + 'benefit', 'benefits', 'drawback', 'drawbacks', + 'relationship between', 'impact of', 'effect of', 'cause', 'reason' + ]); + + const technicalPattern = buildPattern([ + 'algorithm', 'algorithms', 'framework', 'frameworks', 'architecture', + 'implementation', 'implementations', 'protocol', 'protocols', + 'methodology', 'methodologies', 'technique', 'techniques', 'approach', 'approaches', + 'system', 'systems', 'model', 'models', 'theory', 'theories', + 'concept', 'concepts', 'principle', 'principles', 'mechanism', 'mechanisms', + 'process', 'processes', 'procedure', 'procedures', + 'typescript', 'javascript', 'vue', 'python', 'react', 'node', 'api', + 'dataset', 'datasets', 'database', 'databases', 'machine learning', 'ai', + 'blockchain', 'cloud', 'devops' + ]); + + const temporalPattern = buildPattern([ + 'latest', 'current', 'recent', 'now', 'today', 'this 
year', 'new', + 'update', 'updated', 'updates', 'modern', 'contemporary', 'up-to-date', 'fresh', 'live' + ]); + + // Simple rule-based classification (hierarchical if/then rules) + + // 1. Research keywords → deep research model + if (researchPattern.test(q)) { + return `Recommended model: sonar-deep-research\nReasoning: Research/analysis keywords detected`; + } + + // 2. Complex reasoning → reasoning model + if (complexPattern.test(q)) { + return `Recommended model: sonar-reasoning-pro\nReasoning: Complex reasoning keywords detected`; + } + + // 3. Technical + temporal (current tech) → reasoning model + if (technicalPattern.test(q) && temporalPattern.test(q)) { + return `Recommended model: sonar-reasoning-pro\nReasoning: Technical query with temporal context`; + } + + // 4. Default to simple model + return `Recommended model: sonar-pro\nReasoning: Simple factual query`; + } +}); \ No newline at end of file