From 2dd495be2fe4ad8d8046e600f2c143fe8256be2d Mon Sep 17 00:00:00 2001 From: AJAmit17 Date: Tue, 3 Mar 2026 19:10:47 +0530 Subject: [PATCH 1/7] feat: add exportToLangChain function and provider detection in langchain adapter --- src/adapters/index.ts | 1 + src/adapters/langchain.ts | 450 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 451 insertions(+) create mode 100644 src/adapters/langchain.ts diff --git a/src/adapters/index.ts b/src/adapters/index.ts index 2bedd3e..6b095d0 100644 --- a/src/adapters/index.ts +++ b/src/adapters/index.ts @@ -4,3 +4,4 @@ export { exportToOpenAI } from './openai.js'; export { exportToCrewAI } from './crewai.js'; export { exportToOpenClawString, exportToOpenClaw } from './openclaw.js'; export { exportToNanobotString, exportToNanobot } from './nanobot.js'; +export { exportToLangChain } from './langchain.js'; diff --git a/src/adapters/langchain.ts b/src/adapters/langchain.ts new file mode 100644 index 0000000..a0c90cc --- /dev/null +++ b/src/adapters/langchain.ts @@ -0,0 +1,450 @@ +import { existsSync, readFileSync, readdirSync } from 'node:fs'; +import { join, resolve } from 'node:path'; +import yaml from 'js-yaml'; +import { loadAgentManifest, loadFileIfExists, AgentManifest } from '../utils/loader.js'; +import { loadAllSkills, getAllowedTools } from '../utils/skill-loader.js'; + +// --------------------------------------------------------------------------- +// Provider detection — maps model name prefixes to pip packages + env vars +// --------------------------------------------------------------------------- +interface ProviderInfo { + provider: string; + pipPackage: string; + envVar: string; +} + +function detectProvider(model: string): ProviderInfo { + const m = model.toLowerCase(); + if (m.startsWith('claude')) return { provider: 'anthropic', pipPackage: 'langchain-anthropic', envVar: 'ANTHROPIC_API_KEY' }; + if (m.startsWith('gemini')) return { provider: 'google_genai', pipPackage: 'langchain-google-genai', envVar: 
'GOOGLE_API_KEY' }; + if (m.startsWith('grok')) return { provider: 'xai', pipPackage: 'langchain-xai', envVar: 'XAI_API_KEY' }; + if (m.startsWith('mistral')) return { provider: 'mistralai', pipPackage: 'langchain-mistralai', envVar: 'MISTRAL_API_KEY' }; + if (m.startsWith('deepseek')) return { provider: 'deepseek', pipPackage: 'langchain-deepseek', envVar: 'DEEPSEEK_API_KEY' }; + if (m.startsWith('command')) return { provider: 'cohere', pipPackage: 'langchain-cohere', envVar: 'COHERE_API_KEY' }; + // Default: OpenAI (covers gpt-*, o1-*, o3-*, etc.) + return { provider: 'openai', pipPackage: 'langchain-openai', envVar: 'OPENAI_API_KEY' }; +} + +// Make detectProvider available for tests +export { detectProvider }; + +export function exportToLangChain(dir: string): string { + const agentDir = resolve(dir); + const manifest = loadAgentManifest(agentDir); + + // Build system prompt (with knowledge + memory inlined) + const systemPrompt = buildSystemPrompt(agentDir, manifest); + + // Build tools array + const tools = buildToolDefinitions(agentDir, manifest); + + // Detect sub-agents + const subAgents = buildSubAgentDefinitions(agentDir, manifest); + + // Detect memory config + const hasMemory = existsSync(join(agentDir, 'memory', 'memory.yaml')) || + existsSync(join(agentDir, 'memory', 'MEMORY.md')); + + // Determine LLM provider and model from manifest + const model = manifest.model?.preferred ?? 'gpt-4o'; + const temperature = manifest.model?.constraints?.temperature ?? 0.7; + const maxTokens = manifest.model?.constraints?.max_tokens; + + // Collect all providers needed (main + sub-agents) + const mainProvider = detectProvider(model); + const allPipPackages = new Set([mainProvider.pipPackage]); + const allEnvVars = new Set([mainProvider.envVar]); + + for (const sub of subAgents) { + const subProv = detectProvider(sub.model ?? 
model); + allPipPackages.add(subProv.pipPackage); + allEnvVars.add(subProv.envVar); + } + + const pipPackages = ['langchain', ...allPipPackages]; + + // Generate Python code for LangChain + const lines: string[] = []; + + // --- Docstring --- + lines.push('"""'); + lines.push(`LangChain agent definition for ${manifest.name} v${manifest.version}`); + lines.push('Generated by gitagent export'); + lines.push(''); + lines.push('Prerequisites:'); + lines.push(` pip install ${pipPackages.join(' ')}`); + lines.push(''); + lines.push('What is mapped:'); + lines.push(' - System prompt (SOUL.md, RULES.md, skills, compliance)'); + lines.push(' - Tool stubs from tools/*.yaml'); + lines.push(' - Model and temperature from agent.yaml'); + lines.push(' - Knowledge documents (always_load) inlined into system prompt'); + if (manifest.runtime?.max_turns) { + lines.push(' - max_turns → recursion_limit'); + } + if (hasMemory) { + lines.push(' - Memory managed automatically by create_agent'); + } + if (subAgents.length > 0) { + lines.push(` - Sub-agents as tool delegates: ${subAgents.map(s => s.name).join(', ')}`); + } + if (manifest.delegation) { + lines.push(' - Delegation instructions embedded in system prompt'); + } + lines.push('"""\n'); + + // --- Imports (universal — no provider-specific imports) --- + lines.push('import os'); + lines.push('import sys'); + lines.push('from langchain.chat_models import init_chat_model'); + lines.push('from langchain.agents import create_agent'); + lines.push('from langchain.tools import tool'); + lines.push(''); + + // --- Tool stub definitions (module level — no API key needed) --- + if (tools.length > 0) { + lines.push('# --- Tool definitions ---\n'); + for (const t of tools) { + const funcName = t.name.replace(/-/g, '_'); + lines.push('@tool'); + lines.push(`def ${funcName}(${t.params}) -> str:`); + lines.push(` """${t.description}"""`); + lines.push(' # TODO: Implement tool logic'); + lines.push(' return "Not implemented"'); + lines.push(''); 
+ } + } + + // --- System prompt (module level) --- + const escapedPrompt = systemPrompt + .replace(/\\/g, '\\\\') + .replace(/"""/g, '\\"\\"\\"'); + + lines.push(`SYSTEM_PROMPT = """${escapedPrompt}"""`); + lines.push(''); + + // --- Main block --- + // All model/agent creation happens here, AFTER env var checks, + // so providers auto-detect API keys from environment variables. + const envVarChecks = [...allEnvVars]; + const agentVarName = manifest.name.replace(/-/g, '_'); + + lines.push('if __name__ == "__main__":'); + for (const envVar of envVarChecks) { + lines.push(` if not os.environ.get("${envVar}"):`); + lines.push(` print("Error: ${envVar} environment variable is not set")`); + lines.push(' sys.exit(1)'); + lines.push(''); + } + + // Sub-agent delegates inside main (they require API keys) + if (subAgents.length > 0) { + lines.push(' # --- Sub-agent delegates ---'); + lines.push(' # Each sub-agent is a full agent the main agent can invoke via a tool.\n'); + + for (const sub of subAgents) { + const subVarName = sub.name.replace(/-/g, '_'); + const subModel = sub.model ?? model; + const subTemp = sub.temperature ?? 
0.7; + + const subPromptEscaped = sub.systemPrompt + .replace(/\\/g, '\\\\') + .replace(/"""/g, '\\"\\"\\"'); + + const subModelArgs: string[] = [`"${subModel}"`, `temperature=${subTemp}`]; + + lines.push(` _${subVarName} = create_agent(`); + lines.push(` model=init_chat_model(${subModelArgs.join(', ')}),`); + lines.push(' tools=[],'); + lines.push(` system_prompt="""${subPromptEscaped}""",`); + lines.push(` name="${subVarName}",`); + lines.push(' )'); + lines.push(''); + + lines.push(' @tool'); + lines.push(` def delegate_to_${subVarName}(query: str) -> str:`); + lines.push(` """${sub.description} — Delegate a task to the ${sub.name} sub-agent."""`); + lines.push(` result = _${subVarName}.invoke(`); + lines.push(' {"messages": [{"role": "user", "content": query}]}'); + lines.push(' )'); + lines.push(' return result["messages"][-1].content'); + lines.push(''); + } + } + + // Tools list (including sub-agent delegates + tool stubs) + const allToolNames: string[] = []; + for (const sub of subAgents) { + allToolNames.push(`delegate_to_${sub.name.replace(/-/g, '_')}`); + } + for (const t of tools) { + allToolNames.push(t.name.replace(/-/g, '_')); + } + + if (allToolNames.length > 0) { + lines.push(` tools = [${allToolNames.join(', ')}]`); + } else { + lines.push(' tools = []'); + } + lines.push(''); + + // Model init args (no api_key — providers auto-detect from env vars) + const modelInitArgs: string[] = [`"${model}"`, `temperature=${temperature}`]; + if (maxTokens) { + modelInitArgs.push(`max_tokens=${maxTokens}`); + } + + // Agent creation + lines.push(` ${agentVarName} = create_agent(`); + lines.push(` model=init_chat_model(${modelInitArgs.join(', ')}),`); + lines.push(' tools=tools,'); + lines.push(' system_prompt=SYSTEM_PROMPT,'); + lines.push(` name="${agentVarName}",`); + lines.push(' )'); + lines.push(''); + + // Invocation + lines.push(' user_input = " ".join(sys.argv[1:]) if len(sys.argv) > 1 else "Hello, what can you help me with?"'); + + if 
(manifest.runtime?.max_turns) { + lines.push(` result = ${agentVarName}.invoke(`); + lines.push(' {"messages": [{"role": "user", "content": user_input}]},'); + lines.push(` {"recursion_limit": ${manifest.runtime.max_turns * 2}},`); + lines.push(' )'); + } else { + lines.push(` result = ${agentVarName}.invoke(`); + lines.push(' {"messages": [{"role": "user", "content": user_input}]}'); + lines.push(' )'); + } + + lines.push(' print(result["messages"][-1].content)'); + lines.push(''); + + return lines.join('\n'); +} + +function buildSystemPrompt(agentDir: string, manifest: AgentManifest): string { + const parts: string[] = []; + + // Agent identity header + parts.push(`# ${manifest.name} v${manifest.version}`); + parts.push(`${manifest.description}\n`); + + const soul = loadFileIfExists(join(agentDir, 'SOUL.md')); + if (soul) parts.push(soul); + + const rules = loadFileIfExists(join(agentDir, 'RULES.md')); + if (rules) parts.push(rules); + + const duties = loadFileIfExists(join(agentDir, 'DUTIES.md')); + if (duties) parts.push(duties); + + // Skills — loaded via skill-loader + const skillsDir = join(agentDir, 'skills'); + const skills = loadAllSkills(skillsDir); + for (const skill of skills) { + const toolsList = getAllowedTools(skill.frontmatter); + const toolsNote = toolsList.length > 0 ? 
`\nAllowed tools: ${toolsList.join(', ')}` : ''; + parts.push(`## Skill: ${skill.frontmatter.name}\n${skill.frontmatter.description}${toolsNote}\n\n${skill.instructions}`); + } + + // Knowledge (always_load documents) — inlined into prompt + const knowledgeDir = join(agentDir, 'knowledge'); + const indexPath = join(knowledgeDir, 'index.yaml'); + if (existsSync(indexPath)) { + const index = yaml.load(readFileSync(indexPath, 'utf-8')) as { + documents?: Array<{ path: string; always_load?: boolean }>; + }; + + if (index.documents) { + const alwaysLoad = index.documents.filter(d => d.always_load); + for (const doc of alwaysLoad) { + const content = loadFileIfExists(join(knowledgeDir, doc.path)); + if (content) { + parts.push(`## Knowledge: ${doc.path}\n${content}`); + } + } + } + } + + // Compliance constraints + if (manifest.compliance) { + const c = manifest.compliance; + const constraints: string[] = ['## Compliance Constraints']; + + if (c.supervision?.human_in_the_loop === 'always') { + constraints.push('- All decisions require human approval before execution'); + } + if (c.supervision?.escalation_triggers) { + constraints.push('- Escalate to human supervisor when:'); + for (const trigger of c.supervision.escalation_triggers) { + for (const [key, value] of Object.entries(trigger)) { + constraints.push(` - ${key}: ${value}`); + } + } + } + if (c.communications?.fair_balanced) constraints.push('- All communications must be fair and balanced (FINRA 2210)'); + if (c.communications?.no_misleading) constraints.push('- Never make misleading, exaggerated, or promissory statements'); + if (c.data_governance?.pii_handling === 'redact') constraints.push('- Redact all PII from outputs and intermediate reasoning'); + if (c.data_governance?.pii_handling === 'prohibit') constraints.push('- Do not process any personally identifiable information'); + + if (c.segregation_of_duties) { + const sod = c.segregation_of_duties; + if (sod.assignments) { + constraints.push('- Segregation 
of duties is enforced:'); + for (const [agentName, roles] of Object.entries(sod.assignments)) { + constraints.push(` - Agent "${agentName}" has role(s): ${roles.join(', ')}`); + } + } + if (sod.conflicts) { + constraints.push('- Duty separation rules (no single agent may hold both):'); + for (const [a, b] of sod.conflicts) { + constraints.push(` - ${a} and ${b}`); + } + } + if (sod.enforcement === 'strict') { + constraints.push('- SOD enforcement is STRICT — violations will block execution'); + } + } + + if (constraints.length > 1) parts.push(constraints.join('\n')); + } + + // Delegation instructions + if (manifest.delegation) { + const delParts: string[] = ['## Delegation']; + delParts.push(`Mode: ${manifest.delegation.mode ?? 'manual'}`); + if (manifest.agents) { + delParts.push('Available sub-agents you can delegate to:'); + for (const [name, config] of Object.entries(manifest.agents)) { + const funcName = `delegate_to_${name.replace(/-/g, '_')}`; + delParts.push(`- Use the "${funcName}" tool to delegate to ${name}: ${config.description ?? 
''}`); + if (config.delegation?.triggers) { + delParts.push(` Triggers: ${config.delegation.triggers.join(', ')}`); + } + } + } + parts.push(delParts.join('\n')); + } + + // Memory + const memory = loadFileIfExists(join(agentDir, 'memory', 'MEMORY.md')); + if (memory && memory.trim().split('\n').length > 2) { + parts.push(`## Memory\n${memory}`); + } + + return parts.join('\n\n'); +} + +interface SubAgentDef { + name: string; + description: string; + model: string | undefined; + temperature: number | undefined; + systemPrompt: string; +} + +function buildSubAgentDefinitions(agentDir: string, manifest: AgentManifest): SubAgentDef[] { + const subAgents: SubAgentDef[] = []; + + if (!manifest.agents) return subAgents; + + for (const [name, config] of Object.entries(manifest.agents)) { + const subDir = join(agentDir, 'agents', name); + + // Try to load the sub-agent's own manifest + SOUL + let subModel: string | undefined; + let subTemp: number | undefined; + let subPromptParts: string[] = []; + + if (existsSync(join(subDir, 'agent.yaml'))) { + try { + const subManifest = loadAgentManifest(subDir); + subModel = subManifest.model?.preferred; + subTemp = subManifest.model?.constraints?.temperature; + subPromptParts.push(`You are ${subManifest.name}: ${subManifest.description}`); + } catch { /* ignore */ } + } + + const subSoul = loadFileIfExists(join(subDir, 'SOUL.md')); + if (subSoul) subPromptParts.push(subSoul); + + const subDuties = loadFileIfExists(join(subDir, 'DUTIES.md')); + if (subDuties) subPromptParts.push(subDuties); + + // Fallback description + if (subPromptParts.length === 0) { + subPromptParts.push(config.description ?? `Sub-agent: ${name}`); + } + + subAgents.push({ + name, + description: config.description ?? 
`Delegate tasks to ${name}`, + model: subModel, + temperature: subTemp, + systemPrompt: subPromptParts.join('\n\n'), + }); + } + + return subAgents; +} + +interface ToolDef { + name: string; + description: string; + params: string; +} + +function buildToolDefinitions(agentDir: string, _manifest: AgentManifest): ToolDef[] { + const tools: ToolDef[] = []; + const toolsDir = join(agentDir, 'tools'); + + if (!existsSync(toolsDir)) return tools; + + const files = readdirSync(toolsDir).filter(f => f.endsWith('.yaml')); + + for (const file of files) { + const content = readFileSync(join(toolsDir, file), 'utf-8'); + const toolConfig = yaml.load(content) as { + name: string; + description: string; + input_schema?: { + properties?: Record; + required?: string[]; + }; + }; + + const params: string[] = []; + if (toolConfig.input_schema?.properties) { + for (const [name, schema] of Object.entries(toolConfig.input_schema.properties)) { + const pyType = jsonTypeToPython(schema.type); + const isRequired = toolConfig.input_schema.required?.includes(name); + if (isRequired) { + params.push(`${name}: ${pyType}`); + } else { + params.push(`${name}: ${pyType} = None`); + } + } + } + + tools.push({ + name: toolConfig.name, + description: toolConfig.description, + params: params.join(', '), + }); + } + + return tools; +} + +function jsonTypeToPython(jsonType: string): string { + switch (jsonType) { + case 'string': return 'str'; + case 'integer': return 'int'; + case 'number': return 'float'; + case 'boolean': return 'bool'; + case 'array': return 'list'; + case 'object': return 'dict'; + default: return 'str'; + } +} From 5c5e543ca54c772ecfcbc5f48cdf9b03e0b0cce8 Mon Sep 17 00:00:00 2001 From: AJAmit17 Date: Tue, 3 Mar 2026 19:10:56 +0530 Subject: [PATCH 2/7] feat: add runWithLangChain function to execute LangChain agents --- src/runners/git.ts | 4 ++++ src/runners/langchain.ts | 49 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 
src/runners/langchain.ts diff --git a/src/runners/git.ts b/src/runners/git.ts index 7e11a2d..11abfa1 100644 --- a/src/runners/git.ts +++ b/src/runners/git.ts @@ -11,6 +11,7 @@ import { runWithOpenClaw } from './openclaw.js'; import { runWithNanobot } from './nanobot.js'; import { runWithLyzr } from './lyzr.js'; import { runWithGitHub } from './github.js'; +import { runWithLangChain } from './langchain.js'; import { error, info, success, label, heading, divider, warn } from '../utils/format.js'; export interface GitRunOptions { @@ -109,6 +110,9 @@ export async function runWithGit( case 'github': await runWithGitHub(agentDir, manifest, { prompt: options.prompt }); break; + case 'langchain': + runWithLangChain(agentDir, manifest); + break; case 'prompt': console.log(exportToSystemPrompt(agentDir)); break; diff --git a/src/runners/langchain.ts b/src/runners/langchain.ts new file mode 100644 index 0000000..fb89e05 --- /dev/null +++ b/src/runners/langchain.ts @@ -0,0 +1,49 @@ +import { writeFileSync, unlinkSync } from 'node:fs'; +import { join } from 'node:path'; +import { tmpdir } from 'node:os'; +import { spawnSync } from 'node:child_process'; +import { randomBytes } from 'node:crypto'; +import { exportToLangChain, detectProvider } from '../adapters/langchain.js'; +import { AgentManifest } from '../utils/loader.js'; +import { error, info } from '../utils/format.js'; + +export function runWithLangChain(agentDir: string, _manifest: AgentManifest): void { + const model = _manifest.model?.preferred ?? 
'gpt-4o'; + const providerInfo = detectProvider(model); + + // Check the appropriate API key env var + const apiKey = process.env[providerInfo.envVar]; + if (!apiKey) { + error(`${providerInfo.envVar} environment variable is not set`); + info(`Set it with: export ${providerInfo.envVar}="your-key-here"`); + process.exit(1); + } + + const script = exportToLangChain(agentDir); + const tmpFile = join(tmpdir(), `gitagent-langchain-${randomBytes(4).toString('hex')}.py`); + + writeFileSync(tmpFile, script, 'utf-8'); + + const pipHint = `langchain ${providerInfo.pipPackage}`; + info(`Running LangChain agent from "${agentDir}"...`); + info(`Requires: pip install ${pipHint}`); + + try { + const result = spawnSync('python3', [tmpFile], { + stdio: 'inherit', + cwd: agentDir, + env: { ...process.env }, + }); + + if (result.error) { + error(`Failed to run Python: ${result.error.message}`); + info('Make sure python3 is installed and langchain packages are available:'); + info(` pip install ${pipHint}`); + process.exit(1); + } + + process.exit(result.status ?? 
0); + } finally { + try { unlinkSync(tmpFile); } catch { /* ignore */ } + } +} From a176f98ef54e53019b0bb34a6d595b44d8643fe2 Mon Sep 17 00:00:00 2001 From: AJAmit17 Date: Tue, 3 Mar 2026 19:11:05 +0530 Subject: [PATCH 3/7] feat: add langchain support to export and run commands --- src/commands/export.ts | 8 ++++++-- src/commands/run.ts | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/commands/export.ts b/src/commands/export.ts index 83bb2af..1f0a1a6 100644 --- a/src/commands/export.ts +++ b/src/commands/export.ts @@ -8,6 +8,7 @@ import { exportToCrewAI, exportToOpenClawString, exportToNanobotString, + exportToLangChain, } from '../adapters/index.js'; import { exportToLyzrString } from '../adapters/lyzr.js'; import { exportToGitHubString } from '../adapters/github.js'; @@ -20,7 +21,7 @@ interface ExportOptions { export const exportCommand = new Command('export') .description('Export agent to other formats') - .requiredOption('-f, --format ', 'Export format (system-prompt, claude-code, openai, crewai, openclaw, nanobot, lyzr, github)') + .requiredOption('-f, --format ', 'Export format (system-prompt, claude-code, openai, crewai, openclaw, nanobot, lyzr, github, langchain)') .option('-d, --dir ', 'Agent directory', '.') .option('-o, --output ', 'Output file path') .action(async (options: ExportOptions) => { @@ -57,9 +58,12 @@ export const exportCommand = new Command('export') case 'github': result = exportToGitHubString(dir); break; + case 'langchain': + result = exportToLangChain(dir); + break; default: error(`Unknown format: ${options.format}`); - info('Supported formats: system-prompt, claude-code, openai, crewai, openclaw, nanobot, lyzr, github'); + info('Supported formats: system-prompt, claude-code, openai, crewai, openclaw, nanobot, lyzr, github, langchain'); process.exit(1); } diff --git a/src/commands/run.ts b/src/commands/run.ts index 4773dd5..77234d6 100644 --- a/src/commands/run.ts +++ b/src/commands/run.ts @@ -12,6 +12,7 @@ 
import { runWithNanobot } from '../runners/nanobot.js'; import { runWithLyzr } from '../runners/lyzr.js'; import { runWithGitHub } from '../runners/github.js'; import { runWithGit } from '../runners/git.js'; +import { runWithLangChain } from '../runners/langchain.js'; interface RunOptions { repo?: string; @@ -26,7 +27,7 @@ interface RunOptions { export const runCommand = new Command('run') .description('Run an agent from a git repository or local directory') .option('-r, --repo ', 'Git repository URL') - .option('-a, --adapter ', 'Adapter: claude, openai, crewai, openclaw, nanobot, lyzr, github, git, prompt', 'claude') + .option('-a, --adapter ', 'Adapter: claude, openai, crewai, openclaw, nanobot, lyzr, github, langchain, git, prompt', 'claude') .option('-b, --branch ', 'Git branch/tag to clone', 'main') .option('--refresh', 'Force re-clone (pull latest)', false) .option('--no-cache', 'Clone to temp dir, delete on exit') @@ -112,6 +113,9 @@ export const runCommand = new Command('run') case 'github': await runWithGitHub(agentDir, manifest, { prompt: options.prompt }); break; + case 'langchain': + runWithLangChain(agentDir, manifest); + break; case 'git': if (!options.repo) { error('The git adapter requires --repo (-r)'); @@ -130,7 +134,7 @@ export const runCommand = new Command('run') break; default: error(`Unknown adapter: ${options.adapter}`); - info('Supported adapters: claude, openai, crewai, openclaw, nanobot, lyzr, github, git, prompt'); + info('Supported adapters: claude, openai, crewai, openclaw, nanobot, lyzr, github, langchain, git, prompt'); process.exit(1); } } catch (e) { From 04aa89b2e470e0a8bda2b6aa49d746569f4b821d Mon Sep 17 00:00:00 2001 From: AJAmit17 Date: Tue, 3 Mar 2026 19:11:10 +0530 Subject: [PATCH 4/7] feat: add LangChain adapter to README documentation --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 72236d0..120161d 100644 --- a/README.md +++ b/README.md @@ -284,6 +284,7 @@ Adapters are used by 
both `export` and `run`. Available adapters: | `git` | Git-native execution (run only) | | `openclaw` | OpenClaw format | | `nanobot` | Nanobot format | +| `langchain` | LangChain agent Python code | ```bash # Export to system prompt From 1a3764e9ddeaac830f831b1553e56d009576c602 Mon Sep 17 00:00:00 2001 From: AJAmit17 <1NH22CD011@newhorizonindia.edu> Date: Wed, 4 Mar 2026 17:14:18 +0530 Subject: [PATCH 5/7] feat: pass prompt option to runWithLangChain function for enhanced agent interaction --- src/commands/run.ts | 2 +- src/runners/langchain.ts | 94 ++++++++++++++++++++++++++++++++++------ 2 files changed, 81 insertions(+), 15 deletions(-) diff --git a/src/commands/run.ts b/src/commands/run.ts index 77234d6..015ff34 100644 --- a/src/commands/run.ts +++ b/src/commands/run.ts @@ -114,7 +114,7 @@ export const runCommand = new Command('run') await runWithGitHub(agentDir, manifest, { prompt: options.prompt }); break; case 'langchain': - runWithLangChain(agentDir, manifest); + runWithLangChain(agentDir, manifest, { prompt: options.prompt }); break; case 'git': if (!options.repo) { diff --git a/src/runners/langchain.ts b/src/runners/langchain.ts index fb89e05..2e18d3c 100644 --- a/src/runners/langchain.ts +++ b/src/runners/langchain.ts @@ -1,44 +1,110 @@ -import { writeFileSync, unlinkSync } from 'node:fs'; +import { writeFileSync, unlinkSync, existsSync } from 'node:fs'; import { join } from 'node:path'; -import { tmpdir } from 'node:os'; +import { tmpdir, homedir, platform } from 'node:os'; import { spawnSync } from 'node:child_process'; import { randomBytes } from 'node:crypto'; import { exportToLangChain, detectProvider } from '../adapters/langchain.js'; import { AgentManifest } from '../utils/loader.js'; import { error, info } from '../utils/format.js'; -export function runWithLangChain(agentDir: string, _manifest: AgentManifest): void { +const IS_WINDOWS = platform() === 'win32'; + +/** Paths inside a venv differ between Windows and Unix. 
*/ +function venvPython(venvDir: string): string { + return IS_WINDOWS + ? join(venvDir, 'Scripts', 'python.exe') + : join(venvDir, 'bin', 'python'); +} + +function venvPip(venvDir: string): string { + return IS_WINDOWS + ? join(venvDir, 'Scripts', 'pip.exe') + : join(venvDir, 'bin', 'pip'); +} + +/** Find a system Python 3 to bootstrap the venv. */ +function findSystemPython(): string | null { + for (const cmd of ['python3', 'python']) { + const r = spawnSync(cmd, ['--version'], { stdio: 'pipe' }); + if (!r.error && r.status === 0) { + // Reject the Windows Store stub (outputs to stderr, version string absent from stdout) + const out = (r.stdout?.toString() ?? '') + (r.stderr?.toString() ?? ''); + if (out.includes('Python 3')) return cmd; + } + } + return null; +} + +export function runWithLangChain(agentDir: string, _manifest: AgentManifest, options: { prompt?: string } = {}): void { const model = _manifest.model?.preferred ?? 'gpt-4o'; const providerInfo = detectProvider(model); // Check the appropriate API key env var - const apiKey = process.env[providerInfo.envVar]; - if (!apiKey) { + if (!process.env[providerInfo.envVar]) { error(`${providerInfo.envVar} environment variable is not set`); - info(`Set it with: export ${providerInfo.envVar}="your-key-here"`); + info(`Set it with: ${IS_WINDOWS ? '$env:' : 'export '}${providerInfo.envVar}="your-key-here"`); process.exit(1); } + // Persistent venv at ~/.gitagent/gitagent-env — reused across runs + const venvDir = join(homedir(), '.gitagent', 'gitagent-env'); + const packages = ['langchain', 'langchain-core', providerInfo.pipPackage]; + + // --- Step 1: create venv if it doesn't exist --- + if (!existsSync(venvPython(venvDir))) { + info(`Creating Python virtual environment at ${venvDir} ...`); + const sysPython = findSystemPython(); + if (!sysPython) { + error('Python 3 not found. 
Please install Python 3 and try again.'); + process.exit(1); + } + const create = spawnSync(sysPython, ['-m', 'venv', venvDir], { stdio: 'inherit' }); + if (create.status !== 0) { + error('Failed to create virtual environment.'); + process.exit(1); + } + } + + // --- Step 2: install packages if any are missing --- + const checkImport = spawnSync( + venvPython(venvDir), + ['-c', `import langchain; import ${providerInfo.pipPackage.replace(/-/g, '_')}`], + { stdio: 'pipe' }, + ); + + if (checkImport.status !== 0) { + info(`Installing packages: ${packages.join(' ')} ...`); + const install = spawnSync( + venvPip(venvDir), + ['install', '--quiet', '--upgrade', ...packages], + { stdio: 'inherit' }, + ); + if (install.status !== 0) { + error('Failed to install required packages.'); + info(`Try manually: ${venvPip(venvDir)} install ${packages.join(' ')}`); + process.exit(1); + } + } + + // --- Step 3: write + run script --- const script = exportToLangChain(agentDir); const tmpFile = join(tmpdir(), `gitagent-langchain-${randomBytes(4).toString('hex')}.py`); - writeFileSync(tmpFile, script, 'utf-8'); - const pipHint = `langchain ${providerInfo.pipPackage}`; - info(`Running LangChain agent from "${agentDir}"...`); - info(`Requires: pip install ${pipHint}`); + info(`Running LangChain agent from "${agentDir}" ...`); + + // Pass prompt as a CLI arg so the script receives it via sys.argv + const scriptArgs = options.prompt ? 
[tmpFile, options.prompt] : [tmpFile]; try { - const result = spawnSync('python3', [tmpFile], { + const result = spawnSync(venvPython(venvDir), scriptArgs, { stdio: 'inherit', cwd: agentDir, env: { ...process.env }, }); if (result.error) { - error(`Failed to run Python: ${result.error.message}`); - info('Make sure python3 is installed and langchain packages are available:'); - info(` pip install ${pipHint}`); + error(`Failed to run script: ${result.error.message}`); process.exit(1); } From db0872bea2cd283fee00c3fccd119dff1ebe6ab8 Mon Sep 17 00:00:00 2001 From: AJAmit17 <1NH22CD011@newhorizonindia.edu> Date: Wed, 4 Mar 2026 17:46:03 +0530 Subject: [PATCH 6/7] feat: enhance provider detection and error handling in LangChain integration --- src/adapters/langchain.ts | 29 ++++++++++++++++++++--------- src/runners/langchain.ts | 9 +++++++++ 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/src/adapters/langchain.ts b/src/adapters/langchain.ts index a0c90cc..6dee812 100644 --- a/src/adapters/langchain.ts +++ b/src/adapters/langchain.ts @@ -13,16 +13,15 @@ interface ProviderInfo { envVar: string; } -function detectProvider(model: string): ProviderInfo { +/** Supported providers. Returns null for any unsupported model. 
*/ +function detectProvider(model: string): ProviderInfo | null { const m = model.toLowerCase(); - if (m.startsWith('claude')) return { provider: 'anthropic', pipPackage: 'langchain-anthropic', envVar: 'ANTHROPIC_API_KEY' }; - if (m.startsWith('gemini')) return { provider: 'google_genai', pipPackage: 'langchain-google-genai', envVar: 'GOOGLE_API_KEY' }; - if (m.startsWith('grok')) return { provider: 'xai', pipPackage: 'langchain-xai', envVar: 'XAI_API_KEY' }; - if (m.startsWith('mistral')) return { provider: 'mistralai', pipPackage: 'langchain-mistralai', envVar: 'MISTRAL_API_KEY' }; - if (m.startsWith('deepseek')) return { provider: 'deepseek', pipPackage: 'langchain-deepseek', envVar: 'DEEPSEEK_API_KEY' }; - if (m.startsWith('command')) return { provider: 'cohere', pipPackage: 'langchain-cohere', envVar: 'COHERE_API_KEY' }; - // Default: OpenAI (covers gpt-*, o1-*, o3-*, etc.) - return { provider: 'openai', pipPackage: 'langchain-openai', envVar: 'OPENAI_API_KEY' }; + // Anthropic — claude-* + if (m.startsWith('claude')) return { provider: 'anthropic', pipPackage: 'langchain-anthropic', envVar: 'ANTHROPIC_API_KEY' }; + // OpenAI — gpt-*, o1-*, o2-*, o3-*, o4-* + if (m.startsWith('gpt') || /^o\d/.test(m)) return { provider: 'openai', pipPackage: 'langchain-openai', envVar: 'OPENAI_API_KEY' }; + // Unsupported model + return null; } // Make detectProvider available for tests @@ -52,11 +51,23 @@ export function exportToLangChain(dir: string): string { // Collect all providers needed (main + sub-agents) const mainProvider = detectProvider(model); + if (!mainProvider) { + throw new Error( + `Model "${model}" is not supported by the LangChain adapter.\n` + + 'gitagent with LangChain currently supports OpenAI (gpt-*, o1-*, o3-*, …) and Anthropic (claude-*) only.' + ); + } const allPipPackages = new Set([mainProvider.pipPackage]); const allEnvVars = new Set([mainProvider.envVar]); for (const sub of subAgents) { const subProv = detectProvider(sub.model ?? 
model); + if (!subProv) { + throw new Error( + `Sub-agent model "${sub.model ?? model}" is not supported by the LangChain adapter.\n` + + 'gitagent with LangChain currently supports OpenAI (gpt-*, o1-*, o3-*, …) and Anthropic (claude-*) only.' + ); + } allPipPackages.add(subProv.pipPackage); allEnvVars.add(subProv.envVar); } diff --git a/src/runners/langchain.ts b/src/runners/langchain.ts index 2e18d3c..2e35b43 100644 --- a/src/runners/langchain.ts +++ b/src/runners/langchain.ts @@ -39,6 +39,15 @@ export function runWithLangChain(agentDir: string, _manifest: AgentManifest, opt const model = _manifest.model?.preferred ?? 'gpt-4o'; const providerInfo = detectProvider(model); + // Unsupported model — tell the user clearly + if (!providerInfo) { + error(`Model "${model}" is not supported by the LangChain adapter.`); + info('gitagent with LangChain currently supports:'); + info(' • OpenAI — gpt-4o, gpt-4, o1-mini, o3-mini, …'); + info(' • Anthropic — claude-3-5-sonnet, claude-3-opus, …'); + process.exit(1); + } + // Check the appropriate API key env var if (!process.env[providerInfo.envVar]) { error(`${providerInfo.envVar} environment variable is not set`); From d63375debdd47bee0f010cfc0d1e2c40ba5fa9e2 Mon Sep 17 00:00:00 2001 From: Amit Acharya Date: Thu, 26 Mar 2026 03:38:17 +0530 Subject: [PATCH 7/7] feat: add LangChain CLI adapter with export and run Full LangChain CLI adapter: - Export: generates standalone Python scripts using LangChain agent framework - Run: executes gitagent agents using LangChain runtime with venv management - Multi-provider support: OpenAI (GPT-4, o1, o3) and Anthropic (Claude) - Tool conversion: maps gitagent YAML tools to @tool decorated functions - Sub-agent delegation: converts sub-agents to LangChain agent delegates - System prompt: combines SOUL.md, RULES.md, skills --- docs/adapters/langchain.md | 687 +++++++++++++++++++++++++++++++++ src/adapters/langchain.test.ts | 482 +++++++++++++++++++++++ src/commands/import.ts | 13 +- 
src/runners/git.ts | 4 - 4 files changed, 1175 insertions(+), 11 deletions(-) create mode 100644 docs/adapters/langchain.md create mode 100644 src/adapters/langchain.test.ts diff --git a/docs/adapters/langchain.md b/docs/adapters/langchain.md new file mode 100644 index 0000000..2e7f3d6 --- /dev/null +++ b/docs/adapters/langchain.md @@ -0,0 +1,687 @@ +# LangChain Adapter + +Complete mapping guide for converting gitagent agents to LangChain Python code. + +## Overview + +LangChain is a popular framework for building applications with large language models. The gitagent LangChain adapter generates standalone Python scripts that use LangChain's agent framework with support for: + +- **Multi-provider support**: OpenAI (GPT-4, o1, o3) and Anthropic (Claude) +- **Tool integration**: Converts gitagent tool definitions to LangChain `@tool` decorators +- **Sub-agent delegation**: Maps gitagent sub-agents to LangChain agent delegates +- **Memory management**: Automatic memory handling via `create_agent()` +- **System prompts**: Combines SOUL.md, RULES.md, skills, and knowledge into unified prompt + +The gitagent LangChain adapter enables: +1. **Export**: Convert gitagent → standalone LangChain Python script +2. **Run**: Execute gitagent agents using LangChain runtime + +## Installation + +### Prerequisites + +```bash +# Python 3.8+ required +python --version + +# Install gitagent +npm install -g @shreyaskapale/gitagent +``` + +### LangChain Dependencies + +The generated Python scripts include pip install instructions. 
For manual installation: + +```bash +# For OpenAI models +pip install langchain langchain-core langchain-openai + +# For Anthropic models +pip install langchain langchain-core langchain-anthropic + +# For both providers +pip install langchain langchain-core langchain-openai langchain-anthropic +``` + +## Field Mapping + +### Export: gitagent → LangChain + +| gitagent | LangChain | Notes | +|----------|-----------|-------| +| `SOUL.md` | `SYSTEM_PROMPT` variable | Core identity and personality | +| `RULES.md` | `SYSTEM_PROMPT` variable | Appended to system prompt | +| `DUTIES.md` | `SYSTEM_PROMPT` variable | Appended to system prompt | +| `skills/*/SKILL.md` | `SYSTEM_PROMPT` variable | Each skill becomes a section | +| `tools/*.yaml` | `@tool` decorated functions | Tool stubs with type hints | +| `knowledge/` (always_load) | `SYSTEM_PROMPT` variable | Embedded as knowledge sections | +| `manifest.model.preferred` | `init_chat_model()` first arg | Model name (e.g., "gpt-4o", "claude-opus-4-6") | +| `manifest.model.constraints.temperature` | `init_chat_model(temperature=...)` | Temperature parameter | +| `manifest.model.constraints.max_tokens` | `init_chat_model(max_tokens=...)` | Max tokens parameter | +| `manifest.runtime.max_turns` | `invoke(..., {"recursion_limit": ...})` | Multiplied by 2 for safety | +| `agents/` (sub-agents) | `create_agent()` + `@tool` delegate | Each sub-agent becomes a callable tool | +| `compliance.supervision` | System prompt section | Embedded as compliance constraints | +| `delegation` | System prompt section | Delegation instructions | +| `memory/MEMORY.md` | System prompt section | Embedded as memory context | + +## Provider Detection + +The adapter automatically detects the provider based on model name: + +| Model Prefix | Provider | Pip Package | Environment Variable | +|--------------|----------|-------------|---------------------| +| `gpt-*` | OpenAI | `langchain-openai` | `OPENAI_API_KEY` | +| `o1-*`, `o2-*`, `o3-*`, `o4-*` | 
OpenAI | `langchain-openai` | `OPENAI_API_KEY` | +| `claude-*` | Anthropic | `langchain-anthropic` | `ANTHROPIC_API_KEY` | + +**Unsupported models** (will throw error): +- Gemini models (`gemini-*`) +- Llama models (`llama*`) +- Mistral models (`mistral*`) +- Custom/local models + +## Usage Examples + +### Export to LangChain + +```bash +# Export to stdout +gitagent export --format langchain -d ./my-agent + +# Save to file +gitagent export --format langchain -d ./my-agent -o agent.py + +# Export from git repository +gitagent export --format langchain --repo https://github.com/user/agent.git -o agent.py +``` + +**Output Structure:** +```python +""" +LangChain agent definition for my-agent v1.0.0 +Generated by gitagent export + +Prerequisites: + pip install langchain langchain-openai + +What is mapped: + - System prompt (SOUL.md, RULES.md, skills, compliance) + - Tool stubs from tools/*.yaml + - Model and temperature from agent.yaml + - Knowledge documents (always_load) inlined into system prompt +""" + +import os +import sys +from langchain.chat_models import init_chat_model +from langchain.agents import create_agent +from langchain.tools import tool + +# --- Tool definitions --- +@tool +def my_tool(param: str) -> str: + """Tool description""" + # TODO: Implement tool logic + return "Not implemented" + +SYSTEM_PROMPT = """...""" + +if __name__ == "__main__": + if not os.environ.get("OPENAI_API_KEY"): + print("Error: OPENAI_API_KEY environment variable is not set") + sys.exit(1) + + tools = [my_tool] + + my_agent = create_agent( + model=init_chat_model("gpt-4o", temperature=0.7), + tools=tools, + system_prompt=SYSTEM_PROMPT, + name="my_agent", + ) + + user_input = " ".join(sys.argv[1:]) if len(sys.argv) > 1 else "Hello, what can you help me with?" 
+ result = my_agent.invoke( + {"messages": [{"role": "user", "content": user_input}]} + ) + print(result["messages"][-1].content) +``` + +### Run with LangChain + +```bash +# Run agent using LangChain adapter +gitagent run ./my-agent --adapter langchain + +# Run with custom prompt +gitagent run ./my-agent --adapter langchain -p "Explain quantum computing" + +# Run from git repository +gitagent run --repo https://github.com/user/agent.git --adapter langchain +``` + +**What Happens:** +1. Generates LangChain Python script in temporary file +2. Creates/reuses Python virtual environment at `~/.gitagent/gitagent-env` +3. Installs required packages (langchain, provider packages) +4. Executes the script with the agent directory as working directory +5. Cleans up temporary script file on exit + +### Running Exported Scripts Manually + +```bash +# Set API key +export OPENAI_API_KEY=your-api-key-here +# Or for Anthropic +export ANTHROPIC_API_KEY=your-api-key-here + +# Run the exported script +python agent.py + +# Run with custom prompt +python agent.py "What is quantum computing?" 
+``` + +## Tool Mapping + +### Input Schema Conversion + +gitagent tool YAML schemas are converted to Python type hints: + +| JSON Schema Type | Python Type | +|-----------------|-------------| +| `string` | `str` | +| `integer` | `int` | +| `number` | `float` | +| `boolean` | `bool` | +| `array` | `list` | +| `object` | `dict` | + +**Example:** + +**gitagent (tools/search.yaml):** +```yaml +name: search-web +description: Search the web for information +input_schema: + properties: + query: + type: string + max_results: + type: integer + include_images: + type: boolean + required: + - query +``` + +**LangChain (generated):** +```python +@tool +def search_web(query: str, max_results: int = None, include_images: bool = None) -> str: + """Search the web for information""" + # TODO: Implement tool logic + return "Not implemented" +``` + +### Tool Name Conversion + +Tool names with hyphens are converted to Python-compatible identifiers: + +- `search-web` → `search_web` +- `generate-report` → `generate_report` +- `my-custom-tool` → `my_custom_tool` + +## Sub-Agent Mapping + +gitagent sub-agents are converted to LangChain agent delegates with tool wrappers. + +**gitagent (agent.yaml):** +```yaml +agents: + fact-checker: + description: Verifies factual claims + model: + preferred: gpt-4o-mini +``` + +**LangChain (generated):** +```python +# Sub-agent creation +_fact_checker = create_agent( + model=init_chat_model("gpt-4o-mini", temperature=0.7), + tools=[], + system_prompt="""...""", + name="fact_checker", +) + +# Delegation tool +@tool +def delegate_to_fact_checker(query: str) -> str: + """Verifies factual claims — Delegate a task to the fact-checker sub-agent.""" + result = _fact_checker.invoke( + {"messages": [{"role": "user", "content": query}]} + ) + return result["messages"][-1].content + +# Main agent includes delegation tool +tools = [delegate_to_fact_checker, ...] 
+``` + +### Multi-Provider Sub-Agents + +When sub-agents use different providers than the main agent, the script: +- Installs all required provider packages +- Checks all required API keys +- Creates separate model instances for each provider + +**Example:** +```python +# Main agent uses OpenAI +main_agent = create_agent( + model=init_chat_model("gpt-4o", temperature=0.7), + ... +) + +# Sub-agent uses Anthropic +_helper = create_agent( + model=init_chat_model("claude-3-5-sonnet", temperature=0.5), + ... +) +``` + +## System Prompt Construction + +The system prompt is built from multiple sources in this order: + +1. **Agent header**: Name, version, description +2. **SOUL.md**: Core identity and personality +3. **RULES.md**: Constraints and boundaries +4. **DUTIES.md**: Role and responsibilities +5. **Skills**: Each skill with name, description, and instructions +6. **Knowledge**: Always-load documents from `knowledge/index.yaml` +7. **Compliance**: Constraints from `compliance` section +8. **Delegation**: Instructions for sub-agent delegation +9. **Memory**: Content from `memory/MEMORY.md` + +**Example:** +```python +SYSTEM_PROMPT = """# my-agent v1.0.0 +A helpful assistant + +# Soul +I am a friendly assistant... 
+ +# Rules +- Never share credentials +- Always verify facts + +## Skill: research +Research and summarize topics +[skill instructions] + +## Knowledge: regulations.md +[document content] + +## Compliance Constraints +- All decisions require human approval + +## Delegation +Available sub-agents: +- Use "delegate_to_helper" tool to delegate to helper + +## Memory +[memory content] +""" +``` + +## What Maps Cleanly + +✅ **Fully Supported:** +- Agent identity and personality (SOUL.md) +- Rules and constraints (RULES.md) +- Duties and responsibilities (DUTIES.md) +- Skills with full instructions +- Tool definitions (as stubs) +- Model preferences (OpenAI, Anthropic) +- Temperature and max_tokens +- Sub-agent delegation +- Knowledge documents (always_load) +- Compliance constraints +- Memory context +- Max turns → recursion_limit + +## What Requires Manual Setup + +⚠️ **Not Automatically Mapped:** + +### 1. Tool Implementation + +**Issue:** Generated tools are stubs with `return "Not implemented"`. + +**Workaround:** +- Manually implement tool logic in the generated Python file +- Or import tool implementations from separate modules +- Or use LangChain's built-in tools + +**Example:** +```python +@tool +def search_web(query: str) -> str: + """Search the web for information""" + # Manual implementation + import requests + response = requests.get(f"https://api.search.com?q={query}") + return response.json()["results"] +``` + +### 2. Knowledge Retrieval + +**Issue:** Only `always_load` documents are embedded. Dynamic retrieval is not supported. + +**Workaround:** +- Embed all critical documents with `always_load: true` +- Or implement custom RAG logic in the Python script +- Or use LangChain's vector store integrations + +### 3. Hooks and Lifecycle Events + +**Issue:** gitagent hooks are not converted to LangChain callbacks. 
+ +**Workaround:** +- Manually implement LangChain callbacks +- Use LangChain's callback system for lifecycle events +- Document hook requirements in comments + +### 4. MCP Servers + +**Issue:** gitagent MCP server integration is not portable to LangChain. + +**Workaround:** +- Convert MCP tools to regular LangChain tools +- Or use LangChain's tool integrations +- Document MCP requirements in README + +### 5. Workflows + +**Issue:** gitagent SkillsFlow YAML is not converted. + +**Workaround:** +- Implement workflow logic in tool implementations +- Or use LangChain's agent executor with custom logic +- Document workflow patterns in system prompt + +### 6. Memory Persistence + +**Issue:** LangChain's `create_agent()` manages memory automatically, but persistence is not configured. + +**Workaround:** +- Manually add LangChain memory components +- Use `ConversationBufferMemory` or similar +- Implement custom persistence layer + +**Example:** +```python +from langchain.memory import ConversationBufferMemory + +memory = ConversationBufferMemory(return_messages=True) + +my_agent = create_agent( + model=init_chat_model("gpt-4o", temperature=0.7), + tools=tools, + system_prompt=SYSTEM_PROMPT, + name="my_agent", + memory=memory, # Add memory +) +``` + +### 7. Unsupported Models + +**Issue:** Only OpenAI and Anthropic models are supported. + +**Workaround:** +- Use supported models (gpt-*, o1-*, claude-*) +- Or manually modify the generated script for other providers +- Or contribute provider support to gitagent + +## Environment Variables + +The generated script checks for required API keys: + +### OpenAI Models +```bash +export OPENAI_API_KEY=sk-... +``` + +### Anthropic Models +```bash +export ANTHROPIC_API_KEY=sk-ant-... +``` + +### Multiple Providers +When using sub-agents with different providers: +```bash +export OPENAI_API_KEY=sk-... +export ANTHROPIC_API_KEY=sk-ant-... +``` + +## Best Practices + +### When Exporting + +1. 
**Use supported models** in `agent.yaml`: + - OpenAI: `gpt-4o`, `gpt-4`, `o1-mini`, `o3-mini` + - Anthropic: `claude-opus-4-6`, `claude-3-5-sonnet`, `claude-haiku-4-5` + +2. **Embed critical knowledge** with `always_load: true`: + ```yaml + # knowledge/index.yaml + documents: + - path: regulations.md + always_load: true + ``` + +3. **Keep skills self-contained** - full instructions in SKILL.md + +4. **Document tool requirements** in tool descriptions + +5. **Set appropriate temperature** for your use case: + - Creative tasks: 0.7-1.0 + - Factual tasks: 0.0-0.3 + +### When Running + +1. **Set API keys** before running: + ```bash + export OPENAI_API_KEY=your-key + # Or + export ANTHROPIC_API_KEY=your-key + ``` + +2. **Use virtual environment** - the runner creates one automatically at `~/.gitagent/gitagent-env` + +3. **Monitor token usage** - LangChain doesn't enforce max_tokens strictly + +4. **Test with simple prompts** first before complex tasks + +### When Implementing Tools + +1. **Keep tool functions simple** - single responsibility + +2. **Add error handling**: + ```python + @tool + def my_tool(param: str) -> str: + """Tool description""" + try: + # Implementation + return result + except Exception as e: + return f"Error: {str(e)}" + ``` + +3. **Return strings** - LangChain tools should return string results + +4. 
**Add type hints** - helps with debugging and IDE support
+
+## Troubleshooting
+
+### "Model not supported by LangChain adapter"
+
+**Solution:**
+- Use OpenAI models: `gpt-4o`, `gpt-4`, `o1-mini`, `o3-mini`
+- Or Anthropic models: `claude-opus-4-6`, `claude-3-5-sonnet`
+- Update `agent.yaml` with a supported model
+
+### "API key environment variable is not set"
+
+**Solution:**
+```bash
+# For OpenAI
+export OPENAI_API_KEY=sk-your-key-here
+
+# For Anthropic
+export ANTHROPIC_API_KEY=sk-ant-your-key-here
+```
+
+### "Failed to install required packages"
+
+**Solution:**
+```bash
+# Manually install in the venv
+~/.gitagent/gitagent-env/bin/pip install langchain langchain-openai
+
+# Or delete venv and let runner recreate it
+rm -rf ~/.gitagent/gitagent-env
+```
+
+### "Python 3 not found"
+
+**Solution:**
+```bash
+# Install Python 3.8+
+# macOS
+brew install python3
+
+# Ubuntu/Debian
+sudo apt install python3 python3-pip
+
+# Windows
+# Download from python.org
+```
+
+### Tools return "Not implemented"
+
+**Solution:**
+- This is expected for exported scripts
+- Manually implement tool logic in the generated Python file
+- Or use LangChain's built-in tools
+
+### Sub-agent not working
+
+**Solution:**
+- Ensure sub-agent has `agent.yaml` in `agents/<name>/` directory
+- Verify sub-agent model is supported
+- Check that both API keys are set if using different providers
+
+## Advanced Usage
+
+### Custom Tool Implementation
+
+Replace stub implementations with real logic:
+
+```python
+@tool
+def search_web(query: str, max_results: int = 5) -> str:
+    """Search the web for information"""
+    import requests
+
+    response = requests.get(
+        "https://api.search.com/search",
+        params={"q": query, "limit": max_results},
+        headers={"Authorization": f"Bearer {os.environ['SEARCH_API_KEY']}"}
+    )
+
+    results = response.json()["results"]
+    return "\n".join([f"- {r['title']}: {r['snippet']}" for r in results])
+```
+
+### Adding Memory Persistence
+
+```python
+from
langchain.memory import ConversationBufferMemory +from langchain.memory.chat_message_histories import FileChatMessageHistory + +# Persistent memory +history = FileChatMessageHistory("chat_history.json") +memory = ConversationBufferMemory( + chat_memory=history, + return_messages=True +) + +my_agent = create_agent( + model=init_chat_model("gpt-4o", temperature=0.7), + tools=tools, + system_prompt=SYSTEM_PROMPT, + name="my_agent", + memory=memory, +) +``` + +### Custom Callbacks + +```python +from langchain.callbacks.base import BaseCallbackHandler + +class MyCallbackHandler(BaseCallbackHandler): + def on_tool_start(self, serialized, input_str, **kwargs): + print(f"Tool started: {serialized['name']}") + + def on_tool_end(self, output, **kwargs): + print(f"Tool finished: {output}") + +# Use callbacks +result = my_agent.invoke( + {"messages": [{"role": "user", "content": user_input}]}, + {"callbacks": [MyCallbackHandler()]} +) +``` + +## Resources + +- [LangChain Documentation](https://python.langchain.com/) +- [LangChain Agents Guide](https://python.langchain.com/docs/modules/agents/) +- [LangChain Tools](https://python.langchain.com/docs/modules/agents/tools/) +- [gitagent Specification](../../spec/SPECIFICATION.md) +- [Example Agents](../../examples/) + +## Limitations Summary + +| Feature | Export | Run | Notes | +|---------|--------|-----|-------| +| Identity (SOUL.md) | ✅ | ✅ | Full support | +| Rules (RULES.md) | ✅ | ✅ | Full support | +| Duties (DUTIES.md) | ✅ | ✅ | Full support | +| Skills | ✅ | ✅ | Full support | +| Tools | ⚠️ | ⚠️ | Stubs only, requires implementation | +| Model (OpenAI) | ✅ | ✅ | Full support | +| Model (Anthropic) | ✅ | ✅ | Full support | +| Model (Other) | ❌ | ❌ | Not supported | +| Temperature | ✅ | ✅ | Full support | +| Max tokens | ✅ | ✅ | Full support | +| Max turns | ✅ | ✅ | Converted to recursion_limit | +| Sub-agents | ✅ | ✅ | Full support | +| Knowledge (always_load) | ✅ | ✅ | Embedded in prompt | +| Knowledge (dynamic) | ❌ | ❌ | 
Not supported |
+| Compliance | ✅ | ✅ | Embedded in prompt |
+| Delegation | ✅ | ✅ | Full support |
+| Memory (context) | ✅ | ✅ | Embedded in prompt |
+| Memory (persistence) | ❌ | ❌ | Requires manual setup |
+| Hooks | ❌ | ❌ | Not supported |
+| Workflows | ❌ | ❌ | Not supported |
+| MCP servers | ❌ | ❌ | Not supported |
+
+**Legend:**
+- ✅ Fully supported
+- ⚠️ Partial support or manual setup required
+- ❌ Not supported
diff --git a/src/adapters/langchain.test.ts b/src/adapters/langchain.test.ts
new file mode 100644
index 0000000..cdf1554
--- /dev/null
+++ b/src/adapters/langchain.test.ts
@@ -0,0 +1,482 @@
+/**
+ * Tests for the LangChain adapter.
+ *
+ * Uses Node.js built-in test runner (node --test).
+ */
+import { test, describe } from 'node:test';
+import assert from 'node:assert/strict';
+import { mkdtempSync, writeFileSync, mkdirSync } from 'node:fs';
+import { join } from 'node:path';
+import { tmpdir } from 'node:os';
+
+import { exportToLangChain, detectProvider } from './langchain.js';
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+function makeAgentDir(opts: {
+  name?: string;
+  description?: string;
+  soul?: string;
+  rules?: string;
+  model?: string;
+  temperature?: number;
+  maxTokens?: number;
+  maxTurns?: number;
+  skills?: Array<{ name: string; description: string; instructions: string }>;
+  tools?: Array<{ name: string; description: string; params?: Record<string, { type: string; required?: boolean }> }>;
+  subAgents?: Array<{ name: string; description: string; model?: string }>;
+}): string {
+  const dir = mkdtempSync(join(tmpdir(), 'gitagent-langchain-test-'));
+
+  const agentYaml: any = {
+    spec_version: '0.1.0',
+    name: opts.name ?? 'test-agent',
+    version: '0.1.0',
+    description: opts.description ??
'A test agent', + }; + + if (opts.model || opts.temperature || opts.maxTokens) { + agentYaml.model = {}; + if (opts.model) agentYaml.model.preferred = opts.model; + if (opts.temperature !== undefined || opts.maxTokens !== undefined) { + agentYaml.model.constraints = {}; + if (opts.temperature !== undefined) agentYaml.model.constraints.temperature = opts.temperature; + if (opts.maxTokens !== undefined) agentYaml.model.constraints.max_tokens = opts.maxTokens; + } + } + + if (opts.maxTurns) { + agentYaml.runtime = { max_turns: opts.maxTurns }; + } + + if (opts.subAgents && opts.subAgents.length > 0) { + agentYaml.agents = {}; + agentYaml.delegation = { mode: 'manual' }; + for (const sub of opts.subAgents) { + agentYaml.agents[sub.name] = { description: sub.description }; + + const subDir = join(dir, 'agents', sub.name); + mkdirSync(subDir, { recursive: true }); + + const subYaml: any = { + spec_version: '0.1.0', + name: sub.name, + version: '0.1.0', + description: sub.description, + }; + if (sub.model) { + subYaml.model = { preferred: sub.model }; + } + + writeFileSync( + join(subDir, 'agent.yaml'), + `spec_version: '0.1.0'\nname: ${sub.name}\nversion: '0.1.0'\ndescription: '${sub.description}'\n${sub.model ? `model:\n preferred: ${sub.model}\n` : ''}`, + 'utf-8', + ); + writeFileSync(join(subDir, 'SOUL.md'), `I am ${sub.name}`, 'utf-8'); + } + } + + writeFileSync( + join(dir, 'agent.yaml'), + Object.entries(agentYaml).map(([k, v]) => { + if (typeof v === 'object') { + return `${k}:\n${JSON.stringify(v, null, 2).split('\n').map(l => ' ' + l.replace(/["{},]/g, '')).join('\n')}`; + } + return `${k}: ${typeof v === 'string' ? 
`'${v}'` : v}`; + }).join('\n'), + 'utf-8', + ); + + if (opts.soul !== undefined) { + writeFileSync(join(dir, 'SOUL.md'), opts.soul, 'utf-8'); + } + + if (opts.rules !== undefined) { + writeFileSync(join(dir, 'RULES.md'), opts.rules, 'utf-8'); + } + + if (opts.skills) { + for (const skill of opts.skills) { + const skillDir = join(dir, 'skills', skill.name); + mkdirSync(skillDir, { recursive: true }); + writeFileSync( + join(skillDir, 'SKILL.md'), + `---\nname: ${skill.name}\ndescription: '${skill.description}'\n---\n\n${skill.instructions}\n`, + 'utf-8', + ); + } + } + + if (opts.tools) { + const toolsDir = join(dir, 'tools'); + mkdirSync(toolsDir, { recursive: true }); + for (const tool of opts.tools) { + const toolYaml: any = { + name: tool.name, + description: tool.description, + }; + if (tool.params) { + toolYaml.input_schema = { + properties: tool.params, + required: Object.entries(tool.params) + .filter(([_, schema]) => (schema as any).required) + .map(([name]) => name), + }; + } + writeFileSync( + join(toolsDir, `${tool.name}.yaml`), + `name: ${tool.name}\ndescription: ${tool.description}\n${tool.params ? 
`input_schema:\n properties:\n${Object.entries(tool.params).map(([name, schema]) => ` ${name}:\n type: ${schema.type}`).join('\n')}\n required: [${Object.entries(tool.params).filter(([_, s]) => (s as any).required).map(([n]) => n).join(', ')}]\n` : ''}`, + 'utf-8', + ); + } + } + + return dir; +} + +// --------------------------------------------------------------------------- +// detectProvider +// --------------------------------------------------------------------------- + +describe('detectProvider', () => { + test('detects OpenAI for gpt-4o', () => { + const result = detectProvider('gpt-4o'); + assert.ok(result); + assert.equal(result.provider, 'openai'); + assert.equal(result.pipPackage, 'langchain-openai'); + assert.equal(result.envVar, 'OPENAI_API_KEY'); + }); + + test('detects OpenAI for gpt-4', () => { + const result = detectProvider('gpt-4'); + assert.ok(result); + assert.equal(result.provider, 'openai'); + assert.equal(result.pipPackage, 'langchain-openai'); + }); + + test('detects OpenAI for o1-mini', () => { + const result = detectProvider('o1-mini'); + assert.ok(result); + assert.equal(result.provider, 'openai'); + }); + + test('detects OpenAI for o3-mini', () => { + const result = detectProvider('o3-mini'); + assert.ok(result); + assert.equal(result.provider, 'openai'); + }); + + test('detects Anthropic for claude-3-5-sonnet', () => { + const result = detectProvider('claude-3-5-sonnet'); + assert.ok(result); + assert.equal(result.provider, 'anthropic'); + assert.equal(result.pipPackage, 'langchain-anthropic'); + assert.equal(result.envVar, 'ANTHROPIC_API_KEY'); + }); + + test('detects Anthropic for claude-opus-4-6', () => { + const result = detectProvider('claude-opus-4-6'); + assert.ok(result); + assert.equal(result.provider, 'anthropic'); + }); + + test('returns null for unsupported model', () => { + const result = detectProvider('llama3.1'); + assert.equal(result, null); + }); + + test('returns null for gemini models', () => { + const result = 
detectProvider('gemini-pro'); + assert.equal(result, null); + }); +}); + +// --------------------------------------------------------------------------- +// exportToLangChain — basic structure +// --------------------------------------------------------------------------- + +describe('exportToLangChain — basic structure', () => { + test('generates valid Python code with LangChain imports', () => { + const dir = makeAgentDir({ name: 'test-agent', model: 'gpt-4o' }); + const result = exportToLangChain(dir); + + assert.match(result, /from langchain\.chat_models import init_chat_model/); + assert.match(result, /from langchain\.agents import create_agent/); + assert.match(result, /from langchain\.tools import tool/); + }); + + test('includes pip install instructions with correct packages', () => { + const dir = makeAgentDir({ name: 'test-agent', model: 'gpt-4o' }); + const result = exportToLangChain(dir); + + assert.match(result, /pip install langchain langchain-openai/); + }); + + test('includes Anthropic package for claude models', () => { + const dir = makeAgentDir({ name: 'test-agent', model: 'claude-3-5-sonnet' }); + const result = exportToLangChain(dir); + + assert.match(result, /pip install langchain langchain-anthropic/); + }); + + test('includes agent name and description in docstring', () => { + const dir = makeAgentDir({ name: 'my-agent', description: 'My test description' }); + const result = exportToLangChain(dir); + + assert.match(result, /my-agent/); + assert.match(result, /My test description/); + }); + + test('includes SOUL.md content in system prompt', () => { + const dir = makeAgentDir({ soul: '# Soul\n\nI am helpful and precise.' }); + const result = exportToLangChain(dir); + + assert.match(result, /I am helpful and precise/); + }); + + test('includes RULES.md content in system prompt', () => { + const dir = makeAgentDir({ rules: '# Rules\n\nNever share credentials.' 
}); + const result = exportToLangChain(dir); + + assert.match(result, /Never share credentials/); + }); +}); + +// --------------------------------------------------------------------------- +// exportToLangChain — model configuration +// --------------------------------------------------------------------------- + +describe('exportToLangChain — model configuration', () => { + test('uses default gpt-4o when no model specified', () => { + const dir = makeAgentDir({}); + const result = exportToLangChain(dir); + + assert.match(result, /init_chat_model\("gpt-4o"/); + }); + + test('uses specified model', () => { + const dir = makeAgentDir({ model: 'claude-opus-4-6' }); + const result = exportToLangChain(dir); + + assert.match(result, /init_chat_model\("claude-opus-4-6"/); + }); + + test('includes temperature parameter', () => { + const dir = makeAgentDir({ model: 'gpt-4o', temperature: 0.3 }); + const result = exportToLangChain(dir); + + assert.match(result, /temperature=0\.3/); + }); + + test('includes max_tokens parameter when specified', () => { + const dir = makeAgentDir({ model: 'gpt-4o', maxTokens: 4096 }); + const result = exportToLangChain(dir); + + assert.match(result, /max_tokens=4096/); + }); + + test('includes recursion_limit when max_turns specified', () => { + const dir = makeAgentDir({ model: 'gpt-4o', maxTurns: 10 }); + const result = exportToLangChain(dir); + + assert.match(result, /recursion_limit.*20/); + }); +}); + +// --------------------------------------------------------------------------- +// exportToLangChain — environment variables +// --------------------------------------------------------------------------- + +describe('exportToLangChain — environment variables', () => { + test('checks OPENAI_API_KEY for OpenAI models', () => { + const dir = makeAgentDir({ model: 'gpt-4o' }); + const result = exportToLangChain(dir); + + assert.match(result, /OPENAI_API_KEY/); + assert.match(result, /os\.environ\.get\("OPENAI_API_KEY"\)/); + }); + + 
test('checks ANTHROPIC_API_KEY for Anthropic models', () => { + const dir = makeAgentDir({ model: 'claude-3-5-sonnet' }); + const result = exportToLangChain(dir); + + assert.match(result, /ANTHROPIC_API_KEY/); + assert.match(result, /os\.environ\.get\("ANTHROPIC_API_KEY"\)/); + }); +}); + +// --------------------------------------------------------------------------- +// exportToLangChain — tools +// --------------------------------------------------------------------------- + +describe('exportToLangChain — tools', () => { + test('generates tool stubs with @tool decorator', () => { + const dir = makeAgentDir({ + tools: [ + { name: 'search-web', description: 'Search the web', params: { query: { type: 'string', required: true } } }, + ], + }); + const result = exportToLangChain(dir); + + assert.match(result, /@tool/); + assert.match(result, /def search_web\(/); + assert.match(result, /Search the web/); + }); + + test('converts tool names with hyphens to underscores', () => { + const dir = makeAgentDir({ + tools: [ + { name: 'my-custom-tool', description: 'A tool' }, + ], + }); + const result = exportToLangChain(dir); + + assert.match(result, /def my_custom_tool\(/); + }); + + test('generates correct Python type annotations', () => { + const dir = makeAgentDir({ + tools: [ + { + name: 'test-tool', + description: 'Test', + params: { + name: { type: 'string', required: true }, + count: { type: 'integer', required: true }, + ratio: { type: 'number' }, + enabled: { type: 'boolean' }, + }, + }, + ], + }); + const result = exportToLangChain(dir); + + assert.match(result, /name: str/); + assert.match(result, /count: int/); + assert.match(result, /ratio: float = None/); + assert.match(result, /enabled: bool = None/); + }); +}); + +// --------------------------------------------------------------------------- +// exportToLangChain — sub-agents +// --------------------------------------------------------------------------- + +describe('exportToLangChain — sub-agents', () => { + 
test('generates sub-agent delegates', () => { + const dir = makeAgentDir({ + model: 'gpt-4o', + subAgents: [ + { name: 'fact-checker', description: 'Verifies facts', model: 'gpt-4o-mini' }, + ], + }); + const result = exportToLangChain(dir); + + assert.match(result, /_fact_checker = create_agent\(/); + assert.match(result, /def delegate_to_fact_checker\(/); + assert.match(result, /Verifies facts/); + }); + + test('sub-agent uses its own model', () => { + const dir = makeAgentDir({ + model: 'gpt-4o', + subAgents: [ + { name: 'helper', description: 'Helps', model: 'claude-3-5-sonnet' }, + ], + }); + const result = exportToLangChain(dir); + + assert.match(result, /init_chat_model\("claude-3-5-sonnet"/); + }); + + test('includes both provider packages when sub-agent uses different provider', () => { + const dir = makeAgentDir({ + model: 'gpt-4o', + subAgents: [ + { name: 'helper', description: 'Helps', model: 'claude-3-5-sonnet' }, + ], + }); + const result = exportToLangChain(dir); + + assert.match(result, /langchain-openai/); + assert.match(result, /langchain-anthropic/); + }); + + test('checks both API keys when sub-agent uses different provider', () => { + const dir = makeAgentDir({ + model: 'gpt-4o', + subAgents: [ + { name: 'helper', description: 'Helps', model: 'claude-3-5-sonnet' }, + ], + }); + const result = exportToLangChain(dir); + + assert.match(result, /OPENAI_API_KEY/); + assert.match(result, /ANTHROPIC_API_KEY/); + }); +}); + +// --------------------------------------------------------------------------- +// exportToLangChain — skills +// --------------------------------------------------------------------------- + +describe('exportToLangChain — skills', () => { + test('includes skill instructions in system prompt', () => { + const dir = makeAgentDir({ + skills: [ + { name: 'code-review', description: 'Reviews code', instructions: 'Check for bugs and style issues.' 
}, + ], + }); + const result = exportToLangChain(dir); + + assert.match(result, /code-review/); + assert.match(result, /Check for bugs and style issues/); + }); + + test('includes multiple skills', () => { + const dir = makeAgentDir({ + skills: [ + { name: 'skill-a', description: 'Skill A', instructions: 'Do A.' }, + { name: 'skill-b', description: 'Skill B', instructions: 'Do B.' }, + ], + }); + const result = exportToLangChain(dir); + + assert.match(result, /skill-a/); + assert.match(result, /skill-b/); + assert.match(result, /Do A/); + assert.match(result, /Do B/); + }); +}); + +// --------------------------------------------------------------------------- +// exportToLangChain — error handling +// --------------------------------------------------------------------------- + +describe('exportToLangChain — error handling', () => { + test('throws error for unsupported model', () => { + const dir = makeAgentDir({ model: 'llama3.1' }); + + assert.throws( + () => exportToLangChain(dir), + /Model "llama3\.1" is not supported/ + ); + }); + + test('throws error for unsupported sub-agent model', () => { + const dir = makeAgentDir({ + model: 'gpt-4o', + subAgents: [ + { name: 'helper', description: 'Helps', model: 'gemini-pro' }, + ], + }); + + assert.throws( + () => exportToLangChain(dir), + /Sub-agent model "gemini-pro" is not supported/ + ); + }); +}); diff --git a/src/commands/import.ts b/src/commands/import.ts index fa07732..4f51ea3 100644 --- a/src/commands/import.ts +++ b/src/commands/import.ts @@ -521,8 +521,7 @@ function parseSections(markdown: string): [string, string][] { export const importCommand = new Command('import') .description('Import from other agent formats') -.requiredOption('--from ', 'Source format (claude, cursor, crewai, opencode, gemini)') -.requiredOption('--from ', 'Source format (claude, cursor, crewai, opencode, codex)') + .requiredOption('--from ', 'Source format (claude, cursor, crewai, opencode, gemini, codex)') .argument('', 'Source file 
or directory path') .option('-d, --dir ', 'Target directory', '.') .action((sourcePath: string, options: ImportOptions) => { @@ -546,15 +545,15 @@ export const importCommand = new Command('import') case 'opencode': importFromOpenCode(sourcePath, targetDir); break; -case 'gemini': + case 'gemini': importFromGemini(sourcePath, targetDir); break; + case 'codex': + importFromCodex(sourcePath, targetDir); + break; default: error(`Unknown format: ${options.from}`); - info('Supported formats: claude, cursor, crewai, opencode, gemini'); -case 'codex': - importFromCodex(sourcePath, targetDir); - info('Supported formats: claude, cursor, crewai, opencode, codex'); + info('Supported formats: claude, cursor, crewai, opencode, gemini, codex'); process.exit(1); } diff --git a/src/runners/git.ts b/src/runners/git.ts index d9cc34a..cc9f421 100644 --- a/src/runners/git.ts +++ b/src/runners/git.ts @@ -12,7 +12,6 @@ import { runWithNanobot } from './nanobot.js'; import { runWithLyzr } from './lyzr.js'; import { runWithGitHub } from './github.js'; import { runWithOpenCode } from './opencode.js'; -import { runWithLangChain } from './langchain.js'; import { error, info, success, label, heading, divider, warn } from '../utils/format.js'; export interface GitRunOptions { @@ -114,9 +113,6 @@ export async function runWithGit( case 'github': await runWithGitHub(agentDir, manifest, { prompt: options.prompt }); break; - case 'langchain': - runWithLangChain(agentDir, manifest); - break; case 'prompt': console.log(exportToSystemPrompt(agentDir)); break;