Skip to main content
⚡ Calmops

AI Agents: AutoGPT vs LangChain vs CrewAI - Framework Comparison

Introduction

AI agents have emerged as a transformative paradigm, enabling LLMs to take actions, use tools, and collaborate autonomously. This guide compares three leading frameworks: AutoGPT, LangChain Agents, and CrewAI.

Each framework offers different approaches to building agentic applications, from simple single-agent systems to complex multi-agent collaborations.

Understanding AI Agents

AI agents extend LLMs with:

  • Tool Use: Search, calculations, API calls
  • Memory: Conversation history, persistent storage
  • Planning: Break down complex tasks
  • Reflection: Evaluate and improve actions
# Basic agent loop (illustrative: `llm` is an LLM client assumed to be in
# scope, exposing think() and judge_completion()).
def agent_loop(goal, tools, max_iterations=10):
    """Run a think/act/observe loop until the goal is met or iterations run out.

    Args:
        goal: Natural-language task for the agent to accomplish.
        tools: Mapping of tool name -> tool object with an execute() method.
        max_iterations: Hard cap on loop turns to prevent runaway agents.

    Returns:
        The agent's final textual response, or None if the iteration
        budget is exhausted without completing the goal.
    """
    history = []  # was undefined in the original; track observations locally
    for _ in range(max_iterations):
        # Think: analyze the goal, prior observations, and available tools
        thought = llm.think(goal, history, tools)

        # Act: either call the chosen tool and record its result,
        # or answer the user directly
        if thought.needs_tool:
            result = tools[thought.tool].execute(thought.args)
            history.append(result)
        else:
            return thought.response

        # Observe: ask the LLM whether the goal is now achieved.
        # The original returned `final_answer`, an undefined name; derive
        # the answer from one last reasoning pass instead.
        if llm.judge_completion(goal, history):
            return llm.think(goal, history, tools).response
    return None  # iteration budget exhausted without completion

AutoGPT: Autonomous AI Agent

AutoGPT pioneered autonomous agent systems with GPT-4, enabling self-driven task completion.

AutoGPT Core Features

  • Self-prompting: Agent generates its own sub-tasks
  • Web Access: Browse and extract from websites
  • File Operations: Read, write, manage files
  • Long-term Memory: SQLite-backed persistent memory

AutoGPT Setup and Usage

# Install AutoGPT
pip install autogpt

# Basic AutoGPT configuration
from autogpt.agent import Agent
from autogpt.config import Config

# Persona and capability settings that steer the autonomous agent.
config = Config()
config.ai_name = "Research-Assistant"
config.ai_role = "helpful research assistant"
# Restrict the agent to an explicit whitelist of tools (safer than enabling all).
config.tools = ["browser", "file_operations", "search"]
config.max_tokens = 4000  # per-call token budget for the underlying LLM

# Create and run agent
agent = Agent(config)
# start() drives the self-prompting loop until the agent judges the goal complete.
result = agent.start("Research the latest developments in quantum computing")
print(result)

Custom AutoGPT Tools

# Creating custom tools for AutoGPT
from autogpt.tools import Tool
from pydantic import BaseModel

class SearchInput(BaseModel):
    """Pydantic schema validating the web_search tool's arguments."""
    query: str
    max_results: int = 5

# NOTE: the original inlined `function=async def web_search(...)`, which is a
# syntax error — `def` is a statement, not an expression. Define the coroutine
# first, then pass a reference to it.
async def web_search(query: str, max_results: int = 5):
    """Search DuckDuckGo and return title/url pairs for the top hits."""
    # The library exposes a DDGS class (not a module-level `ddgs` object).
    from duckduckgo_search import DDGS
    results = DDGS().text(query, max_results=max_results)
    return [{"title": r["title"], "url": r["href"]} for r in results]

search_tool = Tool(
    name="web_search",
    description="Search the web for information",
    args_schema=SearchInput,
    function=web_search,
)

# Register tool
agent.register_tool(search_tool)

# Agent uses tool autonomously
agent.start("Find and summarize the top 5 articles about climate change")

AutoGPT with Memory

# AutoGPT memory configuration
from autogpt.memory import LocalSummaryMemory, PineconeMemory
from autogpt.config import Config

config = Config()

# Local SQLite memory
memory = LocalSummaryMemory(
    db_path="./memory.db",
    summary_threshold=5  # summarize once this many entries accumulate
)

# Or Pinecone for vector memory
vector_memory = PineconeMemory(
    api_key=config.pinecone_api_key,
    index_name="autogpt-memory",
    embedding_dim=1536  # presumably sized for OpenAI ada-002 embeddings — confirm
)

# Create agent with memory
# (`Agent` comes from the earlier setup snippet's import.)
agent = Agent(config, memory=vector_memory)

# Agent remembers past interactions
agent.start("Help me write a book about AI")  # Session 1
agent.start("Continue where we left off")       # Session 2 - remembers!

LangChain: Building Agentic Apps

LangChain provides a comprehensive framework for building LLM applications with powerful agent abstractions.

LangChain Agent Types

from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI

# temperature=0 for deterministic, reproducible tool-use reasoning
llm = ChatOpenAI(temperature=0, model="gpt-4")

# Load pre-built tools
# serpapi = web search, llm-math = calculator backed by the LLM
tools = load_tools(["serpapi", "llm-math"], llm=llm)

# Initialize agent with tools
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,  # ReAct prompting, no few-shot examples
    verbose=True  # print the Thought/Action/Observation trace
)

# Run agent
result = agent.run(
    "What was the exact population of Tokyo in 2024, multiplied by 2?"
)

Custom LangChain Tools

from langchain.tools import tool
from langchain.utilities import SerpAPIWrapper

# Define custom tool
@tool
def calculate_compound_interest(
    principal: float,
    rate: float,
    years: int,
    compounds_per_year: int = 12
) -> str:
    """Calculate compound interest for an investment.
    
    Args:
        principal: Initial investment amount
        rate: Annual interest rate (as decimal, e.g., 0.05 for 5%)
        years: Number of years
        compounds_per_year: Times interest compounds per year
    """
    # Standard compound-interest formula: P * (1 + r/n) ** (n * t)
    growth_per_period = 1 + rate / compounds_per_year
    total_periods = compounds_per_year * years
    final_amount = principal * growth_per_period ** total_periods
    # Format as a dollar amount with thousands separators, two decimals
    return f"${final_amount:,.2f}"

@tool
def get_stock_price(symbol: str) -> str:
    """Get current stock price for a given symbol.
    
    Args:
        symbol: Stock ticker symbol (e.g., AAPL, GOOGL)
    """
    # In production, use real API
    quotes = {"AAPL": "178.50", "GOOGL": "140.25", "MSFT": "378.90"}
    ticker = symbol.upper()
    if ticker in quotes:
        return quotes[ticker]
    return "Unknown"

# Use custom tools
# The @tool decorator has already wrapped these functions as Tool objects.
tools = [calculate_compound_interest, get_stock_price]
# CHAT_ZERO_SHOT_REACT_DESCRIPTION pairs ReAct prompting with chat models.
agent = initialize_agent(tools, llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION)

# A single query can exercise both tools within one agent run.
result = agent.run(
    "If I invest $10,000 in AAPL at 5% compound interest for 10 years, "
    "and also tell me GOOGL's current price"
)

LangChain ReAct Agent

from langchain.agents import AgentExecutor, create_react_agent
from langchain.prompts import PromptTemplate
from langchain import LLMChain

# Custom ReAct prompt
# {tools}, {tool_names}, {input} and {agent_scratchpad} are placeholders that
# create_react_agent fills in at run time; the template text defines the
# Thought/Action/Observation format the agent must follow.
react_prompt = PromptTemplate.from_template("""Answer the following questions as best as possible. You have access to the following tools:

{tools}

Use the following format:

Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original question

Begin!

Question: {input}
{agent_scratchpad}""")

# Create ReAct agent
agent = create_react_agent(llm, tools, react_prompt)
agent_executor = AgentExecutor.from_agent_and_tools(
    agent=agent,
    tools=tools,
    verbose=True,
    max_iterations=10,  # hard stop against runaway reasoning loops
    # Message fed back to the model when its output can't be parsed
    handle_parsing_errors="I encountered an error. Please try again."
)

result = agent_executor.invoke({"input": "What is the cube root of 1728?"})

LangChain Agent with Memory

from langchain.memory import ConversationBufferMemory, ChatMessageHistory
from langchain.agents import AgentExecutor

# Add memory to agent
memory = ConversationBufferMemory(
    memory_key="chat_history",  # prompt variable the history is injected under
    return_messages=True  # return Message objects rather than a flat string
)

# Create agent with memory
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,  # memory-aware agent type
    memory=memory,
    verbose=True
)

# Multi-turn conversation
agent.run("What's the weather in San Francisco?")
agent.run("What about Tokyo?")  # Remembers context!

CrewAI: Multi-Agent Framework

CrewAI enables building multi-agent systems where AI agents collaborate as a “crew” to accomplish complex tasks.

CrewAI Core Concepts

from crewai import Agent, Task, Crew, Process
from crewai.tools import SerpApiTool, BrowserTool

# Define tools
search_tool = SerpApiTool()
browser_tool = BrowserTool()

# Create agents
# Each CrewAI agent is characterized by a role, a goal, and a backstory
# that steer the underlying LLM's behavior for that agent.
researcher = Agent(
    role="Research Analyst",
    goal="Find comprehensive information on the given topic",
    backstory="Expert at gathering and analyzing information",
    tools=[search_tool, browser_tool],
    verbose=True
)

writer = Agent(
    role="Content Writer",
    goal="Create engaging content from research",
    backstory="Skilled writer who transforms complex info into clear content",
    tools=[],  # the writer only needs the LLM, no external tools
    verbose=True
)

# Define tasks
research_task = Task(
    description="Research the impact of AI on healthcare",
    agent=researcher,
    expected_output="Comprehensive research report"
)

write_task = Task(
    description="Write an article based on research",
    agent=writer,
    expected_output="Polished article"
)

# Create crew
crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, write_task],
    process=Process.sequential  # Sequential or hierarchical
)

# Execute
# kickoff() runs the tasks in order, feeding each task's output forward.
result = crew.kickoff()
print(result)

CrewAI Hierarchical Process

# Hierarchical process with manager
from crewai import Agent, Task, Crew, Process

# Manager agent oversees the crew
manager = Agent(
    role="Project Manager",
    goal="Ensure crew produces quality work on time",
    backstory="Experienced manager who delegates effectively",
    tools=[],  # the manager delegates; it needs no tools of its own
    verbose=True
)

# Specialist agents
data_analyst = Agent(
    role="Data Analyst",
    goal="Analyze data accurately",
    backstory="Expert in statistical analysis",
    tools=[search_tool],  # NOTE(review): reuses search_tool from the earlier snippet
    verbose=True
)

visualizer = Agent(
    role="Data Visualizer",
    goal="Create compelling visualizations",
    backstory="Designs clear, impactful charts",
    tools=[],
    verbose=True
)

# Tasks
analyze_task = Task(
    description="Analyze sales data trends",
    agent=data_analyst
)

visualize_task = Task(
    description="Create charts from analysis",
    agent=visualizer
)

# Hierarchical crew with manager
# Unlike Process.sequential, the manager agent decides task ordering and
# delegates work among the specialists.
crew = Crew(
    agents=[data_analyst, visualizer],
    tasks=[analyze_task, visualize_task],
    process=Process.hierarchical,
    manager_agent=manager  # Manager coordinates
)

CrewAI Custom Tools

from crewai.tools import BaseTool
from pydantic import BaseModel, Field

class CodeExecutionInput(BaseModel):
    # Pydantic schema describing the single argument the execute_python tool accepts.
    code: str = Field(description="Python code to execute")

class CodeExecutionTool(BaseTool):
    """Execute a snippet of Python and return its result as text.

    SECURITY: running exec/eval on agent-generated code is arbitrary code
    execution. Only use behind a sandbox (container, jail, restricted
    interpreter) — never on untrusted input.
    """
    name: str = "execute_python"
    description: str = "Execute Python code and return result"
    args_schema: type[BaseModel] = CodeExecutionInput
    
    def _run(self, code: str) -> str:
        """Evaluate `code` and return its value or captured stdout.

        The original did `result = exec(code)`, but exec() always returns
        None, so the tool reported "None" for every snippet. Try eval()
        first so expressions yield their value; fall back to exec() for
        statements and capture anything they print.
        """
        import contextlib
        import io
        captured = io.StringIO()
        try:
            with contextlib.redirect_stdout(captured):
                try:
                    value = eval(code, {"__builtins__": __builtins__})
                except SyntaxError:
                    # Not an expression — run it as a statement block.
                    exec(code, {"__builtins__": __builtins__})
                    value = None
            if value is not None:
                return str(value)
            printed = captured.getvalue()
            return printed if printed else "OK (no output)"
        except Exception as e:
            return f"Error: {str(e)}"

# Add custom tool to agent
code_tool = CodeExecutionTool()

developer = Agent(
    role="Python Developer",
    goal="Write and execute code",
    backstory="Expert Python developer",
    tools=[code_tool]  # this agent can run arbitrary Python — sandbox it!
)

Comparison: AutoGPT vs LangChain vs CrewAI

| Aspect           | AutoGPT           | LangChain       | CrewAI              |
|------------------|-------------------|-----------------|---------------------|
| Primary Use      | Autonomous agents | Agent framework | Multi-agent systems |
| Complexity       | High              | Medium          | Low-Medium          |
| Customization    | Limited           | Very High       | High                |
| Multi-agent      | Basic             | Manual          | Built-in            |
| Learning Curve   | Steep             | Moderate        | Low                 |
| Production Ready | Experimental      | Yes             | Yes                 |
| Documentation    | Limited           | Excellent       | Good                |

When to Use Each Framework

Use AutoGPT When:

  • You need fully autonomous task completion
  • Experimenting with AI agent capabilities
  • Building proof-of-concept agents
# Good: AutoGPT for autonomous research
# (Agent/config as defined in the AutoGPT setup section above.)
agent = Agent(config)
agent.start("Find and summarize all news about AI regulation in 2024")

Use LangChain When:

  • Building production LLM applications
  • Need fine-grained control over agent behavior
  • Complex tool orchestration required
# Good: LangChain for production apps
# You supply the LLM, the tool list, and the prompt template explicitly.
agent = create_react_agent(llm, custom_tools, prompt)
# Full control over every component

Use CrewAI When:

  • Building multi-agent systems
  • Collaborative task completion
  • Need clear role-based agents
# Good: CrewAI for team collaboration
# Role-based agents plus a task list; the framework handles coordination.
crew = Crew(agents=[researcher, writer, editor], tasks=tasks)
crew.kickoff()  # Agents collaborate automatically

Bad Practices to Avoid

Bad Practice 1: No Tool Limits

# Bad: Agent can do anything without constraints
agent = initialize_agent(all_tools, llm)  # Dangerous!
# Could: delete files, send emails, spend money

# Good: Limit tools and add safeguards
# Whitelist read-only tools and cap the number of reasoning turns.
safe_tools = [search_tool, read_tool]
agent = initialize_agent(safe_tools, llm, max_iterations=5)

Bad Practice 2: Infinite Loops

# Bad: No max iterations
agent = Agent(max_iterations=999999)  # Could run forever!

# Good: Set reasonable limits
# Bound both the number of turns and the wall-clock runtime.
agent = Agent(
    max_iterations=10,
    max_execution_time=300  # 5 minutes max
)

Bad Practice 3: No Output Validation

# Bad: Blindly trust agent output
result = agent.run("Calculate anything")
print(result)  # Could be wrong or harmful

# Good: Validate and verify
result = agent.run("Calculate compound interest")
# validate_result is your own domain-specific check (range, type, sanity).
validate_result(result)  # Check correctness

Good Practices Summary

Tool Definition Best Practices

# Good: Comprehensive tool documentation
@tool
def analyze_data(
    file_path: str,
    analysis_type: str = "basic"
) -> dict:
    """Analyze data from a CSV file.
    
    Args:
        file_path: Path to CSV file (must be in /data/)
        analysis_type: Type of analysis - 'basic', 'statistical', or 'full'
    
    Returns:
        Dictionary with analysis results
    
    Raises:
        ValueError: If file not in /data/ directory
    """
    # Implementation
    # (intentionally omitted — this snippet illustrates docstring style;
    # a real body must return a dict to satisfy the annotation)

Agent Memory Management

# Good: Efficient memory usage — keep only a sliding window of turns.
# NOTE: the original imported `BufferWindowMemory` but then instantiated
# ConversationBufferMemory with k=5; `k` is only understood by the *window*
# memory class, whose real name is ConversationBufferWindowMemory.
from langchain.memory import ConversationBufferWindowMemory

memory = ConversationBufferWindowMemory(
    memory_key="chat_history",
    k=5  # Only keep last 5 interactions
)

# Or use summarization for long conversations.
# SummarizerMixin is a mixin, not a standalone memory; the usable
# token-bounded summarizing memory is ConversationSummaryBufferMemory.
from langchain.memory import ConversationSummaryBufferMemory

memory = ConversationSummaryBufferMemory(
    llm=llm,
    max_token_limit=2000  # Summarize when exceeded
)

Error Handling

# Good: Robust error handling
from langchain.tools import ToolException

@tool
def risky_operation(input: str) -> str:
    # NOTE(review): parameter name shadows the `input` builtin; kept because
    # the agent sees the parameter name as part of the tool's schema.
    try:
        return do_operation(input)
    except Exception as e:
        # Re-raise as ToolException so the agent observes the failure and
        # can retry, instead of the whole run crashing.
        raise ToolException(f"Operation failed: {str(e)}")

# Agent handles gracefully
agent = initialize_agent(
    [risky_operation],
    llm,
    # Message returned to the model when its output can't be parsed.
    handle_parsing_errors="Operation failed. Try a different approach."
)

External Resources

Comments