simplify mono agent

Author: Gaetan Hurel, 2025-06-27 09:38:55 +02:00
parent 9afce9f7ea
commit 60cd7c11ed
5 changed files with 138 additions and 270 deletions

File: README.md

@@ -27,8 +27,7 @@ User Input → ReAct Agent → Tools (Shell + Log Analyzer) → Response

 ## Files

 - `main.py`: Main application with ReAct agent implementation
-- `log_analyzer.py`: Specialized tool for analyzing log files
-- `loghub/`: Symbolic link to log files directory
+- `custom_tools/`: Directory containing custom tools (poem generation tool)

 ## Tools Available
@@ -37,11 +36,12 @@ User Input → ReAct Agent → Tools (Shell + Log Analyzer) → Response
    - File operations
    - Network diagnostics

-2. **Log Analyzer Tool**: Analyze log files with different modes:
-   - `error_patterns`: Find and categorize error messages
-   - `frequency`: Analyze frequency of different log patterns
-   - `timeline`: Show chronological patterns of events
-   - `summary`: Provide an overall summary of the log file
+2. **Poem Tool**: Generate beautiful poems with different themes:
+   - `nature`: Poems about nature and the environment
+   - `tech`: Poems about technology and programming
+   - `motivational`: Inspirational and motivational poems
+   - `friendship`: Poems about friendship and relationships
+   - `random`: Randomly selects from any available poem type

 ## Usage

File: custom_tools/__init__.py (new)

@@ -0,0 +1 @@
"""Custom tools package for the LangGraph demo agent."""

File: custom_tools/poem_tool.py (new)

@@ -0,0 +1,81 @@
import random

from langchain_core.tools import tool


@tool
def print_poem(poem_type: str = "random") -> str:
    """
    Print a beautiful poem for the user.

    Args:
        poem_type: Type of poem to print. Options: "nature", "tech", "motivational", "friendship", "random"

    Returns:
        A beautiful poem as a string
    """
    poems = {
        "nature": """
🌿 Nature's Symphony 🌿

In the whisper of the wind through ancient trees,
Where sunlight dances on the morning breeze,
The earth awakens with a gentle song,
A melody that's carried all along.

Rivers flow with stories untold,
Mountains stand majestic and bold,
In nature's embrace, we find our peace,
Where all our worries and troubles cease.
""",
        "tech": """
💻 Digital Dreams 💻

In lines of code, our dreams take flight,
Binary stars illuminate the night,
Algorithms dance in silicon halls,
While innovation answers progress calls.

From circuits small to networks vast,
We build the future, learn from the past,
In every byte and every bit,
Human creativity and logic fit.
""",
        "motivational": """
Rise and Shine

Every dawn brings a chance anew,
To chase the dreams that call to you,
Though mountains high may block your way,
Your spirit grows stronger every day.

The path is long, the journey tough,
But you, my friend, are strong enough,
With courage as your faithful guide,
Success will walk right by your side.
""",
        "friendship": """
🤝 Bonds of Friendship 🤝

In laughter shared and tears that fall,
True friendship conquers over all,
Through seasons change and years that pass,
These precious bonds forever last.

A friend's warm smile, a helping hand,
Together strong, united we stand,
In friendship's light, we find our way,
Brightening each and every day.
"""
    }

    # If random or invalid type, pick a random poem
    if poem_type == "random" or poem_type not in poems:
        poem_type = random.choice(list(poems.keys()))

    selected_poem = poems[poem_type]
    return f"Here's a {poem_type} poem for you:\n{selected_poem}"

File: log_analyzer.py (deleted)

@ -1,142 +0,0 @@
import os
import re
from collections import Counter
from typing import List, Dict, Any
from langchain_core.tools import tool
@tool
def analyze_log_file(file_path: str, analysis_type: str = "error_patterns") -> Dict[str, Any]:
"""
Analyze log files for common sysadmin debugging patterns.
Args:
file_path: Path to the log file (relative to loghub directory)
analysis_type: Type of analysis - "error_patterns", "frequency", "timeline", or "summary"
Returns:
Dictionary with analysis results
"""
try:
# Construct full path
if not file_path.startswith('/'):
full_path = f"loghub/{file_path}"
else:
full_path = file_path
if not os.path.exists(full_path):
return {"error": f"File not found: {full_path}"}
with open(full_path, 'r', encoding='utf-8', errors='ignore') as f:
lines = f.readlines()
if analysis_type == "error_patterns":
return _analyze_error_patterns(lines, file_path)
elif analysis_type == "frequency":
return _analyze_frequency(lines, file_path)
elif analysis_type == "timeline":
return _analyze_timeline(lines, file_path)
elif analysis_type == "summary":
return _analyze_summary(lines, file_path)
else:
return {"error": f"Unknown analysis type: {analysis_type}"}
except Exception as e:
return {"error": f"Error analyzing file: {str(e)}"}
def _analyze_error_patterns(lines: List[str], file_path: str) -> Dict[str, Any]:
"""Analyze error patterns in log lines."""
error_keywords = ['error', 'fail', 'exception', 'critical', 'fatal', 'denied', 'refused', 'timeout']
error_lines = []
error_counts = Counter()
for i, line in enumerate(lines, 1):
line_lower = line.lower()
for keyword in error_keywords:
if keyword in line_lower:
error_lines.append(f"Line {i}: {line.strip()}")
error_counts[keyword] += 1
break
return {
"file": file_path,
"analysis_type": "error_patterns",
"total_lines": len(lines),
"error_lines_count": len(error_lines),
"error_keywords_frequency": dict(error_counts.most_common()),
"sample_errors": error_lines[:10], # First 10 error lines
"summary": f"Found {len(error_lines)} error-related lines out of {len(lines)} total lines"
}
def _analyze_frequency(lines: List[str], file_path: str) -> Dict[str, Any]:
"""Analyze frequency patterns in logs."""
# Extract common patterns (simplified)
patterns = Counter()
for line in lines:
# Remove timestamps and specific values for pattern matching
cleaned = re.sub(r'\d+', 'NUM', line)
cleaned = re.sub(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', 'IP', cleaned)
cleaned = re.sub(r'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}', 'UUID', cleaned)
patterns[cleaned.strip()] += 1
return {
"file": file_path,
"analysis_type": "frequency",
"total_lines": len(lines),
"unique_patterns": len(patterns),
"most_common_patterns": [{"pattern": p, "count": c} for p, c in patterns.most_common(10)],
"summary": f"Found {len(patterns)} unique patterns in {len(lines)} lines"
}
def _analyze_timeline(lines: List[str], file_path: str) -> Dict[str, Any]:
"""Analyze timeline patterns in logs."""
timestamps = []
# Try to extract timestamps (simplified for demo)
timestamp_patterns = [
r'(\w{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2})', # Jun 14 15:16:01
r'(\[\w{3}\s+\w{3}\s+\d{2}\s+\d{2}:\d{2}:\d{2}\s+\d{4}\])', # [Sun Dec 04 04:47:44 2005]
]
for line in lines[:100]: # Sample first 100 lines for demo
for pattern in timestamp_patterns:
match = re.search(pattern, line)
if match:
timestamps.append(match.group(1))
break
return {
"file": file_path,
"analysis_type": "timeline",
"total_lines": len(lines),
"timestamps_found": len(timestamps),
"sample_timestamps": timestamps[:10],
"summary": f"Extracted {len(timestamps)} timestamps from first 100 lines"
}
def _analyze_summary(lines: List[str], file_path: str) -> Dict[str, Any]:
"""Provide a general summary of the log file."""
total_lines = len(lines)
# Basic statistics
avg_line_length = sum(len(line) for line in lines) / total_lines if total_lines > 0 else 0
empty_lines = sum(1 for line in lines if not line.strip())
# Sample content
sample_lines = [line.strip() for line in lines[:5] if line.strip()]
return {
"file": file_path,
"analysis_type": "summary",
"total_lines": total_lines,
"empty_lines": empty_lines,
"average_line_length": round(avg_line_length, 2),
"sample_content": sample_lines,
"summary": f"Log file with {total_lines} lines, average length {avg_line_length:.1f} characters"
}

File: main.py

@@ -3,11 +3,11 @@ from langchain.chat_models import init_chat_model
 from langchain_community.tools.shell.tool import ShellTool
 from langgraph.prebuilt import create_react_agent
 from langchain_core.messages import HumanMessage
-from log_analyzer import analyze_log_file
+from custom_tools.poem_tool import print_poem


 def create_agent():
-    """Create and return a ReAct agent with shell and log analysis capabilities."""
+    """Create and return a ReAct agent with shell and poem capabilities."""

     # Initialize the chat model (using OpenAI GPT-4)
     # Make sure you have set your OPENAI_API_KEY environment variable
@@ -15,32 +15,32 @@ def create_agent():

     # Define the tools available to the agent
     shell_tool = ShellTool()
-    tools = [shell_tool, analyze_log_file]
+    tools = [shell_tool, print_poem]

-    # Create a ReAct agent with system prompt
-    system_prompt = """You are a helpful assistant with access to shell commands and log analysis capabilities.
-
-You can:
-1. Execute shell commands using the shell tool to interact with the system
-2. Analyze log files using the analyze_log_file tool to help with debugging and system administration tasks
-
-The log analyzer can process files in the loghub directory with different analysis types:
-- "error_patterns": Find and categorize error messages
-- "frequency": Analyze frequency of different log patterns
-- "timeline": Show chronological patterns of events
-- "summary": Provide an overall summary of the log file
-
-When helping users:
-- Be thorough in your analysis
-- Explain what you're doing and why
-- Use appropriate tools based on the user's request
-- If analyzing logs, suggest which analysis type might be most helpful
-- Always be cautious with shell commands and explain what they do
-
-Available log files are in the loghub directory with subdirectories for different systems like:
-Android, Apache, BGL, Hadoop, HDFS, HealthApp, HPC, Linux, Mac, OpenSSH, OpenStack, Proxifier, Spark, Thunderbird, Windows, Zookeeper
-"""
+    # Create a ReAct agent with simplified system prompt
+    system_prompt = """You are a helpful assistant with access to shell commands and poem generation capabilities.
+
+You can:
+1. Execute shell commands using the shell tool to interact with the system
+2. Generate and print beautiful poems using the print_poem tool
+
+The poem tool can create different types of poems:
+- "nature": Poems about nature and the environment
+- "tech": Poems about technology and programming
+- "motivational": Inspirational and motivational poems
+- "friendship": Poems about friendship and relationships
+- "random": Randomly selects from any available poem type
+
+When helping users:
+- Be friendly and helpful
+- Use appropriate tools based on the user's request
+- Always be cautious with shell commands and explain what they do
+- For system monitoring commands like 'top', 'ps', etc., use appropriate flags to avoid hanging
+- When users ask for poems or want to brighten their day, use the poem tool
+"""

     # Create the ReAct agent
     agent = create_react_agent(
         llm,
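
For orientation, a minimal sketch of how the pieces in this hunk wire together. The model identifier and the prompt-passing keyword are assumptions (recent langgraph releases accept `prompt=`; older ones used `state_modifier=`), since the diff truncates the `create_react_agent(...)` call:

# Wiring sketch only; model id and prompt keyword are assumptions.
from langchain.chat_models import init_chat_model
from langchain_community.tools.shell.tool import ShellTool
from langgraph.prebuilt import create_react_agent

from custom_tools.poem_tool import print_poem

llm = init_chat_model("gpt-4", model_provider="openai")  # assumed model id
system_prompt = "You are a helpful assistant with shell and poem tools."  # abridged
agent = create_react_agent(llm, [ShellTool(), print_poem], prompt=system_prompt)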
@@ -51,87 +51,23 @@ Android, Apache, BGL, Hadoop, HDFS, HealthApp, HPC, Linux, Mac, OpenSSH, OpenStack, Proxifier, Spark, Thunderbird, Windows, Zookeeper
     return agent


-def stream_agent_updates(agent, user_input: str, conversation_history: list):
-    """Stream agent updates for a user input with conversation history."""
+def run_agent_query(agent, user_input: str, conversation_history: list):
+    """Run a simple agent query and display results cleanly."""
     # Create a human message
     message = HumanMessage(content=user_input)

     # Add the new message to conversation history
     conversation_history.append(message)

-    print("\nAgent: ", end="", flush=True)
-
-    # Use the agent's stream method to get real-time updates with full conversation
-    final_response = ""
-    tool_calls_made = False
-
-    for event in agent.stream({"messages": conversation_history}, stream_mode="updates"):
-        for node_name, node_output in event.items():
-            if node_name == "agent" and "messages" in node_output:
-                last_message = node_output["messages"][-1]
-
-                # Check if this is a tool call
-                if hasattr(last_message, 'tool_calls') and last_message.tool_calls:
-                    tool_calls_made = True
-                    for tool_call in last_message.tool_calls:
-                        print(f"\n🔧 Using tool: {tool_call['name']}")
-                        if tool_call.get('args'):
-                            print(f"   Args: {tool_call['args']}")
-
-                # Check if this is the final response (no tool calls)
-                elif hasattr(last_message, 'content') and last_message.content and not getattr(last_message, 'tool_calls', None):
-                    final_response = last_message.content
-
-            elif node_name == "tools" and "messages" in node_output:
-                # Show tool results
-                for msg in node_output["messages"]:
-                    if hasattr(msg, 'content'):
-                        print(f"\n📋 Tool result: {msg.content[:200]}{'...' if len(msg.content) > 200 else ''}")
-
-    # Print the final response
-    if final_response:
-        if tool_calls_made:
-            print(f"\n\n{final_response}")
-        else:
-            print(final_response)
-
-        # Add the agent's response to conversation history
-        from langchain_core.messages import AIMessage
-        conversation_history.append(AIMessage(content=final_response))
-    else:
-        print("No response generated.")
-
-    print()  # Add newline
-
-
-def visualize_agent(agent):
-    """Display the agent's graph structure."""
-    try:
-        print("\n📊 Agent Graph Structure:")
-        print("=" * 40)
-
-        # Get the graph and display its structure
-        graph = agent.get_graph()
-
-        # Print nodes
-        print("Nodes:")
-        for node_id in graph.nodes:
-            print(f"  - {node_id}")
-
-        # Print edges
-        print("\nEdges:")
-        for edge in graph.edges:
-            print(f"  - {edge}")
-
-        print("=" * 40)
-        print("This agent follows the ReAct (Reasoning and Acting) pattern:")
-        print("1. Receives user input")
-        print("2. Reasons about what tools to use")
-        print("3. Executes tools when needed")
-        print("4. Provides final response")
-        print("=" * 40)
-
-    except Exception as e:
-        print(f"Could not visualize agent: {e}")
+    # Use the agent's stream method for clean output like the LangChain tutorial
+    for step in agent.stream({"messages": conversation_history}, stream_mode="values"):
+        step["messages"][-1].pretty_print()
+
+    # Add the agent's response to conversation history
+    if step and "messages" in step:
+        conversation_history.append(step["messages"][-1])


 def main():
     # Check if required API keys are set
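
The replacement helper relies on LangGraph's `stream_mode="values"` contract: each yielded step is the full graph state, so `step["messages"][-1]` is always the newest message and the final step holds the agent's reply. A standalone sketch of that contract, with an explicit guard for an empty stream (the committed code reads `step` after the loop, which would raise `NameError` if the stream yielded nothing):

# Sketch of the stream_mode="values" contract; assumes `agent` from create_agent().
from langchain_core.messages import HumanMessage

history = [HumanMessage(content="Write me a tech poem")]
last_step = None
for step in agent.stream({"messages": history}, stream_mode="values"):
    step["messages"][-1].pretty_print()  # print each new message as it arrives
    last_step = step

if last_step is not None:  # guard the empty-stream case
    history.append(last_step["messages"][-1])  # persist the final AI reply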
@@ -140,27 +76,23 @@ def main():
         print("You can set it by running: export OPENAI_API_KEY='your-api-key-here'")
         return

-    print("🤖 LangGraph Log Analysis Agent")
+    print("🤖 LangGraph Simple Demo Agent")
     print("Type 'quit', 'exit', or 'q' to exit the chat.")
     print("Type 'help' or 'h' for help and examples.")
-    print("Type 'graph' to see the agent structure.")
     print("Type 'clear' or 'reset' to clear conversation history.")
     print("⚠️  WARNING: This agent has shell access - use with caution!")
-    print("📊 Available log analysis capabilities:")
-    print("   - Analyze log files in the loghub directory")
-    print("   - Execute shell commands for system administration")
-    print("   - Help with debugging and troubleshooting")
+    print("🎭 Available capabilities:")
+    print("   - Generate beautiful poems for any occasion")
+    print("   - Execute shell commands for system tasks")
+    print("   - Help with general assistance")
     print("-" * 60)

     # Create the agent
     try:
         agent = create_agent()
-        print("✅ Log Analysis Agent initialized successfully!")
-        print("💡 Try asking: 'Analyze the Apache logs for error patterns'")
-        print("💡 Or: 'List the available log files in the loghub directory'")
-
-        # Show agent structure
-        visualize_agent(agent)
+        print("✅ Simple Demo Agent initialized successfully!")
+        print("💡 Try asking: 'Write me a nature poem'")
+        print("💡 Or: 'Show me the current directory'")
     except Exception as e:
         print(f"❌ Error initializing agent: {e}")
@@ -180,17 +112,13 @@ def main():
             print("Commands:")
             print("  - quit/exit/q: Exit the agent")
             print("  - help/h: Show this help")
-            print("  - graph: Show agent structure")
+            print("  - clear/reset: Clear conversation history")
             print("\nExample queries:")
-            print("  - 'Analyze the Apache logs for error patterns'")
-            print("  - 'Show me a summary of the HDFS logs'")
-            print("  - 'List all available log files'")
-            print("  - 'Find error patterns in Linux logs'")
+            print("  - 'Write me a motivational poem'")
+            print("  - 'Generate a tech poem'")
+            print("  - 'Show me a random poem'")
+            print("  - 'List files in current directory'")
             print("  - 'Check disk usage on the system'")
-            print("  - 'clear': Clear conversation history")
-            continue
-        elif user_input.lower() in ["graph", "structure"]:
-            visualize_agent(agent)
             continue
         elif user_input.lower() in ["clear", "reset"]:
             conversation_history = []
@@ -198,7 +126,7 @@ def main():
             continue

         if user_input.strip():
-            stream_agent_updates(agent, user_input, conversation_history)
+            run_agent_query(agent, user_input, conversation_history)
         else:
             print("Please enter a message.")
@@ -210,4 +138,4 @@ def main():

 if __name__ == "__main__":
     main()