diff --git a/main.py b/main.py
index 851b396..799d73f 100644
--- a/main.py
+++ b/main.py
@@ -1,78 +1,130 @@
 import os
-from typing import Annotated
-from typing_extensions import TypedDict
 from langchain.chat_models import init_chat_model
 from langchain_community.tools.shell.tool import ShellTool
-from langgraph.graph import StateGraph, START, END
-from langgraph.graph.message import add_messages
-from langgraph.prebuilt import ToolNode, tools_condition
+from langgraph.prebuilt import create_react_agent
+from langchain_core.messages import HumanMessage
 from log_analyzer import analyze_log_file
 
 
-class State(TypedDict):
-    # Messages have the type "list". The `add_messages` function
-    # in the annotation defines how this state key should be updated
-    # (in this case, it appends messages to the list, rather than overwriting them)
-    messages: Annotated[list, add_messages]
-
-
-def create_chatbot():
-    """Create and return a compiled chatbot graph with shell capabilities."""
-
-    # Initialize the StateGraph
-    graph_builder = StateGraph(State)
+def create_agent():
+    """Create and return a ReAct agent with shell and log analysis capabilities."""
 
     # Initialize the chat model (using OpenAI GPT-4)
     # Make sure you have set your OPENAI_API_KEY environment variable
     llm = init_chat_model("openai:gpt-4o-mini")
 
-    # Define the tools
+    # Define the tools available to the agent
     shell_tool = ShellTool()
     tools = [shell_tool, analyze_log_file]
 
-    # Bind tools to the LLM so it knows how to use them
-    llm_with_tools = llm.bind_tools(tools)
+    # Create a ReAct agent with system prompt
+    system_prompt = """You are a helpful assistant with access to shell commands and log analysis capabilities.
+
+You can:
+1. Execute shell commands using the shell tool to interact with the system
+2. Analyze log files using the analyze_log_file tool to help with debugging and system administration tasks
+
+The log analyzer can process files in the loghub directory with different analysis types:
+- "error_patterns": Find and categorize error messages
+- "frequency": Analyze frequency of different log patterns
+- "timeline": Show chronological patterns of events
+- "summary": Provide an overall summary of the log file
+
+When helping users:
+- Be thorough in your analysis
+- Explain what you're doing and why
+- Use appropriate tools based on the user's request
+- If analyzing logs, suggest which analysis type might be most helpful
+- Always be cautious with shell commands and explain what they do
+
+Available log files are in the loghub directory with subdirectories for different systems like:
+Android, Apache, BGL, Hadoop, HDFS, HealthApp, HPC, Linux, Mac, OpenSSH, OpenStack, Proxifier, Spark, Thunderbird, Windows, Zookeeper
+"""
 
-    def chatbot(state: State):
-        """Chatbot node function that processes messages."""
-        # Print the messages being processed
-        print("Current messages:", state["messages"])
-        return {"messages": [llm_with_tools.invoke(state["messages"])]}
-
-    # Add the chatbot node to the graph
-    graph_builder.add_node("chatbot", chatbot)
-
-    # Add the tool node to handle tool calls
-    tool_node = ToolNode(tools=tools)
-    graph_builder.add_node("tools", tool_node)
-
-    # Add conditional edges to route between chatbot and tools
-    graph_builder.add_conditional_edges(
-        "chatbot",
-        tools_condition,
+    # Create the ReAct agent
+    agent = create_react_agent(
+        llm,
+        tools,
+        prompt=system_prompt
    )
 
-    # Add edges
-    graph_builder.add_edge(START, "chatbot")
-    graph_builder.add_edge("tools", "chatbot")
-
-    # Compile the graph
-    graph = graph_builder.compile()
-
-    return graph
+    return agent
 
 
-def stream_graph_updates(graph, user_input: str, conversation_state: dict):
-    """Stream graph updates for a user input while maintaining conversation history."""
-    # Add the new user message to the existing conversation
-    conversation_state["messages"].append({"role": "user", "content": user_input})
+def stream_agent_updates(agent, user_input: str):
+    """Stream agent updates for a user input."""
+    # Create a human message
+    message = HumanMessage(content=user_input)
 
-    # Stream the graph with the full conversation history
-    for event in graph.stream(conversation_state):
-        for value in event.values():
-            # Update conversation state with new messages
-            conversation_state["messages"] = value["messages"]
-            print("Assistant:", value["messages"][-1].content)
+    print("\nAgent: ", end="", flush=True)
+
+    # Use the agent's stream method to get real-time updates
+    final_response = ""
+    tool_calls_made = False
+
+    for event in agent.stream({"messages": [message]}, stream_mode="updates"):
+        for node_name, node_output in event.items():
+            if node_name == "agent" and "messages" in node_output:
+                last_message = node_output["messages"][-1]
+
+                # Check if this is a tool call
+                if hasattr(last_message, 'tool_calls') and last_message.tool_calls:
+                    tool_calls_made = True
+                    for tool_call in last_message.tool_calls:
+                        print(f"\nšŸ”§ Using tool: {tool_call['name']}")
+                        if tool_call.get('args'):
+                            print(f" Args: {tool_call['args']}")
+
+                # Check if this is the final response (no tool calls)
+                elif hasattr(last_message, 'content') and last_message.content and not getattr(last_message, 'tool_calls', None):
+                    final_response = last_message.content
+
+            elif node_name == "tools" and "messages" in node_output:
+                # Show tool results
+                for msg in node_output["messages"]:
+                    if hasattr(msg, 'content'):
+                        print(f"\nšŸ“‹ Tool result: {msg.content[:200]}{'...' if len(msg.content) > 200 else ''}")
+
+    # Print the final response
+    if final_response:
+        if tool_calls_made:
+            print(f"\n\n{final_response}")
+        else:
+            print(final_response)
+    else:
+        print("No response generated.")
+
+    print()  # Add newline
+
+
+def visualize_agent(agent):
+    """Display the agent's graph structure."""
+    try:
+        print("\nšŸ“Š Agent Graph Structure:")
+        print("=" * 40)
+        # Get the graph and display its structure
+        graph = agent.get_graph()
+
+        # Print nodes
+        print("Nodes:")
+        for node_id in graph.nodes:
+            print(f" - {node_id}")
+
+        # Print edges
+        print("\nEdges:")
+        for edge in graph.edges:
+            print(f" - {edge}")
+
+        print("=" * 40)
+        print("This agent follows the ReAct (Reasoning and Acting) pattern:")
+        print("1. Receives user input")
+        print("2. Reasons about what tools to use")
+        print("3. Executes tools when needed")
+        print("4. Provides final response")
+        print("=" * 40)
+
+    except Exception as e:
+        print(f"Could not visualize agent: {e}")
 
 
 def main():
@@ -82,22 +134,31 @@ def main():
         print("You can set it by running: export OPENAI_API_KEY='your-api-key-here'")
         return
 
-    print("šŸ¤– LangGraph Chatbot with Shell Access")
+    print("šŸ¤– LangGraph Log Analysis Agent")
     print("Type 'quit', 'exit', or 'q' to exit the chat.")
-    print("āš ļø WARNING: This bot has shell access - use with caution!")
-    print("-" * 50)
+    print("Type 'help' or 'h' for help and examples.")
+    print("Type 'graph' to see the agent structure.")
+    print("āš ļø WARNING: This agent has shell access - use with caution!")
+    print("šŸ“Š Available log analysis capabilities:")
+    print(" - Analyze log files in the loghub directory")
+    print(" - Execute shell commands for system administration")
+    print(" - Help with debugging and troubleshooting")
+    print("-" * 60)
 
-    # Create the chatbot
+    # Create the agent
     try:
-        graph = create_chatbot()
-        print("āœ… Chatbot with shell tool initialized successfully!")
+        agent = create_agent()
+        print("āœ… Log Analysis Agent initialized successfully!")
+        print("šŸ’” Try asking: 'Analyze the Apache logs for error patterns'")
+        print("šŸ’” Or: 'List the available log files in the loghub directory'")
+
+        # Show agent structure
+        visualize_agent(agent)
+
     except Exception as e:
-        print(f"āŒ Error initializing chatbot: {e}")
+        print(f"āŒ Error initializing agent: {e}")
         return
 
-    # Initialize conversation state to maintain history
-    conversation_state = {"messages": []}
-
     # Start the chat loop
     while True:
         try:
@@ -105,9 +166,25 @@ def main():
             if user_input.lower() in ["quit", "exit", "q"]:
                 print("šŸ‘‹ Goodbye!")
                 break
+            elif user_input.lower() in ["help", "h"]:
+                print("\nšŸ†˜ Help:")
+                print("Commands:")
+                print(" - quit/exit/q: Exit the agent")
+                print(" - help/h: Show this help")
+                print(" - graph: Show agent structure")
+                print("\nExample queries:")
+                print(" - 'Analyze the Apache logs for error patterns'")
+                print(" - 'Show me a summary of the HDFS logs'")
+                print(" - 'List all available log files'")
+                print(" - 'Find error patterns in Linux logs'")
+                print(" - 'Check disk usage on the system'")
+                continue
+            elif user_input.lower() in ["graph", "structure"]:
+                visualize_agent(agent)
+                continue
 
             if user_input.strip():
-                stream_graph_updates(graph, user_input, conversation_state)
+                stream_agent_updates(agent, user_input)
             else:
                 print("Please enter a message.")
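Note: main.py imports analyze_log_file from log_analyzer, which is not part of this patch. As a point of reference only, below is a minimal sketch of the shape such a tool could take. The function name and the analysis types ("error_patterns", "frequency", "timeline", "summary") come from the diff's system prompt; the parameter names, path handling, and analysis logic are assumptions and may not match the project's real implementation.

    # Hypothetical sketch only -- the real log_analyzer.analyze_log_file may differ.
    from collections import Counter
    from pathlib import Path

    from langchain_core.tools import tool


    @tool
    def analyze_log_file(file_path: str, analysis_type: str = "summary") -> str:
        """Analyze a log file from the loghub directory.

        analysis_type: "error_patterns", "frequency", "timeline", or "summary".
        """
        lines = Path(file_path).read_text(errors="ignore").splitlines()

        if analysis_type == "error_patterns":
            # Collect lines that look like errors and show a small sample
            errors = [line for line in lines if "error" in line.lower()]
            return f"Found {len(errors)} error lines. Examples: {errors[:3]}"
        if analysis_type == "frequency":
            # Count the leading token of each line as a rough pattern key
            counts = Counter(line.split()[0] for line in lines if line.split())
            return f"Most common leading tokens: {counts.most_common(5)}"
        if analysis_type == "timeline":
            first = lines[0] if lines else "n/a"
            last = lines[-1] if lines else "n/a"
            return f"First entry: {first}\nLast entry: {last}"
        return f"{Path(file_path).name}: {len(lines)} lines total."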
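Outside the interactive loop, the compiled graph returned by create_react_agent can also be driven directly via the standard LangGraph invoke API; the query string below is only an example:

    from langchain_core.messages import HumanMessage

    from main import create_agent

    agent = create_agent()
    result = agent.invoke(
        {"messages": [HumanMessage(content="Summarize the HDFS logs")]}
    )
    # The final assistant reply is the last message in the returned state
    print(result["messages"][-1].content)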