# Multi-agent sysadmin assistant using LangChain + LangGraph Supervisor
# Requires: `pip install langchain-openai langgraph langgraph-supervisor`
from __future__ import annotations

from supervisor import create_sysadmin_supervisor
from utils import print_step_info, explain_supervisor_pattern

if __name__ == "__main__":
    # Create the supervisor
    supervisor = create_sysadmin_supervisor()

    # Example run - demonstrating both invoke and streaming with debug output
    query = {
        "messages": [
            {
                "role": "user",
                "content": "Nginx returns 502 Bad Gateway on my server. What can I do?",
            }
        ]
    }

    print("šŸš€ Starting multi-agent sysadmin analysis...")
    print(f"šŸ“ User Query: {query['messages'][0]['content']}")
    print("=" * 80)

    # Show explanation of the supervisor pattern
    explain_supervisor_pattern()

    print("\n=== Using invoke() method ===")
    result = supervisor.invoke(query)
    print("\nšŸ“Š FINAL RESULT:")
    print("-" * 40)
    print(result["messages"][-1].content)
    print("-" * 40)
    print(f"\nšŸ“ˆ Total messages exchanged: {len(result['messages'])}")

    print("\n=== Using stream() method for detailed step-by-step analysis ===")
    step_count = 0
    max_steps = 20  # Prevent infinite loops

    try:
        chunks_processed = []
        for chunk in supervisor.stream(query):
            step_count += 1
            chunks_processed.append(chunk)
            print_step_info(step_count, chunk)

            # Safety check to prevent infinite loops
            if step_count >= max_steps:
                print(f"\nāš ļø Reached maximum steps ({max_steps}), stopping stream...")
                break

        print(f"\nāœ… Streaming completed successfully with {step_count} steps")
        print(f"šŸ“Š Total chunks processed: {len(chunks_processed)}")

        # Check if the last chunk contains a complete final response
        if chunks_processed:
            last_chunk = chunks_processed[-1]
            print(f"šŸ” Last chunk keys: {list(last_chunk.keys()) if isinstance(last_chunk, dict) else type(last_chunk)}")

    except Exception as e:
        print(f"\nāŒ Streaming error after {step_count} steps: {e}")
        print("šŸ’” The invoke() method worked fine, so the supervisor itself is functional.")
        import traceback
        traceback.print_exc()
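
# ---------------------------------------------------------------------------
# Reference sketch (NOT the shipped `supervisor` / `utils` modules, which are
# not shown in this file): one plausible way to build the factory and the
# stream-printing helper imported above, using the documented
# `langgraph_supervisor.create_supervisor` and
# `langgraph.prebuilt.create_react_agent` APIs. The agent name, prompts, and
# the `check_service_status` tool below are illustrative assumptions.
# Nothing here runs unless called explicitly.
# ---------------------------------------------------------------------------

def _example_sysadmin_supervisor():
    """Illustrative only; the real factory lives in supervisor.py."""
    from langchain_openai import ChatOpenAI
    from langgraph.prebuilt import create_react_agent
    from langgraph_supervisor import create_supervisor

    model = ChatOpenAI(model="gpt-4o")

    def check_service_status(service: str) -> str:
        """Hypothetical tool: report a service's state (stubbed here)."""
        return f"{service} is active (running)"

    # One specialist worker agent; a real setup would register several.
    log_agent = create_react_agent(
        model=model,
        tools=[check_service_status],
        name="log_analyzer",
        prompt="You analyze server logs and service state to diagnose faults.",
    )

    # The supervisor routes each turn to the appropriate worker agent.
    workflow = create_supervisor(
        [log_agent],
        model=model,
        prompt="You route sysadmin questions to the right specialist agent.",
    )
    return workflow.compile()


def _example_print_step_info(step: int, chunk) -> None:
    """Illustrative stand-in for utils.print_step_info, assuming LangGraph's
    'updates' stream mode, where each chunk maps node name -> state update."""
    if not isinstance(chunk, dict):
        print(f"Step {step}: unexpected chunk type {type(chunk)}")
        return
    for node_name, node_update in chunk.items():
        messages = node_update.get("messages", []) if isinstance(node_update, dict) else []
        print(f"Step {step}: node '{node_name}' emitted {len(messages)} message(s)")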