# Multi-agent sysadmin assistant using LangChain + LangGraph Supervisor
# Requires: `pip install langchain-openai langgraph langgraph-supervisor`

from __future__ import annotations

from supervisor import create_sysadmin_supervisor
from utils import print_step_info, explain_supervisor_pattern

|
if __name__ == "__main__":
    # Build the supervisor graph once; it is reused for every turn below.
    supervisor = create_sysadmin_supervisor()

    # Interactive conversation loop.
    # `messages` is the running chat history (role/content dicts) sent to the
    # supervisor on every turn; `query` remembers the last request payload so
    # the streaming demo after the loop can replay it.
    messages = []
    query = None  # BUG FIX: stays None if the user exits before asking anything

    print("Welcome to the multi-agent sysadmin assistant!")
    print("Type your sysadmin question below. Type 'exit' to quit.")

    while True:
        user_input = input("\n📝 User: ")
        if user_input.strip().lower() == 'exit':
            print("Goodbye!")
            break

        messages.append({"role": "user", "content": user_input})
        query = {"messages": messages}

        print("\n=== Using invoke() method ===")
        result = supervisor.invoke(query)

        print("\n📊 FINAL RESULT:")
        print("-" * 40)
        print(result["messages"][-1].content)
        print("-" * 40)
        print(f"\n📈 Total messages exchanged: {len(result['messages'])}")

        # Add the assistant's reply to the conversation history
        messages.append({"role": "assistant", "content": result["messages"][-1].content})

        # Ask if the user wants to continue
        cont = input("\nWould you like to continue the conversation? (y/n): ")
        if cont.strip().lower() not in ('y', 'yes'):
            print("Session ended.")
            break

    # Replay the last query with stream() to show the supervisor's step-by-step
    # hand-offs. Guarded so an immediate 'exit' (query is None) does not raise
    # a NameError — previously `query` could be referenced before assignment.
    if query is not None:
        print("\n=== Using stream() method for detailed step-by-step analysis ===")
        step_count = 0
        max_steps = 20  # Prevent infinite loops

        try:
            chunks_processed = []
            for chunk in supervisor.stream(query):
                step_count += 1
                chunks_processed.append(chunk)
                print_step_info(step_count, chunk)

                # Safety check to prevent infinite loops
                if step_count >= max_steps:
                    print(f"\n⚠️ Reached maximum steps ({max_steps}), stopping stream...")
                    break

            print(f"\n✅ Streaming completed successfully with {step_count} steps")
            print(f"📊 Total chunks processed: {len(chunks_processed)}")

            # Check if the last chunk contains a complete final response
            if chunks_processed:
                last_chunk = chunks_processed[-1]
                print(f"🔍 Last chunk keys: {list(last_chunk.keys()) if isinstance(last_chunk, dict) else type(last_chunk)}")

        except Exception as e:
            # Best-effort diagnostics: the invoke() path already succeeded, so a
            # streaming failure is reported with a full traceback but not re-raised.
            print(f"\n❌ Streaming error after {step_count} steps: {e}")
            print("💡 The invoke() method worked fine, so the supervisor itself is functional.")
            import traceback
            traceback.print_exc()
|