# Multi-agent sysadmin assistant using LangChain + LangGraph Supervisor
# Requires: `pip install langchain-openai langgraph langgraph-supervisor`
from __future__ import annotations

from supervisor import create_sysadmin_supervisor
from utils import print_step_info, explain_supervisor_pattern
def extract_final_content(chunk):
    """Return the last non-empty message content found in *chunk*, or None.

    A stream chunk maps agent/node names to their state; each state may
    carry a ``messages`` list whose entries expose a ``content`` attribute.
    The last agent (in chunk iteration order) with non-empty content wins.

    Args:
        chunk: One item yielded by ``supervisor.stream(...)``; expected to
            be a dict of agent-name -> state-dict, but any other shape is
            tolerated and yields None.

    Returns:
        The content string of the most recent message, or None if the chunk
        holds no usable message content.
    """
    if not isinstance(chunk, dict):
        return None
    content = None
    for agent_state in chunk.values():
        # isinstance guard: the original `'messages' in agent_state` would
        # raise TypeError on a non-dict state (e.g. None or a plain string).
        if isinstance(agent_state, dict) and agent_state.get("messages"):
            last_message = agent_state["messages"][-1]
            text = getattr(last_message, "content", None)
            if text:
                content = text
    return content


def main():
    """Create the supervisor and drive the interactive conversation loop.

    Streams each user query through the supervisor with step-by-step
    logging, falling back to a plain ``invoke`` call if streaming fails.
    Maintains role/content message history across turns.
    """
    # Create the supervisor
    supervisor = create_sysadmin_supervisor()

    # Interactive conversation loop
    messages = []  # running history of {"role": ..., "content": ...} dicts
    print("Welcome to the multi-agent sysadmin assistant!")
    print("Type your sysadmin question below. Type 'exit' to quit.")
    print("\n💡 Note: When agents execute shell commands, you may see command output")
    print("   appear between the structured step logs. This is normal behavior.")
    print("   The output belongs to the agent that was most recently active.")

    while True:
        try:
            user_input = input("\n📝 User: ")
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C at the prompt: exit cleanly instead of
            # dumping a traceback.
            print("\nGoodbye!")
            break
        if user_input.strip().lower() == 'exit':
            print("Goodbye!")
            break

        messages.append({"role": "user", "content": user_input})
        query = {"messages": messages}

        print("\n=== Processing with detailed step-by-step analysis ===")
        step_count = 0
        max_steps = 20  # Prevent infinite loops
        final_result = None

        try:
            for chunk in supervisor.stream(query):
                step_count += 1
                print_step_info(step_count, chunk)

                # Store the final result for conversation history
                content = extract_final_content(chunk)
                if content:
                    final_result = content

                # Safety check to prevent infinite loops
                if step_count >= max_steps:
                    print(f"\n⚠️ Reached maximum steps ({max_steps}), stopping stream...")
                    break

            print(f"\n✅ Analysis completed with {step_count} steps")

            # Add the assistant's reply to the conversation history
            if final_result:
                messages.append({"role": "assistant", "content": final_result})

            print(f"\n📊 FINAL SUMMARY:")
            print("-" * 60)
            if final_result:
                print(final_result)
            else:
                print("Analysis completed - check the detailed steps above for results")
            print("-" * 60)

        except Exception as e:
            print(f"\n❌ Streaming error after {step_count} steps: {e}")
            print("💡 Falling back to basic invoke method...")
            try:
                result = supervisor.invoke(query)
                final_result = result["messages"][-1].content
                messages.append({"role": "assistant", "content": final_result})
                print(f"\n📊 FINAL RESULT:")
                print("-" * 40)
                print(final_result)
                print("-" * 40)
            except Exception as fallback_error:
                print(f"❌ Fallback also failed: {fallback_error}")
            # Skip the continuation prompt after an error, matching the
            # original flow: go straight back to the user prompt.
            continue

        # Ask if the user wants to continue
        cont = input("\nWould you like to continue the conversation? (y/n): ")
        if cont.strip().lower() not in ('y', 'yes'):
            print("Session ended.")
            break


if __name__ == "__main__":
    main()
|