#!/usr/bin/env python3
"""
Demo script for testing the VoxPop AI Analysis Service.
"""
import argparse
import time

import requests
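
# The demo exercises four endpoints of the analysis service:
#   GET  /status   -- service status, version, and scheduled jobs
#   POST /run-now  -- trigger an analysis run
#   GET  /analyze  -- per-perspective analysis results and insights
#   GET  /insights -- consolidated insights over a recent period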


def main():
    """Run the demo."""
    parser = argparse.ArgumentParser(description="Test the AI Analysis Service")
    parser.add_argument("--host", default="http://localhost:8000", help="Service host")
    args = parser.parse_args()
    host = args.host

    print("VoxPop AI Analysis Service Demo")
    print("===============================")
    print(f"Connecting to {host}")

    # Check service status
    try:
        response = requests.get(f"{host}/status")
        response.raise_for_status()
        status = response.json()
        print("\nService Status:")
        print(f" Status: {status.get('status')}")
        print(f" Version: {status.get('version')}")
        print(f" Last Analysis: {status.get('last_analysis')}")

        # Print scheduled jobs
        print("\nScheduled Jobs:")
        for job_id, job in status.get("scheduled_jobs", {}).items():
            print(f" {job_id}: Runs={job.get('runs')}, Errors={job.get('errors')}")
    except Exception as e:
        print(f"Error checking status: {e}")
        return

    # Trigger an analysis run
    print("\nTriggering analysis run...")
    try:
        response = requests.post(f"{host}/run-now")
        response.raise_for_status()
        print(f" Analysis triggered at: {response.json().get('timestamp')}")
    except Exception as e:
        print(f"Error triggering analysis: {e}")
        return

    # Wait for analysis to complete
    print(" Waiting for analysis to complete (10 seconds)...")
    time.sleep(10)
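    # NOTE: the fixed 10-second wait keeps the demo simple; a more robust client
    # could poll /status and wait for the last_analysis timestamp to change.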

    # Get analysis results
    try:
        response = requests.get(f"{host}/analyze")
        response.raise_for_status()
        analysis = response.json()

        # Print summary
        perspectives = analysis.get("perspectives", [])
        insights = analysis.get("insights", [])
        print("\nAnalysis Results:")
        print(f" Analyzed {len(perspectives)} perspectives")
        print(f" Generated {len(insights)} insights")

        # Print sentiment distribution
        sentiments = [p.get("sentiment", "unknown") for p in perspectives]
        sentiment_counts = {
            "positive": sentiments.count("positive"),
            "negative": sentiments.count("negative"),
            "neutral": sentiments.count("neutral"),
            "unknown": sentiments.count("unknown"),
        }
        print("\nSentiment Distribution:")
        for sentiment, count in sentiment_counts.items():
            if count > 0:
                percentage = (count / len(perspectives)) * 100
                print(f" {sentiment.capitalize()}: {count} ({percentage:.1f}%)")

        # Print insights
        print("\nInsights:")
        for i, insight in enumerate(insights):
            # Default confidence to 0 so a missing value doesn't break the formatting
            print(f" {i+1}. {insight.get('summary')} (Confidence: {insight.get('confidence', 0):.2f})")

        # Print a few example perspectives
        print("\nExample Perspectives:")
        for i, perspective in enumerate(perspectives[:5]):
            print(f" {i+1}. [{perspective.get('sentiment', 'unknown')}] {perspective.get('text', '')[:100]}...")
    except Exception as e:
        print(f"Error getting analysis results: {e}")
        return

    # Get consolidated insights
    try:
        response = requests.get(f"{host}/insights")
        response.raise_for_status()
        consolidated = response.json()
        insights = consolidated.get("consolidated_insights", [])
        metadata = consolidated.get("metadata", {})
        print("\nConsolidated Insights:")
        print(f" Period: {metadata.get('period_days')} days")
        print(f" Total insights: {metadata.get('total_insights')}")
        print(f" Consolidated count: {metadata.get('consolidated_count')}")

        # Print top insights
        print("\nTop Insights:")
        for i, insight in enumerate(insights[:5]):
            print(f" {i+1}. {insight.get('summary')} (Confidence: {insight.get('confidence', 0):.2f}, Count: {insight.get('count')})")
    except Exception as e:
        print(f"Error getting consolidated insights: {e}")

    print("\nDemo completed!")
if __name__ == "__main__":
main()