-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathexample_usage.py
More file actions
executable file
·99 lines (81 loc) · 3.14 KB
/
example_usage.py
File metadata and controls
executable file
·99 lines (81 loc) · 3.14 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
#!/usr/bin/env python3
"""
Example usage of the Log Analyzer modules
Demonstrates how to use the log collector and LLM analyzer programmatically
"""
import yaml
from log_collector import LogCollector
from llm_analyzer import LLMAnalyzer
def main():
    """Run an end-to-end demo of the log analyzer pipeline.

    Steps: load ``config.yaml``, collect system logs via
    :class:`LogCollector`, print a small sample and statistical
    insights, then (if an LLM API endpoint is reachable) run a
    summary analysis over the first 50 collected entries.

    Raises:
        FileNotFoundError: if ``config.yaml`` is missing from the
            current working directory.
        yaml.YAMLError: if the configuration file is malformed.
    """
    print("=" * 60)
    print("Linux Log Analyzer - Example Usage")
    print("=" * 60)

    # Load configuration.
    # Explicit encoding avoids the platform-dependent default
    # (e.g. cp1252 on Windows) mangling non-ASCII config values.
    print("\n1. Loading configuration...")
    with open('config.yaml', 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
    print("✓ Configuration loaded")

    # Initialize log collector
    print("\n2. Initializing log collector...")
    collector = LogCollector(config)
    print("✓ Log collector initialized")

    # Collect logs
    print("\n3. Collecting logs from system...")
    logs = collector.collect_all_logs()
    print(f"✓ Collected {len(logs)} log entries")
    if not logs:
        # Nothing to analyze — bail out early rather than run the
        # analyzer on an empty list.
        print("\n⚠ No logs collected. Check your configuration and permissions.")
        return

    # Display sample logs: first five entries, message truncated to
    # 100 characters to keep the console output readable.
    print("\n4. Sample log entries:")
    print("-" * 60)
    for i, log in enumerate(logs[:5]):
        print(f"\nLog {i+1}:")
        print(f" Timestamp: {log.timestamp}")
        print(f" Source: {log.source}")
        print(f" Priority: {log.priority}")
        print(f" Message: {log.message[:100]}...")

    # Initialize LLM analyzer
    print("\n5. Initializing LLM analyzer...")
    analyzer = LLMAnalyzer(config)
    print("✓ LLM analyzer initialized")

    # Extract statistical insights (no LLM call involved here).
    print("\n6. Extracting statistical insights...")
    insights = analyzer.extract_key_insights(logs)
    print("✓ Insights extracted:")  # was a pointless f-string (no placeholders)
    print(f" - Total logs: {insights['total_logs']}")
    print(f" - Errors: {insights['error_count']}")
    print(f" - Warnings: {insights['warning_count']}")
    print(f" - Unique sources: {len(insights['sources'])}")

    # Perform LLM analysis. Capped at 50 entries to keep the prompt
    # within a reasonable context size for the demo.
    print("\n7. Performing LLM analysis...")
    print(" (This requires a running LLM API endpoint)")
    try:
        result = analyzer.analyze_logs(logs[:50], analysis_type='summary')
        if result['success']:
            print("✓ Analysis completed successfully")
            print("\n" + "=" * 60)
            print("ANALYSIS RESULTS")
            print("=" * 60)
            print(result['analysis'])
            print("=" * 60)
            print(f"\nModel used: {result['model']}")
            print(f"Logs analyzed: {result['logs_analyzed']}")
        else:
            print(f"✗ Analysis failed: {result['error']}")
            print("\nNote: Make sure your LLM API endpoint is running and accessible.")
            print(f"Current endpoint: {config['llm']['api_base']}")
    except Exception as e:
        # Broad catch is deliberate: this is an example script's
        # top-level boundary, and any network/API failure should end
        # with troubleshooting hints rather than a traceback.
        print(f"✗ Error during analysis: {e}")
        print("\nTroubleshooting:")
        print("1. Check if your LLM API is running")
        print("2. Verify the API endpoint in config.yaml")
        print("3. Ensure the API key is correct")

    print("\n" + "=" * 60)
    print("Example completed!")
    print("=" * 60)
    print("\nTo use the web interface, run:")
    print(" streamlit run app.py")
# Standard script entry point: run the demo only when executed
# directly, not when this module is imported.
if __name__ == "__main__":
    main()
# Made with Bob