darkplex-core/cortex/intelligence/anticipator.py
Claudia fd7d75c0ed
Some checks failed
Tests / test (push) Failing after 2s
Merge darkplex-core into cortex — unified intelligence layer v0.2.0
- Merged all unique darkplex-core modules into cortex:
  - intelligence/ subfolder (anticipator, collective, shared_memory, knowledge_cleanup, temporal, llm_extractor, loop)
  - governance/ subfolder (policy engine, risk scorer, evidence, enforcer, report generator)
  - entity_manager.py, knowledge_extractor.py
- Fixed bare 'from intelligence.' imports to 'from cortex.intelligence.'
- Added 'darkplex' CLI alias alongside 'cortex'
- Package renamed to darkplex-core v0.2.0
- 405 tests passing (was 234)
- 14 new test files covering all merged modules
2026-02-12 08:43:02 +01:00

193 lines
6.8 KiB
Python

"""Proactive Intelligence: pattern-based predictions and anticipation.
Detects patterns in historical events and generates proactive alerts:
- SSL certificate expiry approaching
- Recurring issues (same error pattern at predictable intervals)
- Usage pattern anomalies
- Resource exhaustion trends
"""
from __future__ import annotations

import logging
import os
from collections import Counter
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Any, Callable
logger = logging.getLogger(__name__)
class AlertSeverity(Enum):
    """Severity of a proactive alert, from least to most urgent."""

    INFO = "info"
    WARNING = "warning"
    CRITICAL = "critical"
@dataclass
class Prediction:
    """A proactive prediction about a future event.

    Emitted by pattern detectors; consumers sort and act on these.
    """

    pattern_name: str  # name of the pattern that produced this prediction
    description: str  # human-readable summary of what was detected
    severity: AlertSeverity  # info / warning / critical
    predicted_time: datetime | None = None  # when the event is expected, if known
    confidence: float = 0.0  # detector confidence, 0.0-1.0
    recommended_action: str = ""  # suggested remediation, if any
    metadata: dict[str, Any] = field(default_factory=dict)  # pattern-specific extras
@dataclass
class PatternDefinition:
    """Definition of a detectable pattern.

    The detector callable receives the full event list and returns a
    Prediction when the pattern is present, otherwise None.
    """

    name: str  # unique identifier for the pattern
    description: str  # what the detector looks for
    detector: Callable[[list[dict[str, Any]]], Prediction | None]
class Anticipator:
    """Proactive intelligence engine that detects patterns and generates predictions.

    Usage:
        anticipator = Anticipator()
        anticipator.register_pattern(ssl_expiry_pattern)
        predictions = anticipator.analyze(events)
    """

    def __init__(self) -> None:
        # Detectors run in registration order; built-ins are installed first.
        self.patterns: list[PatternDefinition] = []
        self._register_builtin_patterns()

    def register_pattern(self, pattern: PatternDefinition) -> None:
        """Register a new pattern detector."""
        self.patterns.append(pattern)
        logger.info("Registered pattern: %s", pattern.name)

    def analyze(self, events: list[dict[str, Any]]) -> list[Prediction]:
        """Analyze events against all registered patterns.

        Args:
            events: List of event dicts with at minimum 'timestamp', 'type', 'data'.

        Returns:
            List of predictions, sorted by severity (critical first).
        """
        hits: list[Prediction] = []
        for definition in self.patterns:
            try:
                hit = definition.detector(events)
                if hit:
                    hits.append(hit)
                    logger.info(
                        "Pattern detected: %s (severity: %s, confidence: %.2f)",
                        hit.pattern_name,
                        hit.severity.value,
                        hit.confidence,
                    )
            except Exception:
                # One broken detector must not abort the whole analysis pass.
                logger.exception("Pattern detector failed: %s", definition.name)
        # Critical outranks warning outranks info; ties broken by confidence (desc).
        rank = {AlertSeverity.CRITICAL: 0, AlertSeverity.WARNING: 1, AlertSeverity.INFO: 2}
        return sorted(hits, key=lambda h: (rank.get(h.severity, 3), -h.confidence))

    def _register_builtin_patterns(self) -> None:
        """Register built-in pattern detectors."""
        builtin = (
            ("ssl_cert_expiry", "Detects SSL certificates approaching expiry", _detect_ssl_expiry),
            ("recurring_error", "Detects recurring error patterns", _detect_recurring_errors),
            ("usage_spike", "Detects unusual usage spikes", _detect_usage_spike),
        )
        for name, description, detector in builtin:
            self.register_pattern(
                PatternDefinition(name=name, description=description, detector=detector)
            )
def _detect_ssl_expiry(events: list[dict[str, Any]]) -> Prediction | None:
    """Detect SSL certificates that will expire within 14 days (or already have).

    Scans 'ssl_cert_check' events whose data carries an ISO-format 'expiry'
    timestamp; naive timestamps are assumed to be UTC. Unparseable or missing
    expiries are skipped silently.

    Returns:
        A Prediction for the first at-risk certificate found, or None.
    """
    now = datetime.now(timezone.utc)
    threshold = timedelta(days=14)
    for event in events:
        if event.get("type") != "ssl_cert_check":
            continue
        data = event.get("data", {})
        expiry_str = data.get("expiry")
        if not expiry_str:
            continue
        try:
            expiry = datetime.fromisoformat(expiry_str)
            if expiry.tzinfo is None:
                # Treat naive timestamps as UTC so the comparison is valid.
                expiry = expiry.replace(tzinfo=timezone.utc)
        except (ValueError, TypeError):
            continue
        remaining = expiry - now
        if remaining >= threshold:
            continue
        domain = data.get("domain", "unknown")
        if remaining < timedelta(0):
            # Already expired. timedelta.days floors toward -inf for negative
            # deltas, so negate the delta to get a sane "days ago" figure
            # instead of the old misleading "expires in -1 days" message.
            days_overdue = (now - expiry).days
            days_remaining = -days_overdue
            description = f"SSL certificate for {domain} expired {days_overdue} days ago"
            severity = AlertSeverity.CRITICAL
        else:
            days_remaining = remaining.days
            description = f"SSL certificate for {domain} expires in {days_remaining} days"
            severity = AlertSeverity.CRITICAL if days_remaining < 3 else AlertSeverity.WARNING
        return Prediction(
            pattern_name="ssl_cert_expiry",
            description=description,
            severity=severity,
            predicted_time=expiry,
            confidence=0.95,
            recommended_action=f"Renew SSL certificate for {domain}",
            metadata={"domain": domain, "days_remaining": days_remaining},
        )
    return None
def _detect_recurring_errors(events: list[dict[str, Any]]) -> Prediction | None:
    """Detect recurring error patterns (same error type appearing 3+ times).

    Counts 'error' events by their data 'error_type' (defaulting to
    "unknown"). Counter preserves first-seen order, so the first error type
    to reach 3 occurrences wins — same behavior as the previous manual dict.

    Returns:
        A WARNING Prediction whose confidence grows with the occurrence
        count (capped at 0.95), or None if nothing recurs.
    """
    counts = Counter(
        event.get("data", {}).get("error_type", "unknown")
        for event in events
        if event.get("type") == "error"
    )
    for error_type, count in counts.items():
        if count >= 3:
            return Prediction(
                pattern_name="recurring_error",
                description=f"Recurring error '{error_type}' detected ({count} occurrences)",
                severity=AlertSeverity.WARNING,
                confidence=min(0.5 + count * 0.1, 0.95),
                recommended_action=f"Investigate root cause of '{error_type}'",
                metadata={"error_type": error_type, "count": count},
            )
    return None
def _detect_usage_spike(events: list[dict[str, Any]]) -> Prediction | None:
    """Detect unusual usage spikes (recent average more than 2x overall average).

    Requires at least 10 'usage_metric' events to establish a baseline;
    missing 'value' fields count as 0.

    Returns:
        A WARNING Prediction when the last-3 average exceeds twice the
        overall average (and the overall average is positive), else None.
    """
    usage_events = [e for e in events if e.get("type") == "usage_metric"]
    if len(usage_events) < 10:
        return None
    values = [e.get("data", {}).get("value", 0) for e in usage_events]
    avg = sum(values) / len(values)
    # The < 10 guard above guarantees at least 10 values, so the last-3
    # window always exists and is never empty (the old fallbacks for
    # shorter/empty lists were unreachable and have been removed).
    recent_avg = sum(values[-3:]) / 3
    if avg > 0 and recent_avg > avg * 2:
        return Prediction(
            pattern_name="usage_spike",
            description=f"Usage spike detected: recent avg {recent_avg:.1f} vs overall {avg:.1f}",
            severity=AlertSeverity.WARNING,
            confidence=0.7,
            recommended_action="Investigate usage spike — potential anomaly or load increase",
            metadata={"average": avg, "recent_average": recent_avg, "ratio": recent_avg / avg},
        )
    return None