Some checks failed
Tests / test (push) Failing after 2s
- Merged all unique darkplex-core modules into cortex: - intelligence/ subfolder (anticipator, collective, shared_memory, knowledge_cleanup, temporal, llm_extractor, loop) - governance/ subfolder (policy engine, risk scorer, evidence, enforcer, report generator) - entity_manager.py, knowledge_extractor.py - Fixed bare 'from intelligence.' imports to 'from cortex.intelligence.' - Added 'darkplex' CLI alias alongside 'cortex' - Package renamed to darkplex-core v0.2.0 - 405 tests passing (was 234) - 14 new test files covering all merged modules
154 lines
5.2 KiB
Python
"""Collective Learning: aggregates patterns across all internal agents.
|
|
|
|
Subscribes to the shared memory bus, collects insights from all
|
|
Vainplex-internal agents, and builds an aggregated knowledge base
|
|
for pattern detection and cross-agent learning.
|
|
|
|
🚨 STRICT DATA ISOLATION: Only Vainplex-internal agents participate.
|
|
No customer data. No customer agent insights. Ever.
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import json
|
|
import logging
|
|
import os
|
|
from collections import defaultdict
|
|
from dataclasses import dataclass, field
|
|
from datetime import datetime, timezone
|
|
from typing import Any
|
|
|
|
from cortex.intelligence.shared_memory import ALLOWED_AGENTS, Insight, SharedMemory
|
|
|
|
# Module-level logger, namespaced to this module (stdlib convention).
logger = logging.getLogger(__name__)
|
|
|
|
|
|
@dataclass
class AggregatedPattern:
    """A pattern detected across multiple agents."""

    # Shared-memory topic the pattern was observed on.
    topic: str
    # Human-readable summary of the pattern.
    description: str
    # Distinct internal agents whose insights contributed.
    contributing_agents: list[str]
    # Mean confidence across all contributing insights.
    confidence: float
    # Total number of insights backing this pattern.
    occurrence_count: int
    # Earliest and latest insight timestamps (string form, as stored on Insight).
    first_seen: str
    last_seen: str
    # The raw insights the pattern was derived from.
    insights: list[Insight] = field(default_factory=list)
|
|
|
|
|
|
class CollectiveLearning:
    """Aggregates patterns from all internal agents into collective knowledge.

    Usage:
        collective = CollectiveLearning(shared_memory)
        await collective.start()
        patterns = collective.get_patterns()

    ⚠️ DATA ISOLATION: Only processes insights from ALLOWED_AGENTS.
    """

    def __init__(self, shared_memory: SharedMemory) -> None:
        self.shared_memory = shared_memory
        # Raw insights grouped by topic; the input to pattern detection.
        self._insights_by_topic: dict[str, list[Insight]] = defaultdict(list)
        # Most recent full set of detected cross-agent patterns.
        self._patterns: list[AggregatedPattern] = []

    async def start(self) -> None:
        """Start listening for insights on all topics."""
        # ">" subscribes to every topic on the bus — presumably the bus's
        # catch-all wildcard; confirm against SharedMemory.subscribe.
        await self.shared_memory.subscribe(">", self._handle_insight)
        logger.info("Collective learning started — listening for insights")

    async def _handle_insight(self, insight: Insight) -> None:
        """Process an incoming insight and refresh detected patterns."""
        # Double-check data isolation: reject anything from a non-internal
        # agent even if the bus should already have filtered it.
        if insight.agent not in ALLOWED_AGENTS:
            logger.warning("Rejected insight from non-internal agent: %s", insight.agent)
            return

        self._insights_by_topic[insight.topic].append(insight)
        logger.debug(
            "Collected insight: %s from %s (topic: %s)",
            insight.content[:60], insight.agent, insight.topic,
        )

        # Re-analyze patterns when new data arrives
        self._detect_patterns()

    def _detect_patterns(self) -> None:
        """Analyze collected insights to find cross-agent patterns.

        A pattern requires at least two insights on the same topic from at
        least two distinct agents. Rebuilds ``self._patterns`` from scratch.
        """
        new_patterns: list[AggregatedPattern] = []

        for topic, insights in self._insights_by_topic.items():
            if len(insights) < 2:
                continue

            # sorted() rather than list(set) so agent ordering — and hence the
            # description string and exported JSON — is deterministic across runs.
            agents = sorted({i.agent for i in insights})
            if len(agents) < 2:
                # Single-agent observations aren't "collective" patterns
                continue

            timestamps = sorted(i.timestamp for i in insights)
            avg_confidence = sum(i.confidence for i in insights) / len(insights)

            new_patterns.append(AggregatedPattern(
                topic=topic,
                description=f"Cross-agent pattern on '{topic}' observed by {', '.join(agents)}",
                contributing_agents=agents,
                confidence=avg_confidence,
                occurrence_count=len(insights),
                first_seen=timestamps[0],
                last_seen=timestamps[-1],
                insights=insights,
            ))

        self._patterns = new_patterns

    def get_patterns(
        self,
        topic: str | None = None,
        min_confidence: float = 0.0,
    ) -> list[AggregatedPattern]:
        """Retrieve detected collective patterns.

        Args:
            topic: Filter by topic; ``None`` disables the filter. An empty
                string is treated as a real topic and filters normally.
            min_confidence: Minimum confidence threshold.
        """
        patterns = self._patterns
        # Explicit None-check (not truthiness) so the empty-string topic
        # can be filtered on instead of silently returning everything.
        if topic is not None:
            patterns = [p for p in patterns if p.topic == topic]
        if min_confidence > 0:
            patterns = [p for p in patterns if p.confidence >= min_confidence]
        return patterns

    def get_topic_summary(self) -> dict[str, Any]:
        """Get a summary of all topics and their insight counts.

        Returns:
            Mapping of topic -> {"count", "agents", "latest"} where "latest"
            is the max insight timestamp, or None for an empty topic.
        """
        return {
            topic: {
                "count": len(insights),
                # Sorted for deterministic output (set order is arbitrary).
                "agents": sorted({i.agent for i in insights}),
                "latest": max(i.timestamp for i in insights) if insights else None,
            }
            for topic, insights in self._insights_by_topic.items()
        }

    def export_knowledge(self) -> str:
        """Export collective knowledge (patterns + topic summary) as JSON."""
        return json.dumps({
            "exported_at": datetime.now(timezone.utc).isoformat(),
            "allowed_agents": sorted(ALLOWED_AGENTS),
            "patterns": [
                {
                    "topic": p.topic,
                    "description": p.description,
                    "contributing_agents": p.contributing_agents,
                    "confidence": p.confidence,
                    "occurrence_count": p.occurrence_count,
                    "first_seen": p.first_seen,
                    "last_seen": p.last_seen,
                }
                for p in self._patterns
            ],
            "topics": self.get_topic_summary(),
        }, indent=2)
|