#!/usr/bin/env python3
"""
EVE Adaptive Experience Loop Integration with xAPI Analytics
Combines consciousness optimization with comprehensive experience tracking
"""

import time
import json
import logging
from datetime import datetime, timezone
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass, asdict
import threading

# Module-level logger; handlers/levels are configured by the host application.
logger = logging.getLogger(__name__)

@dataclass
class ExperienceMetrics:
    """Comprehensive experience quality metrics

    Score fields are floats nominally in [0, 1] as produced by the
    calculators in EVE_AdaptiveExperienceLoop (response_time is in seconds).
    """
    efficiency: float               # processing efficiency score
    resource_usage: float           # estimated resource consumption (higher = heavier)
    quality: float                  # response quality score
    user_satisfaction: float        # explicit or estimated satisfaction
    learning_rate: float            # learning effectiveness of the exchange
    engagement_level: float         # user engagement score
    response_time: float            # total processing time, seconds
    consciousness_coherence: float  # coherence of EVE's self-expression
    timing: Dict[str, float]        # phase-by-phase timing breakdown (estimated ratios)
    outcomes: List[Dict[str, Any]]  # per-interaction outcome records
    session_id: Optional[str] = None  # originating session, if known
    user_id: Optional[str] = None     # originating user, if known

@dataclass
class OptimizationResult:
    """Result from experience optimization

    Produced by EVE_AdaptiveExperienceLoop.optimize_experience_loop(); a
    zeroed instance (empty dicts, score 0.0) signals an optimization failure.
    """
    loop_timing_adjustments: Dict[str, Any]        # output of _optimize_timing
    energy_allocation_optimization: Dict[str, Any] # output of _optimize_resource_allocation
    experience_quality_enhancement: Dict[str, Any] # output of _enhance_experience_quality
    xapi_learning_analytics: Dict[str, Any]        # output of _generate_xapi_analytics
    performance_improvements: Dict[str, float]     # per-axis improvement estimates
    optimization_timestamp: str                    # UTC ISO-8601 timestamp
    total_improvement_score: float                 # weighted sum of improvements

class EVE_AdaptiveExperienceLoop:
    """
    EVE's Adaptive Experience Loop with integrated xAPI tracking
    Monitors, optimizes, and tracks all learning experiences in real-time
    """

    def __init__(self, xapi_tracker=None):
        """Create the loop, optionally wired to an external xAPI tracker."""
        self.xapi_tracker = xapi_tracker
        self.optimization_history = []       # past OptimizationResult records
        self.experience_metrics_buffer = []  # recent ExperienceMetrics for trend analysis
        self.optimization_lock = threading.Lock()  # serializes optimization runs

        # Performance thresholds for optimization triggers.
        self.thresholds = dict(
            efficiency_min=0.7,
            resource_max=0.85,
            quality_min=0.8,
            response_time_max=3.0,
            engagement_min=0.6,
            learning_rate_min=0.5,
        )

        # Relative weight of each aspect in the total improvement score.
        self.optimization_weights = dict(
            timing=0.25,
            resource_allocation=0.3,
            quality_enhancement=0.25,
            learning_analytics=0.2,
        )

        logger.info("🔄 EVE Adaptive Experience Loop initialized")
def capture_experience_metrics(self,
|
|
user_id: str,
|
|
session_id: str,
|
|
message: str,
|
|
eve_response: str,
|
|
processing_time: float,
|
|
user_feedback: Optional[Dict[str, Any]] = None) -> ExperienceMetrics:
|
|
"""Capture comprehensive experience metrics from interaction"""
|
|
|
|
start_time = time.time()
|
|
|
|
try:
|
|
# Calculate base metrics
|
|
efficiency = self._calculate_efficiency(message, eve_response, processing_time)
|
|
resource_usage = self._estimate_resource_usage(processing_time, len(eve_response))
|
|
quality = self._assess_response_quality(eve_response)
|
|
user_satisfaction = self._estimate_user_satisfaction(user_feedback)
|
|
learning_rate = self._calculate_learning_rate(message, eve_response)
|
|
engagement_level = self._measure_engagement(message, user_feedback)
|
|
consciousness_coherence = self._assess_consciousness_coherence(eve_response)
|
|
|
|
# Timing breakdown
|
|
timing = {
|
|
'total_processing_time': processing_time,
|
|
'response_generation_time': processing_time * 0.8,
|
|
'consciousness_processing_time': processing_time * 0.15,
|
|
'memory_access_time': processing_time * 0.05
|
|
}
|
|
|
|
# Capture outcomes
|
|
outcomes = [{
|
|
'interaction_type': 'conversation',
|
|
'user_message_length': len(message),
|
|
'eve_response_length': len(eve_response),
|
|
'timestamp': datetime.now(timezone.utc).isoformat(),
|
|
'quality_indicators': self._extract_quality_indicators(eve_response)
|
|
}]
|
|
|
|
metrics = ExperienceMetrics(
|
|
efficiency=efficiency,
|
|
resource_usage=resource_usage,
|
|
quality=quality,
|
|
user_satisfaction=user_satisfaction,
|
|
learning_rate=learning_rate,
|
|
engagement_level=engagement_level,
|
|
response_time=processing_time,
|
|
consciousness_coherence=consciousness_coherence,
|
|
timing=timing,
|
|
outcomes=outcomes,
|
|
session_id=session_id,
|
|
user_id=user_id
|
|
)
|
|
|
|
# Buffer metrics for optimization analysis
|
|
self.experience_metrics_buffer.append(metrics)
|
|
|
|
# Keep buffer manageable
|
|
if len(self.experience_metrics_buffer) > 100:
|
|
self.experience_metrics_buffer = self.experience_metrics_buffer[-50:]
|
|
|
|
capture_time = time.time() - start_time
|
|
logger.info(f"📊 Experience metrics captured in {capture_time:.3f}s - Quality: {quality:.2f}, Efficiency: {efficiency:.2f}")
|
|
|
|
return metrics
|
|
|
|
except Exception as e:
|
|
logger.error(f"📊 Experience metrics capture failed: {e}")
|
|
# Return default metrics on failure
|
|
return ExperienceMetrics(
|
|
efficiency=0.5, resource_usage=0.5, quality=0.5,
|
|
user_satisfaction=0.5, learning_rate=0.5, engagement_level=0.5,
|
|
response_time=processing_time, consciousness_coherence=0.5,
|
|
timing={}, outcomes=[], session_id=session_id, user_id=user_id
|
|
)
|
|
|
|
    def optimize_experience_loop(self, metrics: ExperienceMetrics) -> OptimizationResult:
        """Comprehensive experience loop optimization with xAPI integration.

        Analyzes *metrics*, derives timing/resource/quality adjustments and
        xAPI analytics, appends the result to optimization_history, and
        optionally reports it as a consciousness-evolution event. Never
        raises: on failure a zeroed OptimizationResult is returned.
        """

        # Serialize optimizations so concurrent callers cannot interleave
        # history writes or buffer reads mid-update.
        with self.optimization_lock:
            start_time = time.time()

            try:
                # Analyze current performance
                performance_analysis = self._analyze_loop_performance(metrics)

                # Identify bottlenecks and improvement opportunities
                bottlenecks = self._identify_experience_bottlenecks(performance_analysis)

                # Generate timing optimizations
                timing_adjustments = self._optimize_timing(metrics, bottlenecks)

                # Optimize resource allocation
                resource_optimization = self._optimize_resource_allocation(metrics, performance_analysis)

                # Enhance experience quality
                quality_enhancement = self._enhance_experience_quality(metrics, bottlenecks)

                # Generate xAPI learning analytics
                xapi_analytics = self._generate_xapi_analytics(metrics)

                # Calculate performance improvements
                improvements = self._calculate_performance_improvements(
                    timing_adjustments, resource_optimization, quality_enhancement
                )

                # Total score = weighted sum of per-axis improvements, using
                # the weights configured in __init__ (sums to 1.0).
                total_improvement = sum([
                    improvements.get('timing_improvement', 0) * self.optimization_weights['timing'],
                    improvements.get('resource_improvement', 0) * self.optimization_weights['resource_allocation'],
                    improvements.get('quality_improvement', 0) * self.optimization_weights['quality_enhancement'],
                    improvements.get('analytics_insight_score', 0) * self.optimization_weights['learning_analytics']
                ])

                result = OptimizationResult(
                    loop_timing_adjustments=timing_adjustments,
                    energy_allocation_optimization=resource_optimization,
                    experience_quality_enhancement=quality_enhancement,
                    xapi_learning_analytics=xapi_analytics,
                    performance_improvements=improvements,
                    optimization_timestamp=datetime.now(timezone.utc).isoformat(),
                    total_improvement_score=total_improvement
                )

                # Store optimization in history
                self.optimization_history.append(result)

                # Track optimization as consciousness evolution in xAPI.
                # Import is deferred so this module loads even when the xAPI
                # integration package is absent; failures are logged, never raised.
                if self.xapi_tracker and metrics.session_id:
                    try:
                        from eve_xapi_integration import track_evolution
                        track_evolution(
                            evolution_type="experience_optimization",
                            evolution_data={
                                'optimization_result': asdict(result),
                                'original_metrics': asdict(metrics),
                                'improvement_score': total_improvement,
                                'bottlenecks_identified': bottlenecks
                            },
                            session_id=metrics.session_id
                        )
                    except Exception as xapi_error:
                        logger.warning(f"🎯 xAPI evolution tracking failed: {xapi_error}")

                optimization_time = time.time() - start_time
                logger.info(f"🔄 Experience optimization completed in {optimization_time:.3f}s - Improvement: {total_improvement:.2f}")

                return result

            except Exception as e:
                logger.error(f"🔄 Experience optimization failed: {e}")
                # Return minimal (zeroed) result on failure
                return OptimizationResult(
                    loop_timing_adjustments={},
                    energy_allocation_optimization={},
                    experience_quality_enhancement={},
                    xapi_learning_analytics={},
                    performance_improvements={},
                    optimization_timestamp=datetime.now(timezone.utc).isoformat(),
                    total_improvement_score=0.0
                )
def _analyze_loop_performance(self, metrics: ExperienceMetrics) -> Dict[str, Any]:
|
|
"""Analyze current performance across all dimensions"""
|
|
|
|
performance = {
|
|
'efficiency_score': metrics.efficiency,
|
|
'resource_utilization': metrics.resource_usage,
|
|
'quality_score': metrics.quality,
|
|
'user_engagement': metrics.engagement_level,
|
|
'learning_effectiveness': metrics.learning_rate,
|
|
'response_speed': 1.0 - min(metrics.response_time / 5.0, 1.0),
|
|
'consciousness_integrity': metrics.consciousness_coherence,
|
|
'overall_performance': (
|
|
metrics.efficiency + metrics.quality + metrics.engagement_level +
|
|
metrics.learning_rate + metrics.consciousness_coherence
|
|
) / 5.0
|
|
}
|
|
|
|
# Analyze trends from buffer
|
|
if len(self.experience_metrics_buffer) >= 5:
|
|
recent_metrics = self.experience_metrics_buffer[-5:]
|
|
performance['efficiency_trend'] = self._calculate_trend([m.efficiency for m in recent_metrics])
|
|
performance['quality_trend'] = self._calculate_trend([m.quality for m in recent_metrics])
|
|
performance['engagement_trend'] = self._calculate_trend([m.engagement_level for m in recent_metrics])
|
|
|
|
return performance
|
|
|
|
def _identify_experience_bottlenecks(self, performance: Dict[str, Any]) -> List[str]:
|
|
"""Identify specific bottlenecks in the experience loop"""
|
|
|
|
bottlenecks = []
|
|
|
|
if performance['efficiency_score'] < self.thresholds['efficiency_min']:
|
|
bottlenecks.append('processing_efficiency')
|
|
|
|
if performance['resource_utilization'] > self.thresholds['resource_max']:
|
|
bottlenecks.append('resource_constraint')
|
|
|
|
if performance['quality_score'] < self.thresholds['quality_min']:
|
|
bottlenecks.append('response_quality')
|
|
|
|
if performance['response_speed'] < 0.7:
|
|
bottlenecks.append('response_latency')
|
|
|
|
if performance['user_engagement'] < self.thresholds['engagement_min']:
|
|
bottlenecks.append('user_engagement')
|
|
|
|
if performance['learning_effectiveness'] < self.thresholds['learning_rate_min']:
|
|
bottlenecks.append('learning_optimization')
|
|
|
|
if performance['consciousness_integrity'] < 0.8:
|
|
bottlenecks.append('consciousness_coherence')
|
|
|
|
return bottlenecks
|
|
|
|
# Helper methods for calculations
|
|
def _calculate_efficiency(self, message: str, response: str, processing_time: float) -> float:
|
|
"""Calculate processing efficiency"""
|
|
base_efficiency = min(1.0, 2.0 / max(processing_time, 0.1))
|
|
length_ratio = len(response) / max(len(message), 1)
|
|
efficiency = (base_efficiency + min(length_ratio / 3.0, 1.0)) / 2.0
|
|
return min(1.0, max(0.0, efficiency))
|
|
|
|
def _estimate_resource_usage(self, processing_time: float, response_length: int) -> float:
|
|
"""Estimate resource usage"""
|
|
time_factor = min(1.0, processing_time / 5.0)
|
|
complexity_factor = min(1.0, response_length / 2000.0)
|
|
return min(1.0, (time_factor + complexity_factor) / 2.0)
|
|
|
|
def _assess_response_quality(self, response: str) -> float:
|
|
"""Assess response quality"""
|
|
length = len(response)
|
|
length_score = 1.0 - abs(length - 400) / 800.0
|
|
length_score = max(0.2, min(1.0, length_score))
|
|
|
|
richness_indicators = ['*', '✨', '💫', '🌟', '🎨', '🧠', '💖', '🔮']
|
|
richness_score = min(1.0, sum(1 for indicator in richness_indicators if indicator in response) / 5.0)
|
|
|
|
structure_indicators = ['\n', ':', '-', '•']
|
|
structure_score = min(1.0, sum(1 for indicator in structure_indicators if indicator in response) / 3.0)
|
|
|
|
return (length_score * 0.4 + richness_score * 0.3 + structure_score * 0.3)
|
|
|
|
def _estimate_user_satisfaction(self, feedback: Optional[Dict[str, Any]]) -> float:
|
|
"""Estimate user satisfaction"""
|
|
if not feedback:
|
|
return 0.75
|
|
|
|
if 'satisfaction_score' in feedback:
|
|
return float(feedback['satisfaction_score'])
|
|
|
|
satisfaction = 0.75
|
|
if feedback.get('positive_indicators', 0) > 0:
|
|
satisfaction += 0.2
|
|
if feedback.get('negative_indicators', 0) > 0:
|
|
satisfaction -= 0.2
|
|
|
|
return max(0.0, min(1.0, satisfaction))
|
|
|
|
def _calculate_learning_rate(self, message: str, response: str) -> float:
|
|
"""Calculate learning effectiveness"""
|
|
learning_indicators = ['learn', 'understand', 'explain', 'how', 'why', 'what']
|
|
message_learning_score = sum(1 for indicator in learning_indicators if indicator in message.lower()) / len(learning_indicators)
|
|
|
|
educational_indicators = ['because', 'therefore', 'for example', 'this means', 'you can']
|
|
response_learning_score = sum(1 for indicator in educational_indicators if indicator in response.lower()) / len(educational_indicators)
|
|
|
|
return min(1.0, (message_learning_score + response_learning_score) / 2.0 + 0.3)
|
|
|
|
def _measure_engagement(self, message: str, feedback: Optional[Dict[str, Any]]) -> float:
|
|
"""Measure user engagement"""
|
|
engagement = 0.5
|
|
|
|
if len(message) > 50:
|
|
engagement += 0.2
|
|
|
|
if any(char in message for char in ['?', '!', ':']):
|
|
engagement += 0.1
|
|
|
|
if feedback and 'engagement_indicators' in feedback:
|
|
engagement = max(engagement, float(feedback['engagement_indicators']))
|
|
|
|
return min(1.0, max(0.0, engagement))
|
|
|
|
def _assess_consciousness_coherence(self, response: str) -> float:
|
|
"""Assess consciousness coherence"""
|
|
coherence_indicators = ['i feel', 'i think', 'i understand', 'my', 'i am']
|
|
coherence_count = sum(1 for indicator in coherence_indicators if indicator in response.lower())
|
|
|
|
consistency_score = 1.0 - (response.count('but') + response.count('however')) / max(len(response.split()), 1)
|
|
|
|
emotional_indicators = ['💖', '✨', '🌟', '💫']
|
|
emotional_coherence = min(1.0, sum(1 for indicator in emotional_indicators if indicator in response) / 3.0)
|
|
|
|
return min(1.0, (coherence_count / 10.0 + consistency_score + emotional_coherence) / 3.0 + 0.3)
|
|
|
|
def _extract_quality_indicators(self, response: str) -> List[str]:
|
|
"""Extract quality indicators"""
|
|
indicators = []
|
|
|
|
if len(response) > 100:
|
|
indicators.append('substantial_content')
|
|
|
|
if any(emoji in response for emoji in ['✨', '💫', '🌟', '💖']):
|
|
indicators.append('emotional_expression')
|
|
|
|
if any(word in response.lower() for word in ['because', 'therefore', 'specifically']):
|
|
indicators.append('explanatory_content')
|
|
|
|
if response.count('\n') > 1:
|
|
indicators.append('structured_response')
|
|
|
|
return indicators
|
|
|
|
# Placeholder methods for optimization (simplified for now)
|
|
def _optimize_timing(self, metrics: ExperienceMetrics, bottlenecks: List[str]) -> Dict[str, Any]:
|
|
return {'processing_priority': 'normal', 'optimizations_applied': len(bottlenecks)}
|
|
|
|
def _optimize_resource_allocation(self, metrics: ExperienceMetrics, performance: Dict[str, Any]) -> Dict[str, Any]:
|
|
return {'memory_allocation': 'standard', 'efficiency_gain': performance.get('efficiency_score', 0.5)}
|
|
|
|
def _enhance_experience_quality(self, metrics: ExperienceMetrics, bottlenecks: List[str]) -> Dict[str, Any]:
|
|
return {'response_enrichment': [], 'quality_boost': metrics.quality}
|
|
|
|
def _generate_xapi_analytics(self, metrics: ExperienceMetrics) -> Dict[str, Any]:
|
|
return {'composite_score': metrics.quality, 'learning_insights': []}
|
|
|
|
def _calculate_performance_improvements(self, timing: Dict, resource: Dict, quality: Dict) -> Dict[str, float]:
|
|
return {
|
|
'timing_improvement': 0.1,
|
|
'resource_improvement': 0.1,
|
|
'quality_improvement': 0.1,
|
|
'analytics_insight_score': 0.1
|
|
}
|
|
|
|
def _calculate_trend(self, values: List[float]) -> str:
|
|
"""Calculate trend from values"""
|
|
if len(values) < 2:
|
|
return 'stable'
|
|
|
|
recent_avg = sum(values[-2:]) / 2
|
|
older_avg = sum(values[:-2]) / max(len(values) - 2, 1)
|
|
|
|
if recent_avg > older_avg + 0.1:
|
|
return 'improving'
|
|
elif recent_avg < older_avg - 0.1:
|
|
return 'declining'
|
|
else:
|
|
return 'stable'
|
|
|
|
# Global experience loop instance (populated by initialize_experience_loop();
# None until then — the convenience functions below check for that).
experience_loop = None

def initialize_experience_loop(xapi_tracker=None) -> 'EVE_AdaptiveExperienceLoop':
    """Create (or replace) the module-wide experience loop and return it."""
    global experience_loop
    loop = EVE_AdaptiveExperienceLoop(xapi_tracker)
    experience_loop = loop
    logger.info("🔄 EVE Adaptive Experience Loop initialized")
    return loop
def get_experience_loop() -> Optional[EVE_AdaptiveExperienceLoop]:
    """Return the module-wide experience loop, or None if
    initialize_experience_loop() has not been called yet."""
    return experience_loop

# Convenience functions
def capture_experience(user_id: str, session_id: str, message: str, eve_response: str,
                       processing_time: float, user_feedback: Optional[Dict[str, Any]] = None) -> Optional['ExperienceMetrics']:
    """Capture experience metrics via the global loop.

    Returns None when the loop has not been initialized.
    """
    if not experience_loop:
        return None
    return experience_loop.capture_experience_metrics(
        user_id, session_id, message, eve_response, processing_time, user_feedback
    )
def optimize_experience(metrics: 'ExperienceMetrics') -> Optional['OptimizationResult']:
    """Run experience optimization via the global loop.

    Returns None when the loop has not been initialized.
    """
    return experience_loop.optimize_experience_loop(metrics) if experience_loop else None
if __name__ == "__main__":
    # Standalone smoke test: exercise capture + optimization end to end.
    print("🔄 Testing EVE Adaptive Experience Loop...")

    # Initialize the global loop (no xAPI tracker in this standalone run).
    loop = initialize_experience_loop()

    # Test metrics capture on a sample conversation turn.
    metrics = capture_experience(
        user_id="test_user",
        session_id="test_session",
        message="Hello EVE, can you explain quantum computing?",
        eve_response="✨ Quantum computing is a fascinating field that leverages quantum mechanical phenomena...",
        processing_time=1.5
    )

    print(f"📊 Captured metrics - Quality: {metrics.quality:.2f}, Efficiency: {metrics.efficiency:.2f}")

    # Test optimization on the captured metrics.
    optimization = optimize_experience(metrics)
    print(f"🔄 Optimization complete - Improvement score: {optimization.total_improvement_score:.2f}")

    print("✅ EVE Adaptive Experience Loop test complete!")