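"""AICoreAGIX: asynchronous AI core combining a local causal language model,
multi-agent delegation, neuro-symbolic reasoning, federated learning, encrypted
vector memory, ethics/failsafe gating, TB diagnostics, and a lockdown mode."""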
import aiohttp
import asyncio
import json
import logging
import torch
import faiss
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer
from typing import List, Dict, Any
from cryptography.fernet import Fernet
from jwt import encode, decode, ExpiredSignatureError
from datetime import datetime, timedelta
import pyttsx3
import os
from components.multi_model_analyzer import MultiAgentSystem
from components.neuro_symbolic_engine import NeuroSymbolicEngine
from components.self_improving_ai import SelfImprovingAI
from modules.secure_memory_loader import load_secure_memory_module
from ethical_filter import EthicalFilter
from codette_openai_fallback import query_codette_with_fallback
from CodriaoCore.federated_learning import FederatedAI
from utils.database import Database
from utils.logger import logger
from codriao_tb_module import CodriaoHealthModule
from fail_safe import AIFailsafeSystem
from quarantine_engine import QuarantineEngine
from anomaly_score import AnomalyScorer
from ethics_core import EthicsCore
class AICoreAGIX:
def __init__(self, config_path: str = "config.json"):
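        """Loads configuration, initializes the local model, secure memory, and all subsystems."""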
self.ethical_filter = EthicalFilter()
self.config = self._load_config(config_path)
self.tokenizer = AutoTokenizer.from_pretrained(self.config["model_name"])
self.model = AutoModelForCausalLM.from_pretrained(self.config["model_name"])
self.context_memory = self._initialize_vector_memory()
self.http_session = aiohttp.ClientSession()
self.database = Database()
self.multi_agent_system = MultiAgentSystem()
self.self_improving_ai = SelfImprovingAI()
self.neural_symbolic_engine = NeuroSymbolicEngine()
self.federated_ai = FederatedAI()
self.failsafe_system = AIFailsafeSystem()
self.ethics_core = EthicsCore()
        # Secure memory setup
        self._encryption_key = Fernet.generate_key()
        secure_memory_module = load_secure_memory_module()
        SecureMemorySession = secure_memory_module.SecureMemorySession
        self.secure_memory_loader = SecureMemorySession(self._encryption_key)

        self.training_memory = []
        self.speech_engine = pyttsx3.init()
        self.health_module = CodriaoHealthModule(ai_core=self)
        self.quarantine_engine = QuarantineEngine()
        self.anomaly_scorer = AnomalyScorer()
        self.lockdown_engaged = False

    def engage_lockdown_mode(self, reason="Unspecified anomaly"):
        """Disables external and learning subsystems, logs the event, and triggers the failsafe."""
        timestamp = datetime.utcnow().isoformat()
        self.lockdown_engaged = True

        # Disable external systems
        try:
            self.http_session = None
            if hasattr(self.federated_ai, "network_enabled"):
                self.federated_ai.network_enabled = False
            if hasattr(self.self_improving_ai, "enable_learning"):
                self.self_improving_ai.enable_learning = False
        except Exception as e:
            logger.error(f"Lockdown component shutdown failed: {e}")

        # Log the event
        lockdown_event = {
            "event": "Lockdown Mode Activated",
            "reason": reason,
            "timestamp": timestamp
        }
        logger.warning(f"[LOCKDOWN MODE] - Reason: {reason} | Time: {timestamp}")
        self.failsafe_system.trigger_failsafe("Lockdown initiated", str(lockdown_event))

        # Return confirmation
        return {
            "status": "Lockdown Engaged",
            "reason": reason,
            "timestamp": timestamp
        }
def learn_from_interaction(self, query: str, response: str, user_feedback: str = None):
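        """Appends a query/response/feedback record to the in-memory training log."""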
training_event = {
"query": query,
"response": response,
"feedback": user_feedback,
"timestamp": datetime.utcnow().isoformat()
}
self.training_memory.append(training_event)
logger.info(f"[Codriao Learning] Stored new training sample. Feedback: {user_feedback or 'none'}")
def analyze_event_for_anomalies(self, event_type: str, data: dict):
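        """Scores an event for anomalies and quarantines the offending module when the score is high."""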
score = self.anomaly_scorer.score_event(event_type, data)
if score["score"] >= 70:
# Defensive, not destructive
self.quarantine_engine.quarantine(data.get("module", "unknown"), reason=score["notes"])
logger.warning(f"[Codriao]: Suspicious activity quarantined. Module: {data.get('module')}")
return score
def _load_config(self, config_path: str) -> dict:
"""Loads the configuration file."""
try:
with open(config_path, 'r') as file:
return json.load(file)
except FileNotFoundError:
logger.error(f"Configuration file not found: {config_path}")
raise
except json.JSONDecodeError as e:
logger.error(f"Error decoding JSON in config file: {config_path}, Error: {e}")
raise
def _initialize_vector_memory(self):
"""Initializes FAISS vector memory."""
return faiss.IndexFlatL2(768)
    def _vectorize_query(self, query: str):
        """Vectorizes the user query using the tokenizer.

        Note: this returns raw token IDs as a lightweight proxy vector, not a
        768-dim embedding matching the FAISS index from _initialize_vector_memory().
        """
        tokenized = self.tokenizer(query, return_tensors="pt")
        return tokenized["input_ids"].detach().numpy()
async def generate_response(self, query: str, user_id: int) -> Dict[str, Any]:
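        """Validates and filters the query, gathers responses from all engines,
        applies failsafe and ethics checks, then logs, stores, and speaks the result."""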
try:
# Validate query input
if not isinstance(query, str) or len(query.strip()) == 0:
raise ValueError("Invalid query input.")
# Ethical filter
result = self.ethical_filter.analyze_query(query)
if result["status"] == "blocked":
return {"error": result["reason"]}
if result["status"] == "flagged":
logger.warning(result["warning"])
# Special diagnostics trigger
if any(phrase in query.lower() for phrase in ["tb check", "analyze my tb", "run tb diagnostics", "tb test"]):
return await self.run_tb_diagnostics("tb_image.jpg", "tb_cough.wav", user_id)
# Vector memory and responses
vectorized_query = self._vectorize_query(query)
self.secure_memory_loader.encrypt_vector(user_id, vectorized_query)
responses = await asyncio.gather(
self._generate_local_model_response(query),
self.multi_agent_system.delegate_task(query),
self.self_improving_ai.evaluate_response(query),
self.neural_symbolic_engine.integrate_reasoning(query)
)
final_response = "\n\n".join(responses)
            # Verify response safety
            safe = self.failsafe_system.verify_response_safety(final_response)
            if not safe:
                return {"error": "Failsafe triggered due to unsafe response content."}

            # Ethics check on the final response
            if not self.ethics_core.evaluate_action(final_response):
                logger.warning("[Codriao Ethics] Action blocked: Does not align with internal ethics.")
                return {"error": "Response rejected by ethical framework"}

            self.database.log_interaction(user_id, query, final_response)
            self._log_to_blockchain(user_id, query, final_response)

            # Store training data (you can customize feedback later)
            self.learn_from_interaction(query, final_response, user_feedback="auto-pass")

            self._speak_response(final_response)
return {
"response": final_response,
"real_time_data": self.federated_ai.get_latest_data(),
"context_enhanced": True,
"security_status": "Fully Secure"
}
except Exception as e:
logger.error(f"Response generation failed: {e}")
return {"error": "Processing failed - safety protocols engaged"}
async def _generate_local_model_response(self, query: str) -> str:
"""Generates a response using the local model."""
inputs = self.tokenizer(query, return_tensors="pt")
outputs = self.model.generate(**inputs)
return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
async def run_tb_diagnostics(self, image_path: str, audio_path: str, user_id: int) -> Dict[str, Any]:
"""Runs TB diagnostics with AI modules."""
try:
result = await self.health_module.evaluate_tb_risk(image_path, audio_path, user_id)
logger.info(f"TB Diagnostic Result: {result}")
return result
except Exception as e:
logger.error(f"TB diagnostics failed: {e}")
return {"tb_risk": "ERROR", "error": str(e)}
def _log_to_blockchain(self, user_id: int, query: str, final_response: str):
"""Logs interaction to blockchain with retries."""
retries = 3
for attempt in range(retries):
try:
logger.info(f"Logging interaction to blockchain: Attempt {attempt + 1}")
break
except Exception as e:
logger.warning(f"Blockchain logging failed: {e}")
continue
def fine_tune_from_memory(self):
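        """Derives simple behavioral insights from stored interactions (simulated fine-tuning)."""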
if not self.training_memory:
logger.info("[Codriao Training] No training data to learn from.")
return "No training data available."
# Simulate learning pattern: Adjust internal weights or strategies
learned_insights = []
for record in self.training_memory:
if "panic" in record["query"].lower() or "unsafe" in record["response"].lower():
learned_insights.append("Avoid panic triggers in response phrasing.")
logger.info(f"[Codriao Training] Learned {len(learned_insights)} behavioral insights.")
return {
"insights": learned_insights,
"trained_samples": len(self.training_memory)
}
def _speak_response(self, response: str):
"""Speaks out the generated response."""
try:
self.speech_engine.say(response)
self.speech_engine.runAndWait()
except Exception as e:
logger.error(f"Speech synthesis failed: {e}")
    async def shutdown(self):
        """Closes asynchronous resources."""
        if self.http_session:
            await self.http_session.close()
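
# Minimal usage sketch (illustrative, not part of the module): assumes config.json
# provides "model_name" and that all component modules imported above are available.
if __name__ == "__main__":
    async def _demo():
        core = AICoreAGIX(config_path="config.json")
        try:
            result = await core.generate_response("Hello, Codriao", user_id=1)
            print(result)
        finally:
            await core.shutdown()

    asyncio.run(_demo())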