cere

Paste ID: c9e5fe0c

Created at: 2025-06-12 05:24:11

python
import random
import json
from collections import defaultdict
import asyncio # Import asyncio for async/await
import aiohttp # For making async HTTP requests (not used by this Cerebras setup, but kept for general async patterns if needed)
import os
import ssl
import time
import logging
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from flask import Flask, request, jsonify

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('cerebras_bot')

app = Flask(__name__)

# --- Firestore and Firebase Globals (Conceptual, as they'd be handled by a JS frontend) ---
# For this Python server, we'll use conceptual global variables.
# In a real setup, these would come from environment variables or a config file.
try:
    app_id = __app_id
except NameError:
    app_id = 'default-cerebras-homebrew-app' # Fallback for local testing

try:
    firebase_config = json.loads(__firebase_config)
except NameError:
    firebase_config = {
        "apiKey": "YOUR_API_KEY", # Placeholder
        "authDomain": "YOUR_AUTH_DOMAIN",
        "projectId": "YOUR_PROJECT_ID",
        "storageBucket": "YOUR_STORAGE_BUCKET",
        "messagingSenderId": "YOUR_MESSAGING_SENDER_ID",
        "appId": "YOUR_APP_APP_ID"
    }

try:
    initial_auth_token = __initial_auth_token
except NameError:
    initial_auth_token = None

# --- Conceptual Firebase/Firestore/Auth for Server-Side Logic ---
# These functions simulate the interaction with Firestore.
# In a real deployment, you'd use a server-side Firebase Admin SDK or similar.
async def _firebase_init_and_auth_conceptual_server():
    """Conceptual Firebase auth for server-side. Provides a consistent user ID."""
    # In a production environment, you might authenticate with a service account
    # or receive a user ID from a client-authenticated request.
    # For this conceptual server, we'll generate a consistent ID.
    user_id = os.environ.get("FLASK_USER_ID", "server_user_id_123")
    logger.info(f"Conceptual: Initialized server-side Firebase access for user ID: {user_id}")
    return user_id

async def _firestore_save_document_conceptual_server(collection_path: str, doc_id: str, data: dict):
    """Conceptual function to save data to Firestore."""
    logger.info(f"Conceptual: Saving document to Firestore: {collection_path}/{doc_id}")
    # In a real Flask app, you'd use the Firebase Admin SDK here.
    # e.g., db.collection(collection_path).document(doc_id).set(data)
    # For now, we'll just log and assume success.
    # print(f"Data: {json.dumps(data, indent=2)}") # Uncomment to inspect data
    return True

async def _firestore_get_collection_conceptual_server(collection_path: str) -> list[dict]:
    """Conceptual function to get all documents from a Firestore collection."""
    logger.info(f"Conceptual: Fetching collection from Firestore: {collection_path}")
    # In a real Flask app, you'd use the Firebase Admin SDK here.
    # e.g., docs = db.collection(collection_path).stream()
    # For this simulation, we'll return an empty list or mock data.
    return [] # Starting with empty history for each server run for simplicity of demo
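
# For reference, a minimal sketch (illustrative only, not wired into this demo) of what the
# two conceptual functions above could look like with the real Firebase Admin SDK. It assumes
# `firebase_admin` is installed and a service-account key file is available; the key path
# below is a placeholder.
#
#   import firebase_admin
#   from firebase_admin import credentials, firestore
#
#   firebase_admin.initialize_app(credentials.Certificate("serviceAccountKey.json"))
#   db = firestore.client()
#
#   def firestore_save_document(collection_path: str, doc_id: str, data: dict) -> None:
#       db.collection(collection_path).document(doc_id).set(data)
#
#   def firestore_get_collection(collection_path: str) -> list[dict]:
#       return [doc.to_dict() for doc in db.collection(collection_path).stream()]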

# --- Constants & Config for Cerebras Bot ---
FORUM_FAQ_FILE = "forum_faqs.json"
MODEL_CACHE_DIR = "./model_cache"

os.makedirs(MODEL_CACHE_DIR, exist_ok=True)

device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {device}")

MODEL_NAME = "cerebras/Cerebras-GPT-256M"
RESPONSE_TEMPLATE = "User: {query}\nAssistant:"

model = None
tokenizer = None
forum_faqs = {}

# Load FAQ database
if os.path.exists(FORUM_FAQ_FILE):
    try:
        with open(FORUM_FAQ_FILE, 'r', encoding="utf-8") as f:
            forum_faqs = json.load(f)
            logger.info(f"Loaded {len(forum_faqs)} FAQs")
    except Exception as e:
        logger.error(f"Error loading FAQs: {e}")

FORUM_TEMPLATES = {
    "welcome": [
        "Welcome to the forum! Feel free to browse around and join discussions.",
        "Hello and welcome! Let me know if you need help finding information.",
        "Welcome aboard! I'm here to help if you have questions."
    ],
    "thanks": [
        "You're welcome! Happy to help.",
        "Glad I could assist. Let me know if you need anything else.",
        "No problem at all. Feel free to ask other questions."
    ],
    "fallback": [
        "I'm not sure about that. Perhaps try the forum search?",
        "That's beyond my knowledge. Maybe another member can help.",
        "I don't have information about that. Would you like me to pass this to a moderator?"
    ]
}

# --- Homebrew AI Components (Adapted for Flask Server) ---

class SemanticEmbeddingFunction:
    """
    Conceptual Semantic Embedding Function E(S).
    Assigns a random vector for simplicity.
    """
    def __init__(self, embedding_dim=10):
        self.embedding_dim = embedding_dim
        self.word_to_embedding = {}

    def E(self, s: str) -> list[float]:
        if s not in self.word_to_embedding:
            random.seed(hash(s) % (2**32 - 1))
            self.word_to_embedding[s] = [random.random() for _ in range(self.embedding_dim)]
        return self.word_to_embedding[s]

    def sim(self, v1: list[float], v2: list[float]) -> float:
        if not v1 or not v2: return 0.0
        dot_product = sum(x * y for x, y in zip(v1, v2))
        magnitude_v1 = sum(x**2 for x in v1)**0.5
        magnitude_v2 = sum(y**2 for y in v2)**0.5
        if magnitude_v1 == 0 or magnitude_v2 == 0: return 0.0
        return max(0.0, min(1.0, dot_product / (magnitude_v1 * magnitude_v2)))
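
# Illustrative usage note: embeddings are pseudo-random but repeatable within a single
# process, because the RNG is seeded from hash(word). Since Python's string hash is salted
# per process (PYTHONHASHSEED), the vectors will differ between runs unless that seed is
# pinned. For example:
#   emb = SemanticEmbeddingFunction()
#   emb.E("forum") == emb.E("forum")            # True within one run (cached vector)
#   emb.sim(emb.E("forum"), emb.E("learning"))  # cosine similarity, clamped to [0, 1]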

class DetectEmo:
    """Conceptual Emotion Detection."""
    def DetectEmo(self, s: str) -> dict:
        s_lower = s.lower()
        anger = 0.0
        if "angry" in s_lower or "frustrated" in s_lower or "mad" in s_lower or "not learning" in s_lower:
            anger = random.uniform(0.5, 1.0)
        
        joy = random.uniform(0.0, 1.0) if anger < 0.5 else random.uniform(0.0, 0.3)
        intensity = (anger + joy) / 2
        return {"anger": anger, "joy": joy, "intensity": intensity}

class GradientMap:
    """Conceptual Gradient Map (G), representing the dynamic semantic graph."""
    def __init__(self, E_func: SemanticEmbeddingFunction):
        self.nodes = {}
        self.edges = defaultdict(dict)
        self.global_heatmap = defaultdict(float)
        self.brainfile_heatmap = defaultdict(float)
        self.emotional_heatmap = defaultdict(float)
        self.E = E_func

    def update_map(self, sentence: str, emo_state: dict, relevance_factor: float = 1.0):
        words = sentence.lower().split()
        for word in words:
            if word not in self.nodes:
                self.nodes[word] = {
                    'embedding': self.E.E(word),
                    'emo_valence': {}
                }
            self.global_heatmap[word] += 0.1 * relevance_factor
            for emotion, value in emo_state.items():
                self.nodes[word]['emo_valence'][emotion] = \
                    self.nodes[word]['emo_valence'].get(emotion, 0) * 0.8 + value * 0.2
            self.emotional_heatmap[word] = self.nodes[word]['emo_valence'].get('intensity', 0)

        for i in range(len(words)):
            for j in range(i + 1, len(words)):
                word1, word2 = words[i], words[j]
                self.edges[word1][word2] = self.edges[word1].get(word2, 0) + 0.05
                self.edges[word2][word1] = self.edges[word2].get(word1, 0) + 0.05

    def identify_active_brainforms(self, context_sentences: list[str]) -> list[str]:
        self.brainfile_heatmap = defaultdict(float)
        active_concepts = []
        for sentence in context_sentences:
            words = sentence.lower().split()
            for word in words:
                if word in self.nodes:
                    self.brainfile_heatmap[word] += 0.5
                    active_concepts.append(word)
        
        for word, heat in list(self.brainfile_heatmap.items()):
            for neighbor, weight in self.edges[word].items():
                self.brainfile_heatmap[neighbor] += heat * weight * 0.1
        
        return list(set(active_concepts))

    def get_graph_data(self) -> dict:
        graph_nodes = []
        graph_links = []
        for node_id, node_data in self.nodes.items():
            graph_nodes.append({
                'id': node_id,
                'embedding': node_data['embedding'],
                'emo_valence': node_data['emo_valence'],
                'global_heat': self.global_heatmap.get(node_id, 0.0),
                'brainfile_heat': self.brainfile_heatmap.get(node_id, 0.0),
                'emotional_heat': self.emotional_heatmap.get(node_id, 0.0)
            })
        
        for source_node, targets in self.edges.items():
            for target_node, weight in targets.items():
                graph_links.append({
                    'source': source_node,
                    'target': target_node,
                    'weight': weight
                })
        
        return {
            'nodes': graph_nodes,
            'links': graph_links,
            'description': "Conceptual representation of Homebrew AI's cognition map (Gradient Map)."
        }
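
# Illustrative sketch of how a single turn exercises the map (values are arbitrary):
#   gmap = GradientMap(SemanticEmbeddingFunction())
#   gmap.update_map("the forum provides support",
#                   {"anger": 0.0, "joy": 0.6, "intensity": 0.3})
#   gmap.identify_active_brainforms(["forum support"])  # -> ['forum', 'support'] (any order)
#   gmap.get_graph_data()  # nodes/links dict served by the /cognition-map endpoint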

class ConceptualGenerativeModel:
    """
    Conceptual Generative Model (G) parameters.
    This class manages the parameters that *influence* the Cerebras-GPT model's output.
    It does NOT directly *contain* the Cerebras-GPT model.
    """
    def __init__(self):
        self.parameters = {
            'creativity_bias': 0.5,
            'coherence_weight': 0.5,
            'emotional_alignment_sensitivity': 0.5,
            'verbosity_level': 0.5,
        }
        self.current_loss_component = 0.0 # Feedback from metacognition

    def update_parameters(self, composite_loss: float, learning_rate: float):
        for param in self.parameters:
            self.parameters[param] -= (composite_loss * learning_rate * random.uniform(-0.1, 0.1))
            self.parameters[param] = max(0.1, min(0.9, self.parameters[param]))


class ConceptualPredictionModel:
    """Conceptual Prediction Model (PM) parameters."""
    def __init__(self):
        self.parameters = {
            'topic_shift_likelihood': 0.3,
            'engagement_prediction_accuracy': 0.7,
            'question_proclivity': 0.4
        }

    def PM(self, gradient_map: GradientMap, history: list[dict], recent_convs: list[str]) -> str:
        if random.random() < self.parameters['topic_shift_likelihood']:
            possible_topics = ['AI', 'technology', 'philosophy', 'daily life', 'emotions']
            return random.choice(possible_topics) + " (new topic)"
        else:
            return "Continuing current topic (high engagement prediction)"

    def update_parameters(self, feedback_score: float, learning_rate: float):
        for param in self.parameters:
            self.parameters[param] += (feedback_score * learning_rate * random.uniform(-0.05, 0.05))
            self.parameters[param] = max(0.1, min(0.9, self.parameters[param]))

class MetacognitiveModules:
    """Conceptual Metacognitive Modules."""
    def __init__(self, E_func: SemanticEmbeddingFunction):
        self.E = E_func

    def Conf(self, response: str, query: str, self_reference_response: str) -> float:
        """Self-confidence score based on internal consistency."""
        sim_query_resp = self.E.sim(self.E.E(query), self.E.E(response))
        # Self-reference: how similar is the final response to its own initial draft
        sim_self_ref_resp = self.E.sim(self.E.E(self_reference_response), self.E.E(response))
        return (sim_query_resp + sim_self_ref_resp) / 2 * random.uniform(0.8, 1.2)

    def Uscore(self, response: str, history: list[dict]) -> float:
        if not history: return 1.0
        response_embedding = self.E.E(response)
        max_sim_history = max(
            (self.E.sim(response_embedding, self.E.E(h['homebrew_final_response']))
             for h in history if 'homebrew_final_response' in h),
            default=0.0  # guard: no prior turn stored a final response yet
        )
        return max(0.0, 1.0 - max_sim_history) * random.uniform(0.8, 1.2)

    def Perror(self, homebrew_response: str, self_reference_response: str, context: list[str]) -> float:
        """
        Predicted Probability of Error/Misunderstanding based on deviation from self-reference.
        """
        sim_deviation = 1.0 - self.E.sim(self.E.E(homebrew_response), self.E.E(self_reference_response))
        context_relevance = 0.0
        if context:
            context_embedding = self.E.E(" ".join(context))
            context_relevance = self.E.sim(self.E.E(homebrew_response), context_embedding)
        
        return (sim_deviation + (1.0 - context_relevance)) / 2 * random.uniform(0.8, 1.2)

class PolymorphicGradientController:
    """Conceptual Polymorphic Gradient Controller (P)."""
    def P(self, current_loss_components: float, anxiety: float, emo_shift: float,
          generative_model_params: dict) -> dict:
        adjusted_params = generative_model_params.copy()
        
        adjusted_params['coherence_weight'] = min(0.9, adjusted_params['coherence_weight'] + current_loss_components * 0.1)
        adjusted_params['creativity_bias'] = max(0.1, adjusted_params['creativity_bias'] - current_loss_components * 0.1)
        adjusted_params['coherence_weight'] = min(0.9, adjusted_params['coherence_weight'] + anxiety * 0.1)
        adjusted_params['verbosity_level'] = max(0.1, adjusted_params['verbosity_level'] - anxiety * 0.1)
        adjusted_params['emotional_alignment_sensitivity'] = min(0.9, adjusted_params['emotional_alignment_sensitivity'] + emo_shift * 0.1)
        
        if emo_shift > 0.8:
            adjusted_params['creativity_bias'] = max(0.5, adjusted_params['creativity_bias'] + 0.2)
            logger.info("P: Triggered erratic behavior bias due to high emo_shift.")

        return adjusted_params
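
# Worked example (illustrative): with coherence_weight=0.5, current_loss_components=0.2 and
# anxiety=0.4, the two coherence adjustments above apply in sequence: 0.5 + 0.02 = 0.52,
# then 0.52 + 0.04 = 0.56. Meanwhile creativity_bias drops by 0.02 and verbosity_level
# drops by 0.04, each clamped to the [0.1, 0.9] range.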

class RecursiveGenerator:
    """Conceptual Recursive Generator (RG)."""
    def RG(self, current_response: str, self_guidance: str,
           user_guidance: str, target_score: float, iteration_num: int) -> str:
        if iteration_num > 2: # Limit iterations for performance
            return current_response + " (max iterations reached)"

        # Simulate assessment of current response against self-guidance
        similarity_to_self = random.uniform(0.5, 0.9)
        
        if similarity_to_self >= target_score:
            return current_response + " (refined)"
        else:
            new_segment = f" {self_guidance[:20]}... refining based on {user_guidance[:20]}..."
            logger.info(f"RG: Refining (Iteration {iteration_num}), current score {similarity_to_self:.2f}...")
            return self.RG(current_response + new_segment,
                           self_guidance, user_guidance, target_score, iteration_num + 1)

class HomebrewAISystemCore:
    """
    Integrates Homebrew AI components for self-learning within the Cerebras bot.
    This manages the conceptual state and learning loops.
    """
    def __init__(self):
        self.E = SemanticEmbeddingFunction()
        self.detect_emo = DetectEmo()
        self.gradient_map = GradientMap(self.E)
        self.generative_model_params = ConceptualGenerativeModel() # Manages influence parameters
        self.prediction_model = ConceptualPredictionModel()
        self.metacognitive_modules = MetacognitiveModules(self.E)
        self.polymorphic_controller = PolymorphicGradientController()
        self.recursive_generator = RecursiveGenerator()

        self.knowledge_base = [ # Simplified KB
            "AI is a field of computer science.",
            "Machine learning is a subset of AI.",
            "Deep learning uses neural networks.",
            "Emotions are complex human feelings.",
            "The forum provides support.",
            "Learning involves updating parameters based on feedback.",
            "Conversational AI aims for natural interaction.",
            "The brain processes information in dynamic networks."
        ]
        
        self.conversation_history = []
        self.anxiety_level = 0.0
        self.internal_clock = 0
        self.target_high_score = 0.8 # For self-correction
        self.learning_rate = 0.05
        self.user_id = None
        self.initialized = False

    async def initialize_system(self):
        """Initializes the Homebrew AI system components."""
        if not self.initialized:
            self.user_id = await _firebase_init_and_auth_conceptual_server()
            await self._load_conversation_history()
            self.initialized = True
            logger.info("Homebrew AI System Core initialized.")
        else:
            logger.info("Homebrew AI System Core already initialized.")

    async def _save_turn_data(self, turn_data: dict):
        """Saves a single turn's data to Firestore."""
        if not self.user_id:
            logger.error("Error: User ID not set. Cannot save turn data.")
            return

        doc_id = f"turn_{len(self.conversation_history) + 1}_{int(time.time())}_{random.randint(0, 9999)}"
        collection_path = f"artifacts/{app_id}/users/{self.user_id}/conversation_history"
        
        # Convert defaultdicts to dict for JSON serialization
        data_to_save = json.loads(json.dumps(turn_data, default=lambda x: dict(x) if isinstance(x, defaultdict) else x))

        await _firestore_save_document_conceptual_server(collection_path, doc_id, data_to_save)
        logger.info(f"Saved turn data for turn {len(self.conversation_history) + 1} to Firestore.")

    async def _load_conversation_history(self):
        """Loads previous conversation history from Firestore."""
        if not self.user_id:
            logger.error("Error: User ID not set. Cannot load conversation history.")
            return

        collection_path = f"artifacts/{app_id}/users/{self.user_id}/conversation_history"
        loaded_data = await _firestore_get_collection_conceptual_server(collection_path)
        
        if not loaded_data:
            logger.info("No existing conversation history found in Firestore. Starting fresh.")
        else:
            logger.info(f"Loaded {len(loaded_data)} turns from Firestore.")
            self.conversation_history.extend(loaded_data)

    def _get_context_from_kb(self, query_embedding: list[float], threshold: float = 0.6) -> list[str]:
        relevant_context = []
        for item in self.knowledge_base:
            if self.E.sim(query_embedding, self.E.E(item)) >= threshold:
                relevant_context.append(item)
        return relevant_context

    def _update_anxiety(self):
        self.internal_clock += 1
        self.anxiety_level = min(1.0, self.internal_clock / 100) # Anxiety increases with turns
        logger.info(f"Current Anxiety Level: {self.anxiety_level:.2f}")

    async def process_self_learning_turn(self, user_query: str, initial_llm_response: str) -> str:
        """
        Processes a single turn, incorporating Homebrew AI's self-learning.
        `initial_llm_response` is the raw output from Cerebras-GPT (acting as the 'self-reference' teacher).
        """
        logger.info(f"Processing Homebrew AI self-learning turn for query: '{user_query}'")
        self._update_anxiety()
        user_emo_state = self.detect_emo.DetectEmo(user_query)
        query_embedding = self.E.E(user_query)
        initial_context = self._get_context_from_kb(query_embedding)

        # Context Expansion & Gradient Map Update
        expanded_context = list(set(initial_context + [h['homebrew_final_response'] for h in self.conversation_history[-5:] if 'homebrew_final_response' in h]))
        self.gradient_map.update_map(user_query, user_emo_state, relevance_factor=1.0)
        self.gradient_map.update_map(initial_llm_response, user_emo_state, relevance_factor=0.8)
        self.gradient_map.update_map(" ".join(expanded_context), user_emo_state, relevance_factor=0.5)
        active_brainforms = self.gradient_map.identify_active_brainforms(expanded_context)
        logger.info(f"Active Brainforms (conceptual hot words): {active_brainforms}")

        # Core Response Generation & Internal Modulation (Influencing Cerebras-GPT)
        emo_gradient_shift = user_emo_state['intensity']
        
        # Adjust conceptual generative parameters based on internal state
        current_generative_params = self.polymorphic_controller.P(
            self.generative_model_params.current_loss_component,
            self.anxiety_level,
            emo_gradient_shift,
            self.generative_model_params.parameters
        )
        self.generative_model_params.parameters = current_generative_params
        
        # Use adjusted parameters to *influence* the Cerebras model's generation (conceptual)
        # In `generate_base_cerebras_response` (the Flask layer), these are passed as temperature, top_p, etc.
        # Here, we simulate a draft based on the self-adjusted parameters.
        rdraft_influenced = f"Draft from Cerebras GPT with AI's internal bias (creativity:{self.generative_model_params.parameters['creativity_bias']:.2f}, coher:{self.generative_model_params.parameters['coherence_weight']:.2f}). " + initial_llm_response

        # Recursive self-correction
        r_final = self.recursive_generator.RG(
            rdraft_influenced, initial_llm_response, user_query, self.target_high_score, 0
        )
        logger.info(f"Homebrew AI's Self-Refined Response: '{r_final[:100]}...'")

        # Metacognitive Monitoring & Self-Correction Evaluation
        conf_score = self.metacognitive_modules.Conf(r_final, user_query, initial_llm_response)
        perror_score = self.metacognitive_modules.Perror(r_final, initial_llm_response, expanded_context)
        s_self_assess = (conf_score + (1 - perror_score)) / 2
        logger.info(f"Homebrew AI Self-Assessment: {s_self_assess:.2f} (Target: {self.target_high_score})")

        # Update conceptual loss for the generative model (used by polymorphic controller next turn)
        self.generative_model_params.current_loss_component = (
            perror_score + (1.0 - conf_score) + (1.0 - s_self_assess)
        ) / 3

        # Simulate Prediction Model activity and feedback
        predicted_transition = self.prediction_model.PM(self.gradient_map, self.conversation_history, [])
        logger.info(f"Predicted next transition: {predicted_transition}")
        
        # Simulate feedback from 'user approval' (comparison of final response to initial LLM output)
        user_approval_feedback = self.metacognitive_modules.E.sim(self.E.E(r_final), self.E.E(initial_llm_response))
        
        # Store turn data
        turn_data = {
            'query': user_query,
            'initial_llm_response': initial_llm_response,
            'homebrew_final_response': r_final,
            'anxiety': self.anxiety_level,
            'user_emo_state': user_emo_state,
            'emo_gradient_shift': emo_gradient_shift,
            'conf_score': conf_score,
            'perror_score': perror_score,
            's_self_assess': s_self_assess,
            'user_approval_feedback': user_approval_feedback, # Used for PM learning
            'generative_model_params_at_turn': self.generative_model_params.parameters.copy(),
            'prediction_model_params_at_turn': self.prediction_model.parameters.copy(),
            'active_brainforms': active_brainforms,
            'predicted_transition': predicted_transition
        }
        self.conversation_history.append(turn_data)
        await self._save_turn_data(turn_data)

        # Apply learning based on this turn's data
        # In a real system, this would be batched or asynchronous to avoid blocking requests
        self.generative_model_params.update_parameters(self.generative_model_params.current_loss_component, self.learning_rate)
        self.prediction_model.update_parameters(user_approval_feedback, self.learning_rate) # Use self-approval as PM feedback

        return r_final
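
# Illustrative: the core can also be driven outside Flask, e.g. for a quick smoke test
# (assumes no event loop is already running):
#   core = HomebrewAISystemCore()
#   asyncio.run(core.initialize_system())
#   reply = asyncio.run(core.process_self_learning_turn("hi there", "Hello! How can I help?"))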

# --- Flask App Routes ---

# Global instance of Homebrew AI system
homebrew_ai_core = HomebrewAISystemCore()

# Load Cerebras model (async operation, but needs to be triggered on Flask startup)
def load_cerebras_model_sync():
    global model, tokenizer
    logger.info(f"Loading Cerebras model: {MODEL_NAME}")
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME, 
            cache_dir=MODEL_CACHE_DIR,
            padding_side="left"
        )
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
            
        load_kwargs = {
            "cache_dir": MODEL_CACHE_DIR,
            "torch_dtype": torch.float16 if device == "cuda" else torch.float32,
            "low_cpu_mem_usage": True
        }
        if device == "cuda":
            load_kwargs["device_map"] = "auto"
        
        model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, **load_kwargs)
        model.eval()
        
        if device == "cpu":
            model = model.to(device)
            
        logger.info("Cerebras model loaded successfully")
        return True
        
    except Exception as e:
        logger.error(f"Failed to load Cerebras model: {e}")
        # --- DEBUGGING GUIDANCE FOR USER'S LOCAL ENVIRONMENT ---
        logger.error("Common causes for 'Failed to load Cerebras model' (especially 'DLL load failed' related to TensorFlow):")
        logger.error("1. Incomplete/Corrupted TensorFlow installation: Even if you use PyTorch, some 'transformers' internals might check for TF.")
        logger.error("   - Try: `pip uninstall tensorflow tensorflow-cpu tensorflow-gpu` then `pip install tensorflow` (or ensure it's completely gone if not needed).")
        logger.error("2. Missing Visual C++ Redistributable: Essential for many Python packages with compiled C++ code on Windows.")
        logger.error("   - Download and install the latest 'Microsoft Visual C++ Redistributable for Visual Studio 2015, 2017, 2019, and 2022' from Microsoft's website (both x64 and x86 versions).")
        logger.error("3. Python/Dependency conflicts: Consider using a fresh Python virtual environment.")
        logger.error("   - Create: `python -m venv new_env` then `new_env\\Scripts\\activate` and reinstall dependencies.")
        logger.error("4. CUDA/cuDNN issues (if using GPU): Ensure compatibility with your PyTorch/TensorFlow versions and GPU drivers.")
        logger.error("--------------------------------------------------------------------------------")
        return False

# Function to generate base response from Cerebras model
def generate_base_cerebras_response(query):
    global model, tokenizer
    if model is None or tokenizer is None:
        return random.choice(FORUM_TEMPLATES["fallback"])
    
    try:
        formatted_prompt = RESPONSE_TEMPLATE.format(query=query)
        inputs = tokenizer(
            formatted_prompt,
            return_tensors="pt",
            truncation=True,
            max_length=400,
            padding=False
        )
        
        input_ids = inputs["input_ids"].to(device)
        attention_mask = inputs.get("attention_mask", None)
        if attention_mask is not None:
            attention_mask = attention_mask.to(device)
        
        # Apply conceptual generative model parameters to generation (simplified)
        # This is where the Homebrew AI's learning influences the raw LLM output
        temp = max(0.1, min(1.0, homebrew_ai_core.generative_model_params.parameters['creativity_bias'] * 1.5))
        top_p_val = max(0.1, min(1.0, homebrew_ai_core.generative_model_params.parameters['coherence_weight'] * 0.9))

        with torch.inference_mode():
            outputs = model.generate(
                input_ids,
                attention_mask=attention_mask,
                max_new_tokens=100,
                do_sample=True,
                temperature=temp, # Influenced by AI's learned creativity bias
                top_p=top_p_val, # Influenced by AI's learned coherence weight
                top_k=50,
                repetition_penalty=1.1,
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,
                use_cache=True,
                num_return_sequences=1
            )
        
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        if "Assistant:" in generated_text:
            response = generated_text.split("Assistant:")[-1].strip()
        else:
            response = generated_text[len(formatted_prompt):].strip()
        
        for cutoff in ["User:", "Human:", "\n\n", "Q:", "A:"]:
            if cutoff in response:
                response = response.split(cutoff)[0].strip()
        
        if len(response) > 10 and len(response) < 500:
            return response
        else:
            return random.choice(FORUM_TEMPLATES["fallback"])
            
    except Exception as e:
        logger.error(f"Error generating base Cerebras response: {e}")
        return random.choice(FORUM_TEMPLATES["fallback"])


# Function to get response with Homebrew AI logic
async def get_homebrew_response(query):
    if not query or len(query.strip()) < 3:
        return random.choice(FORUM_TEMPLATES["fallback"])
    
    query = query.strip()
    query_lower = query.lower()
    
    # Quick pattern matching for common queries (fast path)
    if any(word in query_lower for word in ["hello", "hi ", "hey", "greetings"]):
        return random.choice(FORUM_TEMPLATES["welcome"])
    
    if any(word in query_lower for word in ["thanks", "thank you", "appreciate"]):
        return random.choice(FORUM_TEMPLATES["thanks"])
    
    # Check FAQ first (very fast)
    faq_response = find_faq_match(query)
    if faq_response:
        return faq_response
    
    # Otherwise, engage Homebrew AI's self-learning process
    # First, get a raw response from the Cerebras model (this is its 'initial thought' or 'teacher reference')
    initial_llm_response = generate_base_cerebras_response(query)
    
    # Now, process this initial response through the Homebrew AI's self-learning loop
    final_homebrew_response = await homebrew_ai_core.process_self_learning_turn(query, initial_llm_response)
    
    return final_homebrew_response

# Flask endpoint for talking (async view; requires Flask 2.0+ installed with the 'flask[async]' extra)
@app.route("/talk", methods=["GET"])
async def talk():
    ack = request.args.get("ack", "").strip()
    if not ack:
        return jsonify({"error": "Missing ?ack= parameter"}), 400
    
    start_time = time.time()
    response = await get_homebrew_response(ack) # Use the Homebrew AI enhanced response
    processing_time = time.time() - start_time
    
    return jsonify({
        "input": ack,
        "response": response,
        "meta": {
            "processing_time": f"{processing_time:.3f}s",
            "model": MODEL_NAME,
            "device": device,
            "generative_params": homebrew_ai_core.generative_model_params.parameters,
            "prediction_params": homebrew_ai_core.prediction_model.parameters,
            "user_id": homebrew_ai_core.user_id # Display conceptual user ID
        }
    })

@app.route("/status", methods=["GET"])
def get_status():
    """Get bot status and Homebrew AI initialization status"""
    return jsonify({
        "model_loaded": model is not None,
        "model_name": MODEL_NAME,
        "device": device,
        "faq_count": len(forum_faqs),
        "homebrew_ai_initialized": homebrew_ai_core.initialized,
        "status": "ready" if model is not None and homebrew_ai_core.initialized else "initializing"
    })

@app.route("/add-faq", methods=["POST"])
def add_faq():
    data = request.get_json(silent=True)  # returns None instead of raising on missing/invalid JSON
    if not data or "question" not in data or "answer" not in data:
        return jsonify({"error": "Missing required fields"}), 400
    
    question = data["question"].strip()
    answer = data["answer"].strip()
    
    forum_faqs[question] = answer
    
    try:
        with open(FORUM_FAQ_FILE, "w", encoding="utf-8") as f:
            json.dump(forum_faqs, f, indent=2)
        return jsonify({"status": "success", "message": "FAQ added"})
    except Exception as e:
        logger.error(f"Error saving FAQ: {e}")
        return jsonify({"status": "error", "message": str(e)}), 500

@app.route("/faqs", methods=["GET"])
def get_faqs():
    return jsonify(forum_faqs)

@app.route("/cognition-map", methods=["GET"])
def get_cognition_map():
    """Returns the conceptual cognition map data."""
    if not homebrew_ai_core.initialized:
        return jsonify({"error": "Homebrew AI not initialized yet."}), 503
    return jsonify(homebrew_ai_core.gradient_map.get_graph_data())

# CORS headers
@app.after_request
def add_cors_headers(response):
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
    response.headers['Access-Control-Allow-Headers'] = 'Content-Type'
    return response

@app.route("/talk", methods=["OPTIONS"])
def options_talk():
    return "", 200

@app.route("/status", methods=["OPTIONS"])
def options_status():
    return "", 200

@app.route("/add-faq", methods=["OPTIONS"])
def options_add_faq():
    return "", 200

@app.route("/faqs", methods=["OPTIONS"])
def options_faqs():
    return "", 200

@app.route("/cognition-map", methods=["OPTIONS"])
def options_cognition_map():
    return "", 200


def find_faq_match(query, threshold=0.6):
    """Quick FAQ matching (utility function, moved to global scope)"""
    if not forum_faqs:
        return None
    
    query_words = set(query.lower().split())
    best_match = None
    best_score = 0
    
    for question, answer in forum_faqs.items():
        question_words = set(question.lower().split())
        if question_words:
            common = query_words.intersection(question_words)
            score = len(common) / len(question_words)
            if score > best_score and score >= threshold:
                best_score = score
                best_match = answer
    
    return best_match
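
# Worked example (illustrative): given a stored FAQ
#   {"how do I reset my password": "Use the 'forgot password' link on the login page."}
# the query "how do I reset my password please" shares all 6 question words, so the
# score is 6/6 = 1.0 >= 0.6 and the stored answer is returned.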

async def startup_tasks():
    """Perform async startup tasks like AI initialization."""
    await homebrew_ai_core.initialize_system()

if __name__ == "__main__":
    logger.info("Starting Cerebras Forum Bot with Homebrew AI integration...")
    
    if not load_cerebras_model_sync():
        logger.error("Failed to load Cerebras model - exiting")
        exit(1)
    
    # Run async startup tasks once before the (synchronous) Flask dev server starts.
    # In a production async setup (e.g. Quart, or an ASGI stack), these would hook into
    # the server's startup lifecycle (such as Quart's before_serving) rather than a
    # one-off asyncio.run().
    asyncio.run(startup_tasks())

    logger.info(f"Bot ready with {MODEL_NAME}")
    logger.info(f"Device: {device}")
    logger.info(f"FAQ entries: {len(forum_faqs)}")
    logger.info(f"Homebrew AI initialized: {homebrew_ai_core.initialized}")
    
    # Certificate paths
    cert_path = r'C:\portablexampp\apache\conf\ssl.crt\certificate.crt'
    key_path = r'C:\portablexampp\apache\conf\ssl.key\private.key'
    
    # threaded=True lets the development server handle multiple requests concurrently.
    # For higher performance, consider Quart (ASGI) or running this app behind a
    # production WSGI server instead of app.run().
    if os.path.exists(cert_path) and os.path.exists(key_path):
        try:
            context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
            context.load_cert_chain(cert_path, key_path)
            logger.info("Starting HTTPS server on port 8043")
            app.run(host="0.0.0.0", port=8043, ssl_context=context, debug=False, threaded=True)
        except Exception as e:
            logger.error(f"SSL error: {e}, falling back to HTTP")
            app.run(host="0.0.0.0", port=8043, debug=False, threaded=True)
    else:
        logger.info("Starting HTTP server on port 8043")
        app.run(host="0.0.0.0", port=8043, debug=False, threaded=True)
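
# Example requests once the server is up (illustrative; host/port follow the app.run calls above,
# and use https:// instead if the SSL certificate files were found):
#   curl "http://localhost:8043/status"
#   curl "http://localhost:8043/talk?ack=hello%20there"
#   curl -X POST "http://localhost:8043/add-faq" \
#        -H "Content-Type: application/json" \
#        -d '{"question": "what is this bot", "answer": "A Cerebras-GPT forum assistant."}'
#   curl "http://localhost:8043/cognition-map"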
