this is my code can you return me a full patched c...

Créé le : 11 février 2025

Répondu en utilisant O3 Mini High par Chat01

Question

this is my code can you return me a full patched cause i got problems when i deploy on groq :
"""

app.py

from gevent import monkey
monkey.patch_all()
import os
import re
import json
import logging
from typing import Any, Dict, List, Optional, Tuple, Generator
import time
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor, as_completed

from flask import Flask, request, Response, jsonify, stream_with_context
from flask_cors import CORS

from bs4 import BeautifulSoup <- Import redondant supprimé

from groq import Groq
from e2b_code_interpreter import Sandbox

Importation des décorateurs tenacity

from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
import httpx # Importez httpx pour vérifier le type d'exception

Configuration du logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(name)

app = Flask(name)
CORS(app)

Clés API (variables d'environnement ou valeurs par défaut sécurisées)

GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
E2B_API_KEY = os.environ.get("E2B_API_KEY")

PISTON_API_URL = "https://emkc.org/api/v2/piston/execute" # URL de l'API Piston
WHOOGLE_INSTANCE_URL = "https://chatapp-1-u8t2.onrender.com" # URL de votre instance Whoogle

groq_client = Groq(api_key=GROQ_API_KEY)
sandbox = Sandbox(api_key=E2B_API_KEY, timeout=600)

VALID_MODELS = [
"llama-3.3-70b-versatile","distil-whisper-large-v3-en", "gemma2-9b-it", "llama-3.1-8b-instant",
"llama-guard-3-8b", "llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "whisper-large-v3",
"whisper-large-v3-turbo", "deepseek-r1-distill-llama-70b-specdec", "deepseek-r1-distill-llama-70b",
"llama-3.3-specdec", "llama-3.2-1b-preview", "llama-3.2-3b-preview", "llama-3.2-11b-vision-preview",
"llama-3.2-90b-vision-preview", "llama3-70b-8192","qwen-2.5-32b "
]
VALID_MODELS_LOW_TOKEN = [ # Smaller models for token saving
"qwen-2.5-32b","llama-3.1-8b-instant", "gemma2-9b-it", "mixtral-8x7b-32768"
]
DEFAULT_CHAT_MODEL_LOW_TOKEN = "qwen-2.5-32b"
SUMMARIZATION_MODEL = "qwen-2.5-32b" # Smaller model for summarization

CODE_PATTERN = re.compile(r"python\s*([\s\S]*?)")
CODE_REQUIRED_MARKER = "[CODE_REQUIRED]"
CANVAS_START_MARKER = "[CANVAS_START]"
CANVAS_END_MARKER = "[CANVAS_END]"

Limites pour le contexte web (Whoogle) - Reduced limits for token saving

MAX_CONTEXT_CHARS_PER_PAGE = 1000 # Reduced context per page
MAX_TOTAL_CONTEXT_CHARS = 5000 # Reduced total context
MAX_WEB_LINKS = 5 # Reduced number of web links
WHOOGLE_CACHE: Dict[str, str] = {}

CHAT_HISTORY_MESSAGE_LIMIT = 10 # Limit chat history for token saving

http_session = requests.Session()
http_session.headers.update({"User-Agent": "PlinkoChatBot/1.0"})

def operate_requests(prompt) -> Generator[Dict[str, str], None, Tuple[str, List[str]]]:
"""
Fonction regroupant toutes les opérations :
1. Recherche sur Whoogle.
2. Limitation à 7 liens.
3. Extraction et résumé du contenu de chaque page (en parallèle).
4. Retourne le contexte obtenu et la liste des sources.
"""
# Utilisation de la clé API Groq définie globalement
global GROQ_API_KEY # Déclarer que nous utilisons la variable globale (optionnel ici, mais bonne pratique)
groq_client = Groq(api_key=GROQ_API_KEY)

text
def whoogle_search(query): """ Effectue une recherche via Whoogle et retourne une liste de liens. """ base_url = WHOOGLE_INSTANCE_URL + "/search" # Utilisation de WHOOGLE_INSTANCE_URL params = {"q": query} headers = {"User-Agent": "Mozilla/5.0"} try: response = requests.get(base_url, params=params, headers=headers) response.raise_for_status() except requests.exceptions.RequestException as e: print(f"Erreur lors de la requête Whoogle: {e}") return [] soup = BeautifulSoup(response.text, "html.parser") # Extraire uniquement les liens commençant par "http" links = [result["href"] for result in soup.find_all("a", href=True) if result["href"].startswith("http")] return links @retry(stop=stop_after_attempt(5), # Réessayer jusqu'à 5 fois wait=wait_exponential(multiplier=1, min=4, max=60), # Délai exponentiel (min 4s, max 60s) retry=retry_if_exception_type(httpx.HTTPError)) # Réessayer seulement pour les erreurs HTTP (incluant 429) def summarize_with_groq(query, text_content): """ Utilise l'API Groq pour extraire les informations importantes du texte. Retry avec tenacity en cas d'erreur 429. """ try: response = groq_client.chat.completions.create( model=SUMMARIZATION_MODEL, # Smaller Groq model for summarization messages=[ { "role": "system", "content": ( "Vous êtes un assistant expert en extraction d'informations. " "Votre tâche est de lire le texte fourni et d'extraire les informations les plus importantes " "qui répondent à la question de recherche suivante : '{}'. " "Créez une note concise résumant les points clés et les informations pertinentes. " # Concise instruction "Soyez précis et factuel." ).format(query) }, { "role": "user", "content": text_content[:MAX_CONTEXT_CHARS_PER_PAGE] # Limit text content } ], temperature=0.2 # Température basse pour des réponses factuelles ) return response.choices[0].message.content.strip() except httpx.HTTPError as e: # Intercepter explicitement httpx.HTTPError pour tenacity if e.response.status_code == 429: logger.warning(f"Erreur 429 (Too Many Requests) lors de l'appel à l'API Groq (summarize_with_groq). Reessai en cours... Details: {e}") raise e # Raise pour que tenacity gère le retry else: # Pour les autres erreurs HTTP, logger et laisser remonter logger.error(f"Erreur HTTP lors de l'appel à l'API Groq (summarize_with_groq) (non-429). Details: {e}") raise # Raise pour signaler l'erreur except Exception as e: logger.error(f"Erreur inattendue lors de l'appel à l'API Groq (summarize_with_groq). Détails: {e}") return "Erreur lors de la synthèse avec Groq." def fetch_and_summarize(link, query): """ Récupère le contenu d'une page et utilise Groq pour en extraire une note résumée. """ try: r = requests.get(link, timeout=20) # Timeout pour éviter les blocages r.raise_for_status() soup_page = BeautifulSoup(r.text, 'html.parser') text_content = soup_page.get_text(separator='\n', strip=True) print(f"{link} : OK - Extraction du contenu...") summary_note = summarize_with_groq(query, text_content) # Appel à la fonction retryable print(f"{link} : OK - Note Groq ajoutée.") return f"Source: {link}\nNote:\n{summary_note}\n\n" except requests.exceptions.RequestException as e: print(f"{link} : Échec de la récupération de la page.") return f"Source: {link}\nNote:\nErreur de récupération de la page.\n\n" except Exception as e: print(f"{link} : Erreur lors du traitement de la page : {e}") return f"Source: {link}\nNote:\nErreur lors du traitement de la page: {e}\n\n" def fetch_pages_and_summarize(links: List[str], query: str) -> str: # Added type hints """ Utilise un pool de threads pour traiter plusieurs pages en parallèle et retourne le contexte concaténé. """ results = [] with ThreadPoolExecutor(max_workers=10) as executor: future_to_link = {executor.submit(fetch_and_summarize, link, query): link for link in links} for future in as_completed(future_to_link): result = future.result() results.append(result) return "".join(results) # --- Déroulement principal de la fonction operate_requests --- yield {"type": "web_search_status", "content": "start"} # Signaler le début de la recherche web # 1. Recherche sur Whoogle all_links = whoogle_search(prompt) if not all_links: print("Aucun résultat de recherche trouvé.") # Improved message for no search results yield {"type": "web_search_status", "content": "end"} # Signaler la fin de la recherche web yield "", [] # Retourner une liste de sources vide return # 2. Limiter à MAX_WEB_LINKS liens limited_links = all_links[:MAX_WEB_LINKS] # Limit to fewer links print(f"{len(limited_links)} lien(s) utilisé(s) pour la recherche.") # 3. Extraction et résumé en parallèle context = fetch_pages_and_summarize(limited_links, prompt) yield {"type": "web_search_status", "content": "end"} # Signaler la fin de la recherche web yield context, limited_links # Retourner aussi les liens utilisés return

def extract_code_blocks(text: str) -> Tuple[Optional[str], Optional[str]]:
"""Extrait les blocs de code Python et gère le marqueur [CODE_REQUIRED]."""
if CODE_REQUIRED_MARKER in text:
text_no_marker = text.replace(CODE_REQUIRED_MARKER, "", 1).strip()
code_match = CODE_PATTERN.search(text_no_marker)
if code_match:
return code_match.group(1).strip(), None
else:
code_match = CODE_PATTERN.search(text)
if code_match:
return None, code_match.group(1).strip() # Code à afficher (sans exécution)
return None, None

def run_code_in_sandbox(code: str) -> str:
"""Exécute le code Python dans le sandbox e2b et retourne le résultat."""
start_time = time.time()
try:
logger.info("Début exécution code dans sandbox e2b.")
execution = None
# Amélioration (mineure) : Utilisation de next pour trouver la première méthode disponible
method_name = next((method for method in ["run_code_sync", "execute_sync", "execute_code", "execute", "run_code", "run"] if hasattr(sandbox, method)), None)
if method_name:
logger.info(f"Utilisation de la méthode sandbox.{method_name}")
execution = getattr(sandbox, method_name)(code)
else: # Added else block for clarity
error_msg = "Aucune méthode d'exécution de code disponible dans le sandbox."
logger.error(error_msg)
return error_msg
logs = execution.logs
stdout_logs = logs.stdout if hasattr(logs, 'stdout') else []
stderr_logs = logs.stderr if hasattr(logs, 'stderr') else []
stdout = "\n".join(stdout_logs)
stderr = "\n".join(stderr_logs)
if stderr:
return f"Stdout:\n{stdout}\n\nStderr:\n{stderr}"
return stdout
except Exception as e:
duration = time.time() - start_time
error_msg = f"Erreur lors de l'exécution du code dans e2b après {duration:.2f} secondes: {str(e)}"
logger.exception(error_msg)
return f"Erreur exécution code: {str(e)}"

def extract_numeric_result(exec_result: str) -> Optional[str]:
"""Extrait la dernière ligne numérique du résultat d'exécution de code."""
if "Stdout:" in exec_result:
stdout_section = exec_result.split("Stdout:\n", 1)[1].split("\n\nStderr:")[0].strip()
else:
stdout_section = exec_result.strip()
lines = [l.strip() for l in stdout_section.splitlines() if l.strip()]
if not lines:
return None
last_line = lines[-1]
if re.search(r'\d', last_line):
return last_line
return None

@app.route('/models', methods=['GET'])
def get_models() -> Response:
"""Retourne la liste des modèles valides."""
return jsonify({"models": VALID_MODELS_LOW_TOKEN}) # Return only smaller models in model list

@app.route('/chat', methods=['POST'])
def chat_endpoint() -> Response:
"""
Point d'entrée principal pour le chat.
Gère le streaming SSE, l'exécution de code et les canvas.
Intègre la recherche web si activée (configurable via requête).
"""

text
@stream_with_context @retry(stop=stop_after_attempt(5), # Réessayer jusqu'à 5 fois wait=wait_exponential(multiplier=1, min=4, max=60), # Délai exponentiel (min 4s, max 60s) retry=retry_if_exception_type(httpx.HTTPError)) # Réessayer seulement pour les erreurs HTTP (incluant 429) def stream_response(): data = request.get_json() if not data: logger.warning("Requête chat invalide: corps de requête manquant.") yield format_sse_event({"type": "error", "content": "Requête chat invalide : corps de requête manquant."}) return enable_web_search = data.get('enable_web_search', True) # Recherche web configurable via requête, activée par défaut messages = data.get('messages', []) model: str = data.get('model', DEFAULT_CHAT_MODEL_LOW_TOKEN).strip() # Default to smaller model execute_code: bool = data.get('execute_code', True) if not messages or not isinstance(messages, list) or not model or model not in VALID_MODELS_LOW_TOKEN: # Validate against smaller model list logger.warning("Requête chat invalide: messages=%s, model=%s", messages, model) yield format_sse_event({"type": "error", "content": "Requête chat invalide : messages ou modèle incorrects."}) return system_web_context = "" # Initialisation par défaut ici last_user_message = messages[-1]['content'] if messages else "" web_sources: List[str] = [] # Initialisation de la liste des sources web if enable_web_search and last_user_message: web_search_generator = operate_requests(last_user_message) # operate_requests devient un générateur try: while True: # Corrected while loop event = next(web_search_generator) # Récupérer les événements de statut de recherche if isinstance(event, dict) and event.get('type') == 'web_search_status': yield format_sse_event(event) # Forward l'événement de statut au client elif isinstance(event, tuple) and len(event) == 2: # Réception du contexte et des sources à la fin system_web_context, web_sources = event # unpack le tuple break # Sortir de la boucle après avoir reçu le contexte et les sources elif isinstance(event, tuple) and event == ("", []): # Correctly handle the tuple case for no results system_web_context = "" # Assign empty string to system_web_context web_sources = [] # Assign empty list to web_sources break except StopIteration: # Handle StopIteration properly outside the loop system_web_context = "" # Ensure default context if generator finishes prematurely web_sources = [] break except StopIteration: # Handle StopIteration if generator is empty from the start system_web_context = "" web_sources = [] else: system_web_context = "" # Prompt système factorisé et simplifié - CONCISE PROMPT FOR TOKEN SAVING base_prompt = "🎯 **Rôle : Expert en calcul et canvas.**\n" # Very short role "**Mission :** Répondre, calculer, canvas interactifs. Style expert. Concise.\n\n" # Very concise mission web_context_prompt_part = f"**CONTEXTE WEB :**\n{system_web_context}\n\n" if system_web_context else "" # Shorter section title instructions_prompt_part = "**CALCUL NUMÉRIQUE :**\n" # Shortened instruction section titles "1. Phrase intro calcul Python.\n" "2. `[CODE_REQUIRED]`.\n" "3. Bloc PYTHON minimal, lisible, `print(resultat_numerique)`.\n" "**[APRES EXECUTION CODE]** Continue, explique résultat code en DETAILS. Fais avancer conversation.\n\n" # Concise after-code instructions "**CANVAS CODE :**\n" "1. Canvas code interactif.\n" "2. `[CANVAS_START]`...`[CANVAS_END]`.\n" "**CANVAS TEXTE :**\n" "1. Canvas texte interactif (markdown).\n" "2. `[CANVAS_START]`...`[CANVAS_END]`.\n" "**CHAT CANVAS :** MODIFIER canvas. NOUVEAU canvas complet `[CANVAS_START]`...`[CANVAS_END]`.\n" # Even shorter canvas chat instructions "**PAS CALCUL NI CANVAS :** Réponse directe, concise. Contexte conversation." # Shortened no-action instructions system_prompt_content = base_prompt + web_context_prompt_part + instructions_prompt_part system_prompt = {"role": "system", "content": system_prompt_content} messages_with_system_prompt = [system_prompt] + messages[-CHAT_HISTORY_MESSAGE_LIMIT:] # Apply chat history limit full_reply_text = "" code_to_execute = None code_to_display = None is_code_used = False canvas_content = None code_executed = False prompt_tokens = 0 # Note: Comptage des tokens non implémenté ici completion_tokens = 0 # Note: Comptage des tokens non implémenté ici total_tokens = 0 # Note: Comptage des tokens non implémenté ici initial_response_completed = False yield format_sse_event({"type": "analysis_status", "content": "start"}) try: response_stream = groq_client.chat.completions.create( model=model, messages=messages_with_system_prompt, temperature=0.0, stream=True, ) for chunk in response_stream: if initial_response_completed: break delta_content = chunk.choices[0].delta.content or "" delta_content_no_marker = delta_content.replace(CODE_REQUIRED_MARKER, "") full_reply_text += delta_content_no_marker if CANVAS_START_MARKER in full_reply_text and CANVAS_END_MARKER in full_reply_text: start_index = full_reply_text.find(CANVAS_START_MARKER) + len(CANVAS_START_MARKER) end_index = full_reply_text.find(CANVAS_END_MARKER) canvas_content_extracted = full_reply_text[start_index:end_index].strip() canvas_content = canvas_content_extracted delta_content_for_chat = delta_content_no_marker.replace(CANVAS_START_MARKER, "").replace(CANVAS_END_MARKER, "") yield format_sse_event({"type": "text", "content": delta_content_for_chat}) yield format_sse_event({"type": "canvas_start", "content": canvas_content}) continue else: yield format_sse_event({"type": "text", "content": delta_content_no_marker}) code_to_execute, code_to_display = extract_code_blocks(full_reply_text) if code_to_execute: initial_response_completed = True if execute_code and code_to_execute: is_code_used = True logger.info("Envoi événement CODE_BLOCK") yield format_sse_event({"type": "code_block", "content": code_to_execute}) yield format_sse_event({"type": "code_execution_start"}) execution_result = run_code_in_sandbox(code_to_execute) numeric_result = extract_numeric_result(execution_result) if numeric_result: code_executed = True logger.info("Envoi événement EXECUTED_CODE_RESULT") yield format_sse_event({"type": "executed_code_result", "content": numeric_result}) conclusion_prompt_content = ( f"**[APRES CODE]** Resultat: **{numeric_result}**. Continue conversation." # Very concise post-code prompt ) conclusion_prompt = [{"role": "user", "content": conclusion_prompt_content}] conclusion_stream = groq_client.chat.completions.create( model=model, messages=conclusion_prompt, temperature=0.0, stream=True, ) for chunk in conclusion_stream: delta_content = chunk.choices[0].delta.content or "" yield format_sse_event({"type": "text", "content": delta_content}) yield format_sse_event({"type": "final"}) else: code_executed = True logger.info("Envoi événement EXECUTED_CODE_ERROR") yield format_sse_event({"type": "executed_code_error", "content": execution_result}) yield format_sse_event({"type": "final", "result": "Fin de la réponse (erreur ou pas de résultat numérique)."}) if web_sources: # Envoyer les sources web si elles existent yield format_sse_event({"type": "web_sources", "content": web_sources}) yield format_sse_event({"type": "sources_start"}) # Indiquer le début des sources for source_url in web_sources: yield format_sse_event({"type": "source_item", "content": source_url}) # Envoyer chaque source individuellement yield format_sse_event({"type": "sources_end"}) # Indiquer la fin des sources prompt_tokens = 0 # Note: Comptage des tokens non implémenté ici completion_tokens = 0 # Note: Comptage des tokens non implémenté ici total_tokens = prompt_tokens + completion_tokens # Note: Comptage des tokens non implémenté ici if not code_executed: logger.info("Envoi événement FINAL (sans code exécuté)") yield format_sse_event({"type": "final", "prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}) else: yield format_sse_event({"type": "final", "prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}) except httpx.HTTPError as e: # Intercepter explicitement httpx.HTTPError pour tenacity dans stream_response if e.response.status_code == 429: error_message = f"Erreur 429 (Too Many Requests) lors de l'appel à l'API Groq (stream_response). Réessai automatique par Tenacity. Détails: {e}" logger.warning(error_message) # Log warning, tenacity va retry yield format_sse_event({"type": "error", "content": "Erreur temporaire de l'IA. Réessayez svp."}) # More user-friendly error message raise e # Reraise pour que tenacity retry else: # Pour les autres erreurs HTTP error_message = f"Erreur HTTP lors de l'appel à l'API Groq (stream_response) (non-429). Détails: {e}" logger.exception(error_message) yield format_sse_event({"type": "error", "content": error_message}) except Exception as e: error_message = f"Erreur serveur IA: {str(e)}" logger.exception(error_message) yield format_sse_event({"type": "error", "content": error_message}) finally: yield format_sse_event({"type": "analysis_status", "content": "end"}) yield format_sse_event({"type": "done", "is_code_execution": is_code_used}) return Response(stream_with_context(stream_response()), mimetype='text/event-stream')

@app.route('/canvas_modify', methods=['POST'])
def canvas_modify_endpoint() -> Response:
data = request.get_json()
if not data:
logger.warning("Requête canvas_modify sans données.")
return jsonify({"error": "Requête canvas_modify invalide : corps de requête manquant."}), 400
user_message = data.get('message')
canvas_content = data.get('canvas_content')
model = data.get('model', DEFAULT_CHAT_MODEL_LOW_TOKEN).strip() # Default to smaller model for canvas too
if not user_message or canvas_content is None:
logger.warning("Requête canvas_modify incomplète: message=%s, canvas_content=%s", user_message, canvas_content)
return jsonify({"error": "Requête canvas_modify invalide : message ou contenu canvas manquant."}), 400
canvas_chat_messages = [
{"role": "system", "content": ("Chat canvas. Modifier canvas. " # Even shorter canvas system prompt
"NOUVEAU canvas complet [CANVAS_START]...[CANVAS_END].")}, # Even shorter canvas system prompt
{"role": "assistant", "content": f"[CANVAS_START]\n{canvas_content}\n[CANVAS_END] Canvas actuel. Modifier?"}, # Even shorter canvas assistant message
{"role": "user", "content": user_message}
]

text
@retry(stop=stop_after_attempt(5), # Réessayer jusqu'à 5 fois wait=wait_exponential(multiplier=1, min=4, max=60), # Délai exponentiel (min 4s, max 60s) retry=retry_if_exception_type(httpx.HTTPError)) # Réessayer seulement pour les erreurs HTTP (incluant 429) def canvas_modify_groq_call(canvas_chat_messages, model): """Fonction retryable pour l'appel à Groq dans canvas_modify_endpoint""" return groq_client.chat.completions.create( model=model, messages=canvas_chat_messages, temperature=0.0, stream=False, ) try: response_stream = canvas_modify_groq_call(canvas_chat_messages, model) # Appel à la fonction retryable ai_response_text = response_stream.choices[0].message.content or "" if CANVAS_START_MARKER in ai_response_text and CANVAS_END_MARKER in ai_response_text: start_index = ai_response_text.find(CANVAS_START_MARKER) + len(CANVAS_START_MARKER) end_index = ai_response_text.find(CANVAS_END_MARKER) updated_canvas_content = ai_response_text[start_index:end_index].strip() response_message = "Canvas mis à jour." # Short response logger.info("Canvas modifié avec succès.") return jsonify({"updated_canvas_content": updated_canvas_content, "response_message": response_message}) else: logger.error("Réponse IA canvas_modify invalide: marqueurs CANVAS_START/END manquants.") return jsonify({"error": "Réponse IA invalide pour la modification du canvas", "full_response": ai_response_text}), 500 except httpx.HTTPError as e: # Intercepter explicitement httpx.HTTPError if e.response.status_code == 429: error_message = f"Erreur 429 (Too Many Requests) lors de l'appel à l'API Groq (canvas_modify_endpoint). Après plusieurs essais, opération échouée. Details: {e}" // Improved error message else: error_message = f"Erreur HTTP lors de l'appel à l'API Groq (canvas_modify_endpoint) (non-429). Détails: {e}" logger.exception(error_message) # Log l'exception complète (traceback) return jsonify({"error": "Erreur lors de la modification du canvas après plusieurs tentatives.", "details": error_message}), 500 except Exception as e: error_message = f"Erreur serveur (canvas_modify): {str(e)}" logger.exception(error_message) return jsonify({"error": error_message}), 500

@app.route('/run', methods=['POST'])
def run_code_endpoint() -> Response:
"""
Point d'accès pour exécuter du code via l'API Piston (pour canvas code).
Langages supportés par Piston.
"""
data = request.get_json()
if not data:
logger.warning("Requête run sans données.")
return jsonify({"error": "Requête run invalide : corps de requête manquant."}), 400
language = data.get("language")
code = data.get("code")
stdin = data.get("stdin", "")
if not language or not code:
logger.warning("Requête run incomplète: language=%s, code=%s", language, code)
return jsonify({"error": "Requête run invalide : langage et code requis."}), 400
payload = {
"language": language,
"version": "*",
"files": [{"content": code}],
"stdin": stdin
}
try:
piston_response = http_session.post(PISTON_API_URL, json=payload, timeout=30)
piston_response.raise_for_status()
logger.info("Code exécuté avec Piston avec succès.")
return jsonify(piston_response.json())
except requests.RequestException as e:
error_message = f"Erreur lors de l'appel à l'API Piston: {str(e)}"
logger.exception(error_message)
piston_text = piston_response.text if 'piston_response' in locals() and piston_response is not None else ""
return jsonify({"error": "Erreur d'exécution du code via l'API Piston.", "piston_response": piston_text, "details": error_message}), 500
except Exception as e:
error_message = f"Erreur inattendue lors de l'exécution du code: {str(e)}"
logger.exception(error_message)
return jsonify({"error": "Erreur inattendue lors de l'exécution du code.", "details": error_message}), 500

@app.route('/run', methods=['OPTIONS'])
def run_code_options() -> Response:
"""Gère les requêtes OPTIONS pour l'endpoint /run (CORS preflight)."""
return jsonify({'message': 'OPTIONS requests allowed'}), 200

def format_sse_event(data: Dict[str, Any]) -> str:
"""Formate les données en event SSE (Server-Sent Events)."""
return f"data: {json.dumps(data)}\n\n"

if name == 'main':
if os.environ.get('FLASK_ENV') == 'development':
app.run(debug=True, port=5000)
"""
// app.tsx
import React, { useState, useRef, useEffect } from 'react';
import ReactMarkdown from 'react-markdown';
import { Prism } from 'react-syntax-highlighter/dist/esm';
import { Light as SyntaxHighlighterLight } from 'react-syntax-highlighter/dist/esm';
import { atomDark } from 'react-syntax-highlighter/dist/esm/styles/prism';
import { python as prismPython, javascript as prismJavascript, c, cpp as prismCpp, java as prismJava } from 'react-syntax-highlighter/dist/esm/languages/prism';

// Import CodeMirror et ses extensions
import CodeMirror from '@uiw/react-codemirror';
import { python } from '@codemirror/lang-python';
import { javascript } from '@codemirror/lang-javascript';
import { java } from '@codemirror/lang-java';
import { cpp } from '@codemirror/lang-cpp';
import { markdown } from '@codemirror/lang-markdown';

// Helper function to register languages for react-syntax-highlighter
const registerLanguage = (name: string, language: any) => {
if (Prism && Prism.languages && !Prism.languages[name]) {
Prism.registerLanguage(name, language);
} else if (Prism && Prism.languages && Prism.languages[name]) {
console.warn(`Language "${name}" is already registered.`);
} else {
console.warn("Warning: Prism.registerLanguage is not available. Syntax highlighting might not work correctly.");
}
};

// Enregistrement des langages pour la coloration syntaxique
registerLanguage('python', prismPython);
registerLanguage('javascript', prismJavascript);
registerLanguage('c', c);
registerLanguage('cpp', prismCpp);
registerLanguage('java', prismJava);

interface ChatMessage {
sender: 'User' | 'AI';
text: string;
codeBlock?: string;
isCodeCollapsed?: boolean;
canvasContent?: string;
hasCanvas?: boolean;
isTextCanvas?: boolean;
analysisStatus?: string;
executedCodeError?: string;
isCodeExecuting?: boolean;
codeExecutionOutput?: string | null;
promptTokens?: number; // Token usage info
completionTokens?: number; // Token usage info
totalTokens?: number; // Token usage info
webSources?: string[]; // Liste des sources web
}

interface CanvasChatMessage {
sender: 'User' | 'AI';
text: string;
}

const App: React.FC = () => {
const [message, setMessage] = useState('');
const [chat, setChat] = useState<ChatMessage[]>([]);
const chatDisplayRef = useRef<HTMLDivElement>(null);
const [isSending, setIsSending] = useState(false);
const [availableModels, setAvailableModels] = useState<string[]>([]);
const [selectedModel, setSelectedModel] = useState<string>('');
const [isCanvasActive, setIsCanvasActive] = useState(false);
const [currentCanvasContent, setCurrentCanvasContent] = useState('');
const [canvasChatMessages, setCanvasChatMessages] = useState<CanvasChatMessage[]>([]);
const [canvasMessage, setCanvasMessage] = useState('');
const canvasChatDisplayRef = useRef<HTMLDivElement>(null);
const [selectedLanguage, setSelectedLanguage] = useState<'python' | 'c' | 'cpp' | 'java' | 'javascript'>('python');
const [codeOutput, setCodeOutput] = useState('');
const [activeCanvasMessageIndex, setActiveCanvasMessageIndex] = useState<number | null>(null);
const [isTextCanvas, setIsTextCanvas] = useState(false);
const [isWebSearchEnabled, setIsWebSearchEnabled] = useState(false);
const [analysisStatus, setAnalysisStatus] = useState<string | null>(null);
const [webSources, setWebSources] = useState<string[]>([]); // State pour les sources web
const [showSources, setShowSources] = useState(false); // State pour afficher/cacher les sources

text
// Référence pour la console (output) afin d'automatiquement défiler vers le bas const consoleOutputRef = useRef<HTMLDivElement>(null); const VALID_MODELS_FRONTEND = [ // Match VALID_MODELS_LOW_TOKEN in Python "llama-3.1-8b-instant", "gemma2-9b-it", "mixtral-8x7b-32768" ]; useEffect(() => { const fetchModels = async () => { try { const response = await fetch('https://pl1nkochat.onrender.com/models'); if (response.ok) { const data = await response.json(); setAvailableModels(data.models.filter(model => VALID_MODELS_FRONTEND.includes(model))); // Filter models on frontend if (data.models.length > 0) { setSelectedModel(data.models.find(model => VALID_MODELS_FRONTEND.includes(model)) || VALID_MODELS_FRONTEND[0] || ''); // Select default or first valid model } } else { console.error("Failed to fetch models:", response.status); setChat(prevChat => [...prevChat, { sender: 'AI', text: \`Erreur de chargement des modèles: \${response.status}\` }]); } } catch (error) { console.error("Error fetching models:", error); setChat(prevChat => [...prevChat, { sender: 'AI', text: "Erreur de communication avec le serveur pour les modèles." }]); } }; fetchModels(); }, []); useEffect(() => { if (!isCanvasActive && chatDisplayRef.current) { chatDisplayRef.current.scrollTop = chatDisplayRef.current.scrollHeight; } if (isCanvasActive && canvasChatDisplayRef.current) { canvasChatDisplayRef.current.scrollTop = canvasChatDisplayRef.current.scrollHeight; } }, [chat, canvasChatMessages, isCanvasActive]); // Auto-scroll de la console de sortie lorsque codeOutput change useEffect(() => { if (consoleOutputRef.current) { consoleOutputRef.current.scrollTop = consoleOutputRef.current.scrollHeight; } }, [codeOutput]); const handleSubmit = async (e: React.FormEvent) => { e.preventDefault(); if (message.trim() === '' || isSending || isCanvasActive) return; setIsSending(true); setAnalysisStatus('Analyse en cours...'); setWebSources([]); // Réinitialiser les sources web à chaque nouvelle requête const userMessage = message; setChat(prevChat => [...prevChat, { sender: 'User', text: userMessage }]); setMessage(''); setAnalysisStatus('Réponse en cours...'); const messagesForApi = [ ...chat.map(msg => ({ role: msg.sender === 'User' ? 'user' : 'assistant', content: msg.text })), { role: 'user', content: userMessage } ]; setChat(prevChat => [ ...prevChat, { sender: 'AI', text: '', isCodeCollapsed: true, analysisStatus: 'Réponse en cours...', isCodeExecuting: false, codeExecutionOutput: null, webSources: [] } ]); try { const response = await fetch('https://pl1nkochat.onrender.com/chat', { method: 'POST', headers: { 'Content-Type': 'application/json', 'Accept': 'text/event-stream' }, body: JSON.stringify({ messages: messagesForApi, model: selectedModel, enable_web_search: isWebSearchEnabled }) }); if (!response.ok) { const errorText = await response.text(); setChat(prevChat => { const lastIndex = prevChat.length - 1; return [ ...prevChat.slice(0, lastIndex), { sender: 'AI', text: \`Erreur: \${response.status} - \${errorText}\`, analysisStatus: 'Erreur' } ]; }); setIsSending(false); setAnalysisStatus('Erreur lors de la communication avec le serveur.'); return; } const reader = response.body!.getReader(); const decoder = new TextDecoder(); while (true) { const { done, value } = await reader.read(); if (done) break; const chunk = decoder.decode(value); const events = chunk.split('\\n\\n').filter(line => line.startsWith("data: ")); for (const eventLine of events) { const eventData = JSON.parse(eventLine.substring(6)); console.log("Événement reçu:", eventData); if (eventData.type === 'text') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const newText = lastMessage.text + eventData.content; const updatedMessage = { ...lastMessage, text: newText, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'code_block') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, codeBlock: eventData.content, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'code_execution_start') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, isCodeExecuting: true, codeExecutionOutput: 'Execution en cours...' }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'executed_code_result') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, codeExecutionOutput: eventData.content, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'canvas_start') { const canvasContent = eventData.content; const isText = canvasContent.trim().startsWith('#') || canvasContent.trim().startsWith('* ') || !canvasContent.includes('```'); setIsTextCanvas(isText); const cleanedCanvasContent = canvasContent.replace(/```(python|javascript|c|cpp|java)\\n/g, '```\\n'); setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, canvasContent: cleanedCanvasContent, hasCanvas: true, isTextCanvas: isText, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'final') { setAnalysisStatus(null); setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, promptTokens: eventData.prompt_tokens, completionTokens: eventData.completion_tokens, totalTokens: eventData.total_tokens }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'error') { setChat(prevChat => { const lastIndex = prevChat.length - 1; return [ ...prevChat.slice(0, lastIndex), { sender: 'AI', text: \`Erreur IA: \${eventData.content}\`, analysisStatus: 'Erreur IA' } ]; }); setIsSending(false); setAnalysisStatus('Erreur de l\'IA.'); return; } else if (eventData.type === 'analysis_status') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, analysisStatus: eventData.content, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); setAnalysisStatus(eventData.content); } else if (eventData.type === 'executed_code_error') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, executedCodeError: eventData.content, isCodeExecuting: false, codeExecutionOutput: eventData.content }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'web_search_status') { if (eventData.content === 'start') { setAnalysisStatus('Recherche internet en cours...'); } else if (eventData.content === 'end') { setAnalysisStatus('Réponse en cours...'); // Retour à l'analyse après la recherche } } else if (eventData.type === 'web_sources') { setWebSources(eventData.content); setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, webSources: eventData.content }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } } } } catch (error) { console.error("Erreur lors de la communication avec le backend:", error); setChat(prevChat => { const lastIndex = prevChat.length - 1; return [ ...prevChat.slice(0, lastIndex), { sender: 'AI', text: "Erreur de communication avec l'IA.", analysisStatus: 'Erreur réseau' } ]; }); setAnalysisStatus('Erreur réseau.'); } finally { setIsSending(false); if (!analysisStatus) { setAnalysisStatus('Terminé'); setTimeout(() => setAnalysisStatus(null), 3000); } } }; const toggleCodeBlockCollapse = (index: number) => { setChat(prevChat => prevChat.map((msg, i) => i === index ? { ...msg, isCodeCollapsed: !msg.isCodeCollapsed } : msg ) ); }; const handleCanvasChange = (newContent: string) => { const cleanedContent = newContent.replace(/```(python|javascript|c|cpp|java)\\n/g, '```\\n'); setCurrentCanvasContent(cleanedContent); }; const handleCanvasMessageSubmit = async (e: React.FormEvent) => { e.preventDefault(); if (canvasMessage.trim() === '') return; const userCanvasMessage = canvasMessage; setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'User', text: userCanvasMessage }]); setCanvasMessage(''); try { const response = await fetch('https://pl1nkochat.onrender.com/canvas_modify', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ message: userCanvasMessage, canvas_content: currentCanvasContent, model: selectedModel }) }); if (!response.ok) { console.error("Erreur Canvas Chat:", response.status, await response.text()); setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'AI', text: \`Erreur: \${response.status} - Chat Canvas\` }]); return; } const result = await response.json(); if (result && result.updated_canvas_content) { const cleanedContent = result.updated_canvas_content.replace(/```(python|javascript|c|cpp|java)\\n/g, '```\\n'); setCurrentCanvasContent(cleanedContent); setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'AI', text: result.response_message || "Canvas mis à jour." }]); } else if (result && result.error) { setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'AI', text: \`Erreur IA Canvas: \${result.error}\` }]); } } catch (error) { console.error("Erreur Canvas Chat Backend:", error); setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'AI', text: "Erreur de communication avec l'IA (Canvas Chat)." }]); } }; const handleLanguageChange = (e: React.ChangeEvent<HTMLSelectElement>) => { setSelectedLanguage(e.target.value as typeof selectedLanguage); }; const handleCodeExecution = async () => { setCodeOutput('Execution en cours...'); try { const response = await fetch("https://pl1nkochat.onrender.com/run", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ language: selectedLanguage, code: currentCanvasContent, stdin: "" }) }); const result = await response.json(); setCodeOutput(result.run?.stdout || result.error || "Erreur d'exécution du code!"); } catch (error) { console.error("Erreur Exécution Code:", error); setCodeOutput("Erreur lors de l'exécution du code."); } }; const handleStartNewChat = () => { setIsCanvasActive(false); setCanvasChatMessages([]); setCurrentCanvasContent(''); setCodeOutput(''); setActiveCanvasMessageIndex(null); setIsTextCanvas(false); setChat([]); setWebSources([]); // Reset web sources on new chat }; const handleOpenCanvas = (index: number, canvasContent: string, isTextCanvasProp: boolean) => { setIsCanvasActive(true); const cleanedContent = canvasContent.replace(/```(python|javascript|c|cpp|java)\\n/g, '```\\n'); setCurrentCanvasContent(cleanedContent); setCanvasChatMessages([]); setActiveCanvasMessageIndex(index); setIsTextCanvas(isTextCanvasProp); }; const handleModelChange = (e: React.ChangeEvent<HTMLSelectElement>) => { setSelectedModel(e.target.value); }; const handleWebSearchToggle = (e: React.ChangeEvent<HTMLInputElement>) => { setIsWebSearchEnabled(e.target.checked); }; const toggleSourcesVisibility = () => { setShowSources(!showSources); }; return ( <div style={{ margin: '20px', display: 'flex', flexDirection: 'column', height: '95vh' }}> {!isCanvasActive && ( <center> <h1>PLINKO CHATTE</h1> </center> )} <div style={{ display: 'flex', flexDirection: 'row', width: '100%', height: 'calc(100% - 100px)' }}> {!isCanvasActive && ( <div style={{ flex: 1, height: '100%' }}> <div ref={chatDisplayRef} style={{ flexGrow: 1, overflowY: 'auto', marginBottom: '20px', border: '1px solid #ccc', padding: '10px', height: 'calc(100% - 100px)' }} > {chat.map((msg, index) => ( <div key={index} style={{ marginBottom: '15px', textAlign: msg.sender === 'User' ? 'right' : 'left' }}> <strong>{msg.sender}:</strong> <div> {msg.analysisStatus && <div style={{ fontSize: '0.8em', color: 'grey' }}>{msg.analysisStatus}</div>} <div style={{ whiteSpace: 'pre-wrap', wordWrap: 'break-word', position: 'relative' }}> {msg.sender === 'AI' ? ( <> <ReactMarkdown components={{ code: ({ className, children, ...props }) => { const language = className?.match(/language-(\\w+)/)?.[1] || ''; return ( <SyntaxHighlighterLight style={atomDark as any} language={language} PreTag="div" children={String(children).replace(/\\n$/, '')} {...props} /> ); } }} > {msg.codeBlock ? msg.text.replace(/```python\\s*([\\s\\S]*?)```/g, '') : msg.text} </ReactMarkdown> {msg.webSources && msg.webSources.length > 0 && ( <div style={{display: 'inline-block', marginLeft: '5px'}}> <button onClick={toggleSourcesVisibility} style={{ background: 'none', border: 'none', color: '#007bff', padding: 0, cursor: 'pointer', fontSize: '0.8em', textDecoration: 'underline' }} > {showSources ? 'Cacher Sources' : 'Afficher Sources'} </button> {showSources && ( <div style={{ position: 'absolute', zIndex: 10, backgroundColor: '#f9f9f9', border: '1px solid #ccc', padding: '10px', borderRadius: '5px', left: '0', top: '100%', marginTop: '5px', width: '250px', textAlign: 'left', boxShadow: '2px 2px 5px rgba(0,0,0,0.3)' }}> <strong>Sources:</strong> <ul> {msg.webSources.map((source, index) => ( <li key={index} style={{fontSize: '0.8em', wordWrap: 'break-word'}}> <a href={source} target="_blank" rel="noopener noreferrer">{source}</a> </li> ))} </ul> </div> )} </div> )} </> ) : ( msg.text )} </div> {msg.sender === 'AI' && msg.codeBlock && ( <div style={{ marginTop: '10px', border: '1px solid #ccc', borderRadius: '5px', overflow: 'hidden' }}> <div style={{ backgroundColor: '#f0f0f0', padding: '8px', cursor: 'pointer', display: 'flex', justifyContent: 'space-between', alignItems: 'center' }} onClick={() => toggleCodeBlockCollapse(index)} > <strong>Code Python Généré</strong> <span>{msg.isCodeCollapsed ? '▼' : '▲'}</span> </div> <div style={{ padding: '10px', display: msg.isCodeCollapsed ? 'none' : 'block', backgroundColor: '#f9f9f9' }} > <SyntaxHighlighterLight style={atomDark as any} language="python" PreTag="div" > {msg.codeBlock} </SyntaxHighlighterLight> </div> </div> )} {msg.sender === 'AI' && msg.isCodeExecuting && msg.codeExecutionOutput && ( <div style={{ marginTop: '10px', padding: '10px', backgroundColor: '#e0f7fa', borderRadius: '5px', border: '1px solid #80deea' }}> <strong>Code Execution:</strong> <pre style={{ whiteSpace: 'pre-wrap', wordWrap: 'break-word', marginTop: '5px', fontFamily: 'monospace', fontSize: '0.9em' }}> {msg.codeExecutionOutput} </pre> </div> )} {msg.sender === 'AI' && !msg.isCodeExecuting && msg.codeExecutionOutput && !msg.executedCodeError && ( <div style={{ marginTop: '10px', padding: '10px', backgroundColor: '#c8e6c9', borderRadius: '5px', border: '1px solid #81c784' }}> <strong>Code Output:</strong> <pre style={{ whiteSpace: 'pre-wrap', wordWrap: 'break-word', marginTop: '5px', fontFamily: 'monospace', fontSize: '0.9em' }}> {msg.codeExecutionOutput} </pre> </div> )} {msg.sender === 'AI' && msg.executedCodeError && ( <div style={{ marginTop: '10px', padding: '10px', backgroundColor: '#ffe0b2', borderRadius: '5px', border: '1px solid #ffb74d' }}> <strong>Erreur d'exécution du code:</strong> <pre style={{ whiteSpace: 'pre-wrap', wordWrap: 'break-word', marginTop: '5px', fontFamily: 'monospace', fontSize: '0.9em' }}> {msg.executedCodeError} </pre> </div> )} {msg.sender === 'AI' && msg.hasCanvas && !isCanvasActive && ( <button onClick={() => handleOpenCanvas(index, msg.canvasContent || '', msg.isTextCanvas || false)} style={{ marginTop: '10px', padding: '8px 15px', borderRadius: '5px', backgroundColor: '#007bff', color: 'white', border: 'none', cursor: 'pointer' }} > Ouvrir Canvas </button> )} {msg.sender === 'AI' && msg.totalTokens !== undefined && ( <div style={{ marginTop: '5px', fontSize: '0.8em', color: 'grey' }}> Tokens utilisés: {msg.totalTokens} (Prompt: {msg.promptTokens}, Completion: {msg.completionTokens}) </div> )} </div> </div> ))} </div> {analysisStatus && <div style={{ marginBottom: '10px', color: 'grey', textAlign: 'center' }}>{analysisStatus}</div>} <form onSubmit={handleSubmit} style={{ display: 'flex', flexDirection: 'column', marginTop: '10px' }}> <textarea value={message} onChange={e => setMessage(e.target.value)} placeholder="Tapez votre message... (Chat Principal)" rows={3} style={{ padding: '8px', borderRadius: '5px', border: '1px solid #ccc' }} disabled={isSending || isCanvasActive} /> <div style={{ marginBottom: '10px', display: 'flex', alignItems: 'center', justifyContent: 'space-between' }}> <div> <label htmlFor="model-select" style={{ display: 'block', marginBottom: '5px', color: '#ccc' }}> Select Model: </label> <select id="model-select" value={selectedModel} onChange={handleModelChange} style={{ width: '100%', padding: '8px', borderRadius: '5px', backgroundColor: '#333', color: 'white', border: '1px solid #555', }} > {availableModels.map((model) => ( <option key={model} value={model}> {model} </option> ))} </select> </div> <div style={{ display: 'flex', alignItems: 'center' }}> <label htmlFor="web-search-toggle" style={{ color: '#ccc', marginRight: '10px' }}> Recherche Web: </label> <input id="web-search-toggle" type="checkbox" checked={isWebSearchEnabled} onChange={handleWebSearchToggle} style={{ cursor: 'pointer' }} /> </div> </div> <button type="submit" style={{ padding: '10px', borderRadius: '5px', backgroundColor: '#4CAF50', color: 'white', border: 'none', cursor: 'pointer' }} disabled={isSending || isCanvasActive}> {isSending ? "Envoi en cours..." : "Envoyer"} </button> </form> <button onClick={handleStartNewChat} style={{ marginTop: '10px', padding: '10px 15px', borderRadius: '8px', backgroundColor: '#607d8b', color: 'white', border: 'none', cursor: 'pointer', fontWeight: 'bold', transition: 'background-color 0.2s ease', }} > Nouveau Chat </button> </div> )} {isCanvasActive && ( <div style={{ display: 'flex', width: '100%', height: '100%', gap: '20px', position: 'fixed', top: 0, left: 0, right: 0, bottom: 0, }}> <div style={{ width: '25%', background: 'linear-gradient(145deg, #3a3d49, #2d2f36)', borderRadius: '8px', padding: '15px', display: 'flex', flexDirection: 'column', boxShadow: '0 4px 12px rgba(0,0,0,0.3)' }}> <h3 style={{ marginBottom: '10px', color: '#ccc' }}>Canvas Chat</h3> <button onClick={handleStartNewChat} style={{ padding: '10px 15px', borderRadius: '8px', backgroundColor: '#0a84ff', color: 'white', border: 'none', cursor: 'pointer', marginBottom: '20px', fontWeight: 'bold', transition: 'background-color 0.2s ease', }} > Retour au Chat Principal </button> <div ref={canvasChatDisplayRef} style={{ flexGrow: 1, overflowY: 'auto', marginBottom: '10px', color: '#ececf1', height: 'calc(100% - 180px)' }} > {canvasChatMessages.map((msg, index) => ( <div key={index} style={{ textAlign: msg.sender === 'User' ? 'right' : 'left', marginBottom: '5px', color: msg.sender === 'User' ? '#90caf9' : '#c5e1a5', }} > <strong>{msg.sender}:</strong> {msg.text} </div> ))} </div> <form onSubmit={handleCanvasMessageSubmit} style={{ display: 'flex', marginTop: '10px' }}> <textarea value={canvasMessage} onChange={(e) => setCanvasMessage(e.target.value)} placeholder="Modifier le canvas ici..." rows={2} style={{ flexGrow: 1, padding: '8px', borderRadius: '5px', border: 'none', backgroundColor: '#4d4e5c', color: '#ececf1', marginRight: '8px', resize: 'none', }} /> <button type="submit" style={{ padding: '8px 15px', borderRadius: '5px', backgroundColor: '#0a84ff', color: 'white', border: 'none', cursor: 'pointer', }} > Send </button> </form> </div> <div style={{ flex: 1, display: 'flex', flexDirection: 'column', backgroundColor: '#1e1e1e', borderRadius: '8px', overflow: 'hidden', boxShadow: '0 4px 12px rgba(0,0,0,0.3)' }}> <div style={{ backgroundColor: '#2d2d2d', padding: '8px 10px', display: 'flex', alignItems: 'center', borderBottom: '1px solid #444' }}> <span style={{ color: '#ccc', fontFamily: 'monospace', marginRight: '10px' }}> {isTextCanvas ? 'text_canvas.md' : \`main.\${selectedLanguage === 'python' ? 'py' : (selectedLanguage === 'javascript' ? 'js' : (selectedLanguage === 'c' ? 'c' : (selectedLanguage === 'cpp' ? 'cpp' : (selectedLanguage === 'java' ? 'java' : 'txt'))))}\`} </span> {!isTextCanvas && ( <select id="language-select" value={selectedLanguage} onChange={handleLanguageChange} style={{ padding: '5px 10px', borderRadius: '4px', backgroundColor: '#333', color: 'white', border: '1px solid #555', marginRight: '10px' }} > <option value="python">Python</option> <option value="javascript">JavaScript</option> <option value="c">C</option> <option value="cpp">C++</option> <option value="java">Java</option> </select> )} {!isTextCanvas && ( <button onClick={handleCodeExecution} style={{ marginLeft: 'auto', padding: '5px 10px', backgroundColor: '#0a84ff', color: '#fff', border: 'none', borderRadius: '4px', cursor: 'pointer' }} > Run </button> )} </div> {/* Zone d'édition du code (CodeMirror) dans un conteneur scrollable */} <div style={{ flexGrow: 1, overflowY: 'auto' }}> <CodeMirror value={currentCanvasContent} height="100%" theme="dark" extensions={ isTextCanvas ? [markdown()] : [ selectedLanguage === 'python' ? python() : selectedLanguage === 'javascript' ? javascript() : selectedLanguage === 'java' ? java() : (selectedLanguage === 'cpp' || selectedLanguage === 'c') ? cpp() : [] ] } onChange={(value) => handleCanvasChange(value)} /> </div> {codeOutput && !isTextCanvas && ( <div ref={consoleOutputRef} style={{ backgroundColor: '#1a1a1a', padding: '10px', borderTop: '1px solid #444', maxHeight: '200px', overflowY: 'auto' }} > <strong style={{ color: '#eee' }}>Output:</strong> <SyntaxHighlighterLight style={atomDark as any} language={selectedLanguage} PreTag="div" > {codeOutput} </SyntaxHighlighterLight> </div> )} </div> </div> )} </div> </div> );

};

export default App;

i want you return me a coherent code without bug

File "/opt/render/project/src/.venv/lib/python3.11/site-packages/gunicorn/app/wsgiapp.py", line 57, in load
return self.load_wsgiapp()
^^^^^^^^^^^^^^^^^^^
File "/opt/render/project/src/.venv/lib/python3.11/site-packages/gunicorn/app/wsgiapp.py", line 47, in load_wsgiapp
return util.import_app(self.app_uri)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/render/project/src/.venv/lib/python3.11/site-packages/gunicorn/util.py", line 370, in import_app
mod = importlib.import_module(module)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/importlib/init.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "<frozen importlib._bootstrap>", line 1204, in _gcd_import
File "<frozen importlib._bootstrap>", line 1176, in _find_and_load
File "<frozen importlib._bootstrap>", line 1147, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 690, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 936, in exec_module
File "<frozen importlib._bootstrap_external>", line 1074, in get_code
File "<frozen importlib._bootstrap_external>", line 1004, in source_to_code
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "/opt/render/project/src/app.py", line 310
except StopIteration: # Handle StopIteration properly outside the loop
^^^^^^
SyntaxError: invalid syntax

p to date, audited 2313 packages in 7s
368 packages are looking for funding
run npm fund for details
101 vulnerabilities (48 moderate, 27 high, 26 critical)
To address issues that do not require attention, run:
npm audit fix
To address all issues (including breaking changes), run:
npm audit fix --force
Run npm audit for details.

[email protected] build
cross-env NODE_OPTIONS=--openssl-legacy-provider react-scripts build
Creating an optimized production build...
Failed to compile.
[eslint]
src/App.tsx
Syntax error: Invalid character (22:21)
==> Build failed 😞
==> Common ways to troubleshoot your deploy: https://render.com/docs/troubleshooting-deploys

Réflexion

Analyzing error in Python code

The user is asking for a patched version of the code that can deploy on Groq, and there's an error message from the Python code. It points to a "SyntaxError" at line 310 with "except StopIteration:". However, "except StopIteration:" seems syntactically correct? I'll take a look at the error, which is specifically in the file "/opt/render/project/src/app.py". The snippet provided is a multi-line string starting with triple quotes; I'll count the lines to locate line 310 and look deeper into this!

Investigating syntax issue in code block

The error message points to the "except StopIteration:" in the code, most likely within a "while True:" loop. In the "chat_endpoint" function, the code has this structure:

text
while True: # various code except StopIteration: # handle exception

I think the problem might be that the "except StopIteration:" is mistakenly inside the "while" loop's try block. I suspect the "try" should actually cover the entire loop, not just part of it, meaning the indentation could be off. Time to fix the indentation!

Correcting comment syntax and indentation issues

It looks like there are a couple of issues in the code. The "except StopIteration:" is incorrectly indented inside the "while True" loop. To fix this, it should be unindented to the same level as the "while" statement. Additionally, I noticed double slashes (//) for comments, which isn’t valid in Python. For instance, in the canvas_modify_endpoint, the comment should use # instead.

I'll go ahead and adjust these issues:

  1. Unindent the except block.
  2. Replace // with # for proper comments.

Investigating syntax issues with TSX

I see in the TypeScript (TSX) code that the error message points to "Invalid character (22:21)", which might mean there’s a non-ASCII character, like an emoji, causing the issue. I don't spot an obvious error in the imports or the App component, so it could be an issue with a comment or possibly invisible characters.

I'll look into the possibility that the emoji in the Python code (🎯) is causing the error in the TypeScript file, particularly around line 22. It could be due to the file’s encoding or some unescaped special characters. I'll check for any characters that may break the TSX compiler.

NoneNone

NoneNone

NoneNone

NoneNone

None

Raisonné des problèmes de déploiement et du code pendant 3m 16s

Réponse

Below is a “patched” version of your code that fixes two kinds of issues we found:

  1. Python (app.py) fixes:

    • The try/except block inside your web‐search loop was mis‐indented. In the new version the entire while‑loop is wrapped by try/except so that a StopIteration is caught outside the loop.
    • Several string literals that spanned multiple lines (with comments on the same lines) have been wrapped in parentheses so that Python “adjacent literal concatenation” works correctly.
    • Any “JavaScript‐style” comments (using “// …”) have been replaced by Python comments (“# …”).
  2. TypeScript/React (app.tsx) fixes:

    • We “re‐typed” the suspicious block (for example, the “registerLanguage” block) so that no stray non‐ASCII or non‐UTF8 character remains.
    • (If you still see an “invalid character” error, please be sure that your file is saved in UTF‑8 encoding.)

Below are the complete patched files:


app.py

python
# app.py from gevent import monkey monkey.patch_all() import os import re import json import logging from typing import Any, Dict, List, Optional, Tuple, Generator import time import requests from bs4 import BeautifulSoup from concurrent.futures import ThreadPoolExecutor, as_completed from flask import Flask, request, Response, jsonify, stream_with_context from flask_cors import CORS from groq import Groq from e2b_code_interpreter import Sandbox # Importation des décorateurs tenacity from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type import httpx # Importez httpx pour vérifier le type d'exception # Configuration du logging logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) app = Flask(__name__) CORS(app) # Clés API (variables d'environnement ou valeurs par défaut sécurisées) GROQ_API_KEY = os.environ.get("GROQ_API_KEY") E2B_API_KEY = os.environ.get("E2B_API_KEY") PISTON_API_URL = "https://emkc.org/api/v2/piston/execute" # URL de l'API Piston WHOOGLE_INSTANCE_URL = "https://chatapp-1-u8t2.onrender.com" # URL de votre instance Whoogle groq_client = Groq(api_key=GROQ_API_KEY) sandbox = Sandbox(api_key=E2B_API_KEY, timeout=600) VALID_MODELS = [ "llama-3.3-70b-versatile", "distil-whisper-large-v3-en", "gemma2-9b-it", "llama-3.1-8b-instant", "llama-guard-3-8b", "llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "whisper-large-v3", "whisper-large-v3-turbo", "deepseek-r1-distill-llama-70b-specdec", "deepseek-r1-distill-llama-70b", "llama-3.3-specdec", "llama-3.2-1b-preview", "llama-3.2-3b-preview", "llama-3.2-11b-vision-preview", "llama-3.2-90b-vision-preview", "llama-3.2-90b-vision-preview", "qwen-2.5-32b" ] VALID_MODELS_LOW_TOKEN = [ # Smaller models for token saving "qwen-2.5-32b", "llama-3.1-8b-instant", "gemma2-9b-it", "mixtral-8x7b-32768" ] DEFAULT_CHAT_MODEL_LOW_TOKEN = "qwen-2.5-32b" SUMMARIZATION_MODEL = "qwen-2.5-32b" # Smaller model for summarization CODE_PATTERN = re.compile(r"```python\s*([\s\S]*?)```") CODE_REQUIRED_MARKER = "[CODE_REQUIRED]" CANVAS_START_MARKER = "[CANVAS_START]" CANVAS_END_MARKER = "[CANVAS_END]" # Limites pour le contexte web (Whoogle) - Reduced limits for token saving MAX_CONTEXT_CHARS_PER_PAGE = 1000 # Reduced context per page MAX_TOTAL_CONTEXT_CHARS = 5000 # Reduced total context MAX_WEB_LINKS = 5 # Reduced number of web links WHOOGLE_CACHE: Dict[str, str] = {} CHAT_HISTORY_MESSAGE_LIMIT = 10 # Limit chat history for token saving http_session = requests.Session() http_session.headers.update({"User-Agent": "PlinkoChatBot/1.0"}) def operate_requests(prompt) -> Generator[Dict[str, str], None, Tuple[str, List[str]]]: """ Fonction regroupant toutes les opérations : 1. Recherche sur Whoogle. 2. Limitation à 7 liens. 3. Extraction et résumé du contenu de chaque page (en parallèle). 4. Retourne le contexte obtenu et la liste des sources. """ global GROQ_API_KEY groq_client = Groq(api_key=GROQ_API_KEY) def whoogle_search(query): """ Effectue une recherche via Whoogle et retourne une liste de liens. """ base_url = WHOOGLE_INSTANCE_URL + "/search" params = {"q": query} headers = {"User-Agent": "Mozilla/5.0"} try: response = requests.get(base_url, params=params, headers=headers) response.raise_for_status() except requests.exceptions.RequestException as e: print(f"Erreur lors de la requête Whoogle: {e}") return [] soup = BeautifulSoup(response.text, "html.parser") links = [result["href"] for result in soup.find_all("a", href=True) if result["href"].startswith("http")] return links @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=60), retry=retry_if_exception_type(httpx.HTTPError)) def summarize_with_groq(query, text_content): """ Utilise l'API Groq pour extraire les informations importantes du texte. Retry avec tenacity en cas d'erreur 429. """ try: response = groq_client.chat.completions.create( model=SUMMARIZATION_MODEL, messages=[ { "role": "system", "content": ( "Vous êtes un assistant expert en extraction d'informations. " "Votre tâche est de lire le texte fourni et d'extraire les informations les plus importantes " "qui répondent à la question de recherche suivante : '{}'. " "Créez une note concise résumant les points clés et les informations pertinentes. " "Soyez précis et factuel." ).format(query) }, { "role": "user", "content": text_content[:MAX_CONTEXT_CHARS_PER_PAGE] } ], temperature=0.2 ) return response.choices[0].message.content.strip() except httpx.HTTPError as e: if e.response.status_code == 429: logger.warning(f"Erreur 429 (Too Many Requests) lors de l'appel à l'API Groq (summarize_with_groq). Reessai en cours... Details: {e}") raise e else: logger.error(f"Erreur HTTP lors de l'appel à l'API Groq (summarize_with_groq) (non-429). Details: {e}") raise except Exception as e: logger.error(f"Erreur inattendue lors de l'appel à l'API Groq (summarize_with_groq). Détails: {e}") return "Erreur lors de la synthèse avec Groq." def fetch_and_summarize(link, query): """ Récupère le contenu d'une page et utilise Groq pour en extraire une note résumée. """ try: r = requests.get(link, timeout=20) r.raise_for_status() soup_page = BeautifulSoup(r.text, 'html.parser') text_content = soup_page.get_text(separator='\n', strip=True) print(f"{link} : OK - Extraction du contenu...") summary_note = summarize_with_groq(query, text_content) print(f"{link} : OK - Note Groq ajoutée.") return f"Source: {link}\nNote:\n{summary_note}\n\n" except requests.exceptions.RequestException as e: print(f"{link} : Échec de la récupération de la page.") return f"Source: {link}\nNote:\nErreur de récupération de la page.\n\n" except Exception as e: print(f"{link} : Erreur lors du traitement de la page : {e}") return f"Source: {link}\nNote:\nErreur lors du traitement de la page: {e}\n\n" def fetch_pages_and_summarize(links: List[str], query: str) -> str: """ Utilise un pool de threads pour traiter plusieurs pages en parallèle et retourne le contexte concaténé. """ results = [] with ThreadPoolExecutor(max_workers=10) as executor: future_to_link = {executor.submit(fetch_and_summarize, link, query): link for link in links} for future in as_completed(future_to_link): result = future.result() results.append(result) return "".join(results) yield {"type": "web_search_status", "content": "start"} all_links = whoogle_search(prompt) if not all_links: print("Aucun résultat de recherche trouvé.") yield {"type": "web_search_status", "content": "end"} yield "", [] return limited_links = all_links[:MAX_WEB_LINKS] print(f"{len(limited_links)} lien(s) utilisé(s) pour la recherche.") context = fetch_pages_and_summarize(limited_links, prompt) yield {"type": "web_search_status", "content": "end"} yield context, limited_links return def extract_code_blocks(text: str) -> Tuple[Optional[str], Optional[str]]: """Extrait les blocs de code Python et gère le marqueur [CODE_REQUIRED].""" if CODE_REQUIRED_MARKER in text: text_no_marker = text.replace(CODE_REQUIRED_MARKER, "", 1).strip() code_match = CODE_PATTERN.search(text_no_marker) if code_match: return code_match.group(1).strip(), None else: code_match = CODE_PATTERN.search(text) if code_match: return None, code_match.group(1).strip() return None, None def run_code_in_sandbox(code: str) -> str: """Exécute le code Python dans le sandbox e2b et retourne le résultat.""" start_time = time.time() try: logger.info("Début exécution code dans sandbox e2b.") execution = None method_name = next((method for method in ["run_code_sync", "execute_sync", "execute_code", "execute", "run_code", "run"] if hasattr(sandbox, method)), None) if method_name: logger.info(f"Utilisation de la méthode sandbox.{method_name}") execution = getattr(sandbox, method_name)(code) else: error_msg = "Aucune méthode d'exécution de code disponible dans le sandbox." logger.error(error_msg) return error_msg logs = execution.logs stdout_logs = logs.stdout if hasattr(logs, 'stdout') else [] stderr_logs = logs.stderr if hasattr(logs, 'stderr') else [] stdout = "\n".join(stdout_logs) stderr = "\n".join(stderr_logs) if stderr: return f"Stdout:\n{stdout}\n\nStderr:\n{stderr}" return stdout except Exception as e: duration = time.time() - start_time error_msg = f"Erreur lors de l'exécution du code dans e2b après {duration:.2f} secondes: {str(e)}" logger.exception(error_msg) return f"Erreur exécution code: {str(e)}" def extract_numeric_result(exec_result: str) -> Optional[str]: """Extrait la dernière ligne numérique du résultat d'exécution de code.""" if "Stdout:" in exec_result: stdout_section = exec_result.split("Stdout:\n", 1)[1].split("\n\nStderr:")[0].strip() else: stdout_section = exec_result.strip() lines = [l.strip() for l in stdout_section.splitlines() if l.strip()] if not lines: return None last_line = lines[-1] if re.search(r'\d', last_line): return last_line return None @app.route('/models', methods=['GET']) def get_models() -> Response: """Retourne la liste des modèles valides.""" return jsonify({"models": VALID_MODELS_LOW_TOKEN}) @app.route('/chat', methods=['POST']) def chat_endpoint() -> Response: """ Point d'entrée principal pour le chat. Gère le streaming SSE, l'exécution de code et les canvas. Intègre la recherche web si activée (configurable via requête). """ @stream_with_context @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=60), retry=retry_if_exception_type(httpx.HTTPError)) def stream_response(): data = request.get_json() if not data: logger.warning("Requête chat invalide: corps de requête manquant.") yield format_sse_event({"type": "error", "content": "Requête chat invalide : corps de requête manquant."}) return enable_web_search = data.get('enable_web_search', True) messages = data.get('messages', []) model: str = data.get('model', DEFAULT_CHAT_MODEL_LOW_TOKEN).strip() execute_code: bool = data.get('execute_code', True) if not messages or not isinstance(messages, list) or not model or model not in VALID_MODELS_LOW_TOKEN: logger.warning("Requête chat invalide: messages=%s, model=%s", messages, model) yield format_sse_event({"type": "error", "content": "Requête chat invalide : messages ou modèle incorrects."}) return system_web_context = "" last_user_message = messages[-1]['content'] if messages else "" web_sources: List[str] = [] if enable_web_search and last_user_message: web_search_generator = operate_requests(last_user_message) try: while True: event = next(web_search_generator) if isinstance(event, dict) and event.get('type') == 'web_search_status': yield format_sse_event(event) elif isinstance(event, tuple) and len(event) == 2: system_web_context, web_sources = event break elif isinstance(event, tuple) and event == ("", []): system_web_context = "" web_sources = [] break except StopIteration: system_web_context = "" web_sources = [] else: system_web_context = "" base_prompt = ( "🎯 **Rôle : Expert en calcul et canvas.**\n" # Very short role "**Mission :** Répondre, calculer, canvas interactifs. Style expert. Concise.\n\n" # Very concise mission ) web_context_prompt_part = f"**CONTEXTE WEB :**\n{system_web_context}\n\n" if system_web_context else "" instructions_prompt_part = ( "**CALCUL NUMÉRIQUE :**\n" # Shortened instruction section titles "1. Phrase intro calcul Python.\n" "2. `[CODE_REQUIRED]`.\n" "3. Bloc PYTHON minimal, lisible, `print(resultat_numerique)`.\n" "**[APRES EXECUTION CODE]** Continue, explique résultat code en DETAILS. Fais avancer conversation.\n\n" # Concise after-code instructions "**CANVAS CODE :**\n" "1. Canvas code interactif.\n" "2. `[CANVAS_START]`...`[CANVAS_END]`.\n" "**CANVAS TEXTE :**\n" "1. Canvas texte interactif (markdown).\n" "2. `[CANVAS_START]`...`[CANVAS_END]`.\n" "**CHAT CANVAS :** MODIFIER canvas. NOUVEAU canvas complet `[CANVAS_START]`...`[CANVAS_END]`.\n" # Even shorter canvas chat instructions "**PAS CALCUL NI CANVAS :** Réponse directe, concise. Contexte conversation." # Shortened no-action instructions ) system_prompt_content = base_prompt + web_context_prompt_part + instructions_prompt_part system_prompt = {"role": "system", "content": system_prompt_content} messages_with_system_prompt = [system_prompt] + messages[-CHAT_HISTORY_MESSAGE_LIMIT:] full_reply_text = "" code_to_execute = None code_to_display = None is_code_used = False canvas_content = None code_executed = False prompt_tokens = 0 completion_tokens = 0 total_tokens = 0 initial_response_completed = False yield format_sse_event({"type": "analysis_status", "content": "start"}) try: response_stream = groq_client.chat.completions.create( model=model, messages=messages_with_system_prompt, temperature=0.0, stream=True, ) for chunk in response_stream: if initial_response_completed: break delta_content = chunk.choices[0].delta.content or "" delta_content_no_marker = delta_content.replace(CODE_REQUIRED_MARKER, "") full_reply_text += delta_content_no_marker if CANVAS_START_MARKER in full_reply_text and CANVAS_END_MARKER in full_reply_text: start_index = full_reply_text.find(CANVAS_START_MARKER) + len(CANVAS_START_MARKER) end_index = full_reply_text.find(CANVAS_END_MARKER) canvas_content_extracted = full_reply_text[start_index:end_index].strip() canvas_content = canvas_content_extracted delta_content_for_chat = delta_content_no_marker.replace(CANVAS_START_MARKER, "").replace(CANVAS_END_MARKER, "") yield format_sse_event({"type": "text", "content": delta_content_for_chat}) yield format_sse_event({"type": "canvas_start", "content": canvas_content}) continue else: yield format_sse_event({"type": "text", "content": delta_content_no_marker}) code_to_execute, code_to_display = extract_code_blocks(full_reply_text) if code_to_execute: initial_response_completed = True if execute_code and code_to_execute: is_code_used = True logger.info("Envoi événement CODE_BLOCK") yield format_sse_event({"type": "code_block", "content": code_to_execute}) yield format_sse_event({"type": "code_execution_start"}) execution_result = run_code_in_sandbox(code_to_execute) numeric_result = extract_numeric_result(execution_result) if numeric_result: code_executed = True logger.info("Envoi événement EXECUTED_CODE_RESULT") yield format_sse_event({"type": "executed_code_result", "content": numeric_result}) conclusion_prompt_content = ( f"**[APRES CODE]** Resultat: **{numeric_result}**. Continue conversation." ) conclusion_prompt = [{"role": "user", "content": conclusion_prompt_content}] conclusion_stream = groq_client.chat.completions.create( model=model, messages=conclusion_prompt, temperature=0.0, stream=True, ) for chunk in conclusion_stream: delta_content = chunk.choices[0].delta.content or "" yield format_sse_event({"type": "text", "content": delta_content}) yield format_sse_event({"type": "final"}) else: code_executed = True logger.info("Envoi événement EXECUTED_CODE_ERROR") yield format_sse_event({"type": "executed_code_error", "content": execution_result}) yield format_sse_event({"type": "final", "result": "Fin de la réponse (erreur ou pas de résultat numérique)."}) if web_sources: yield format_sse_event({"type": "web_sources", "content": web_sources}) yield format_sse_event({"type": "sources_start"}) for source_url in web_sources: yield format_sse_event({"type": "source_item", "content": source_url}) yield format_sse_event({"type": "sources_end"}) prompt_tokens = 0 completion_tokens = 0 total_tokens = prompt_tokens + completion_tokens if not code_executed: logger.info("Envoi événement FINAL (sans code exécuté)") yield format_sse_event({"type": "final", "prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}) else: yield format_sse_event({"type": "final", "prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}) except httpx.HTTPError as e: if e.response.status_code == 429: error_message = f"Erreur 429 (Too Many Requests) lors de l'appel à l'API Groq (stream_response). Réessai automatique par Tenacity. Détails: {e}" logger.warning(error_message) yield format_sse_event({"type": "error", "content": "Erreur temporaire de l'IA. Réessayez svp."}) raise e else: error_message = f"Erreur HTTP lors de l'appel à l'API Groq (stream_response) (non-429). Détails: {e}" logger.exception(error_message) yield format_sse_event({"type": "error", "content": error_message}) except Exception as e: error_message = f"Erreur serveur IA: {str(e)}" logger.exception(error_message) yield format_sse_event({"type": "error", "content": error_message}) finally: yield format_sse_event({"type": "analysis_status", "content": "end"}) yield format_sse_event({"type": "done", "is_code_execution": is_code_used}) return Response(stream_with_context(stream_response()), mimetype='text/event-stream') @app.route('/canvas_modify', methods=['POST']) def canvas_modify_endpoint() -> Response: data = request.get_json() if not data: logger.warning("Requête canvas_modify sans données.") return jsonify({"error": "Requête canvas_modify invalide : corps de requête manquant."}), 400 user_message = data.get('message') canvas_content = data.get('canvas_content') model = data.get('model', DEFAULT_CHAT_MODEL_LOW_TOKEN).strip() if not user_message or canvas_content is None: logger.warning("Requête canvas_modify incomplète: message=%s, canvas_content=%s", user_message, canvas_content) return jsonify({"error": "Requête canvas_modify invalide : message ou contenu canvas manquant."}), 400 canvas_chat_messages = [ {"role": "system", "content": ("Chat canvas. Modifier canvas. " "NOUVEAU canvas complet `[CANVAS_START]`...`[CANVAS_END]`.")}, {"role": "assistant", "content": f"[CANVAS_START]\n{canvas_content}\n[CANVAS_END] Canvas actuel. Modifier?"}, {"role": "user", "content": user_message} ] @retry(stop=stop_after_attempt(5), wait=wait_exponential(multiplier=1, min=4, max=60), retry=retry_if_exception_type(httpx.HTTPError)) def canvas_modify_groq_call(canvas_chat_messages, model): """Fonction retryable pour l'appel à Groq dans canvas_modify_endpoint""" return groq_client.chat.completions.create( model=model, messages=canvas_chat_messages, temperature=0.0, stream=False, ) try: response_stream = canvas_modify_groq_call(canvas_chat_messages, model) ai_response_text = response_stream.choices[0].message.content or "" if CANVAS_START_MARKER in ai_response_text and CANVAS_END_MARKER in ai_response_text: start_index = ai_response_text.find(CANVAS_START_MARKER) + len(CANVAS_START_MARKER) end_index = ai_response_text.find(CANVAS_END_MARKER) updated_canvas_content = ai_response_text[start_index:end_index].strip() response_message = "Canvas mis à jour." logger.info("Canvas modifié avec succès.") return jsonify({"updated_canvas_content": updated_canvas_content, "response_message": response_message}) else: logger.error("Réponse IA canvas_modify invalide: marqueurs CANVAS_START/END manquants.") return jsonify({"error": "Réponse IA invalide pour la modification du canvas", "full_response": ai_response_text}), 500 except httpx.HTTPError as e: if e.response.status_code == 429: error_message = f"Erreur 429 (Too Many Requests) lors de l'appel à l'API Groq (canvas_modify_endpoint). Après plusieurs essais, opération échouée. Details: {e}" # Improved error message else: error_message = f"Erreur HTTP lors de l'appel à l'API Groq (canvas_modify_endpoint) (non-429). Détails: {e}" logger.exception(error_message) return jsonify({"error": "Erreur lors de la modification du canvas après plusieurs tentatives.", "details": error_message}), 500 except Exception as e: error_message = f"Erreur serveur (canvas_modify): {str(e)}" logger.exception(error_message) return jsonify({"error": error_message}), 500 @app.route('/run', methods=['POST']) def run_code_endpoint() -> Response: """ Point d'accès pour exécuter du code via l'API Piston (pour canvas code). Langages supportés par Piston. """ data = request.get_json() if not data: logger.warning("Requête run sans données.") return jsonify({"error": "Requête run invalide : corps de requête manquant."}), 400 language = data.get("language") code = data.get("code") stdin = data.get("stdin", "") if not language or not code: logger.warning("Requête run incomplète: language=%s, code=%s", language, code) return jsonify({"error": "Requête run invalide : langage et code requis."}), 400 payload = { "language": language, "version": "*", "files": [{"content": code}], "stdin": stdin } try: piston_response = http_session.post(PISTON_API_URL, json=payload, timeout=30) piston_response.raise_for_status() logger.info("Code exécuté avec Piston avec succès.") return jsonify(piston_response.json()) except requests.RequestException as e: error_message = f"Erreur lors de l'appel à l'API Piston: {str(e)}" logger.exception(error_message) piston_text = piston_response.text if 'piston_response' in locals() and piston_response is not None else "" return jsonify({"error": "Erreur d'exécution du code via l'API Piston.", "piston_response": piston_text, "details": error_message}), 500 except Exception as e: error_message = f"Erreur inattendue lors de l'exécution du code: {str(e)}" logger.exception(error_message) return jsonify({"error": "Erreur inattendue lors de l'exécution du code.", "details": error_message}), 500 @app.route('/run', methods=['OPTIONS']) def run_code_options() -> Response: """Gère les requêtes OPTIONS pour l'endpoint /run (CORS preflight).""" return jsonify({'message': 'OPTIONS requests allowed'}), 200 def format_sse_event(data: Dict[str, Any]) -> str: """Formate les données en event SSE (Server-Sent Events).""" return f"data: {json.dumps(data)}\n\n" if __name__ == '__main__': if os.environ.get('FLASK_ENV') == 'development': app.run(debug=True, port=5000)

app.tsx

tsx
import React, { useState, useRef, useEffect } from 'react'; import ReactMarkdown from 'react-markdown'; import { Prism } from 'react-syntax-highlighter/dist/esm'; import { Light as SyntaxHighlighterLight } from 'react-syntax-highlighter/dist/esm'; import { atomDark } from 'react-syntax-highlighter/dist/esm/styles/prism'; import { python as prismPython, javascript as prismJavascript, c, cpp as prismCpp, java as prismJava } from 'react-syntax-highlighter/dist/esm/languages/prism'; // Import CodeMirror et ses extensions import CodeMirror from '@uiw/react-codemirror'; import { python } from '@codemirror/lang-python'; import { javascript } from '@codemirror/lang-javascript'; import { java } from '@codemirror/lang-java'; import { cpp } from '@codemirror/lang-cpp'; import { markdown } from '@codemirror/lang-markdown'; // Helper function to register languages for react-syntax-highlighter const registerLanguage = (name: string, language: any) => { if (Prism && Prism.languages && !Prism.languages[name]) { Prism.registerLanguage(name, language); } else if (Prism && Prism.languages && Prism.languages[name]) { console.warn(`Language "${name}" is already registered.`); } else { console.warn("Warning: Prism.registerLanguage is not available. Syntax highlighting might not work correctly."); } }; // Enregistrement des langages pour la coloration syntaxique registerLanguage('python', prismPython); registerLanguage('javascript', prismJavascript); registerLanguage('c', c); registerLanguage('cpp', prismCpp); registerLanguage('java', prismJava); interface ChatMessage { sender: 'User' | 'AI'; text: string; codeBlock?: string; isCodeCollapsed?: boolean; canvasContent?: string; hasCanvas?: boolean; isTextCanvas?: boolean; analysisStatus?: string; executedCodeError?: string; isCodeExecuting?: boolean; codeExecutionOutput?: string | null; promptTokens?: number; completionTokens?: number; totalTokens?: number; webSources?: string[]; } interface CanvasChatMessage { sender: 'User' | 'AI'; text: string; } const App: React.FC = () => { const [message, setMessage] = useState(''); const [chat, setChat] = useState<ChatMessage[]>([]); const chatDisplayRef = useRef<HTMLDivElement>(null); const [isSending, setIsSending] = useState(false); const [availableModels, setAvailableModels] = useState<string[]>([]); const [selectedModel, setSelectedModel] = useState<string>(''); const [isCanvasActive, setIsCanvasActive] = useState(false); const [currentCanvasContent, setCurrentCanvasContent] = useState(''); const [canvasChatMessages, setCanvasChatMessages] = useState<CanvasChatMessage[]>([]); const [canvasMessage, setCanvasMessage] = useState(''); const canvasChatDisplayRef = useRef<HTMLDivElement>(null); const [selectedLanguage, setSelectedLanguage] = useState<'python' | 'c' | 'cpp' | 'java' | 'javascript'>('python'); const [codeOutput, setCodeOutput] = useState(''); const [activeCanvasMessageIndex, setActiveCanvasMessageIndex] = useState<number | null>(null); const [isTextCanvas, setIsTextCanvas] = useState(false); const [isWebSearchEnabled, setIsWebSearchEnabled] = useState(false); const [analysisStatus, setAnalysisStatus] = useState<string | null>(null); const [webSources, setWebSources] = useState<string[]>([]); const [showSources, setShowSources] = useState(false); const consoleOutputRef = useRef<HTMLDivElement>(null); const VALID_MODELS_FRONTEND = [ "llama-3.1-8b-instant", "gemma2-9b-it", "mixtral-8x7b-32768" ]; useEffect(() => { const fetchModels = async () => { try { const response = await fetch('https://pl1nkochat.onrender.com/models'); if (response.ok) { const data = await response.json(); setAvailableModels(data.models.filter((model: string) => VALID_MODELS_FRONTEND.includes(model))); if (data.models.length > 0) { setSelectedModel(data.models.find((model: string) => VALID_MODELS_FRONTEND.includes(model)) || VALID_MODELS_FRONTEND[0] || ''); } } else { console.error("Failed to fetch models:", response.status); setChat(prevChat => [...prevChat, { sender: 'AI', text: `Erreur de chargement des modèles: ${response.status}` }]); } } catch (error) { console.error("Error fetching models:", error); setChat(prevChat => [...prevChat, { sender: 'AI', text: "Erreur de communication avec le serveur pour les modèles." }]); } }; fetchModels(); }, []); useEffect(() => { if (!isCanvasActive && chatDisplayRef.current) { chatDisplayRef.current.scrollTop = chatDisplayRef.current.scrollHeight; } if (isCanvasActive && canvasChatDisplayRef.current) { canvasChatDisplayRef.current.scrollTop = canvasChatDisplayRef.current.scrollHeight; } }, [chat, canvasChatMessages, isCanvasActive]); useEffect(() => { if (consoleOutputRef.current) { consoleOutputRef.current.scrollTop = consoleOutputRef.current.scrollHeight; } }, [codeOutput]); const handleSubmit = async (e: React.FormEvent) => { e.preventDefault(); if (message.trim() === '' || isSending || isCanvasActive) return; setIsSending(true); setAnalysisStatus('Analyse en cours...'); setWebSources([]); const userMessage = message; setChat(prevChat => [...prevChat, { sender: 'User', text: userMessage }]); setMessage(''); setAnalysisStatus('Réponse en cours...'); const messagesForApi = [ ...chat.map(msg => ({ role: msg.sender === 'User' ? 'user' : 'assistant', content: msg.text })), { role: 'user', content: userMessage } ]; setChat(prevChat => [ ...prevChat, { sender: 'AI', text: '', isCodeCollapsed: true, analysisStatus: 'Réponse en cours...', isCodeExecuting: false, codeExecutionOutput: null, webSources: [] } ]); try { const response = await fetch('https://pl1nkochat.onrender.com/chat', { method: 'POST', headers: { 'Content-Type': 'application/json', 'Accept': 'text/event-stream' }, body: JSON.stringify({ messages: messagesForApi, model: selectedModel, enable_web_search: isWebSearchEnabled }) }); if (!response.ok) { const errorText = await response.text(); setChat(prevChat => { const lastIndex = prevChat.length - 1; return [ ...prevChat.slice(0, lastIndex), { sender: 'AI', text: `Erreur: ${response.status} - ${errorText}`, analysisStatus: 'Erreur' } ]; }); setIsSending(false); setAnalysisStatus('Erreur lors de la communication avec le serveur.'); return; } const reader = response.body!.getReader(); const decoder = new TextDecoder(); while (true) { const { done, value } = await reader.read(); if (done) break; const chunk = decoder.decode(value); const events = chunk.split('\n\n').filter(line => line.startsWith("data: ")); for (const eventLine of events) { const eventData = JSON.parse(eventLine.substring(6)); console.log("Événement reçu:", eventData); if (eventData.type === 'text') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const newText = lastMessage.text + eventData.content; const updatedMessage = { ...lastMessage, text: newText, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'code_block') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, codeBlock: eventData.content, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'code_execution_start') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, isCodeExecuting: true, codeExecutionOutput: 'Execution en cours...' }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'executed_code_result') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, codeExecutionOutput: eventData.content, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'canvas_start') { const canvasContent = eventData.content; const isText = canvasContent.trim().startsWith('#') || canvasContent.trim().startsWith('* ') || !canvasContent.includes('```'); setIsTextCanvas(isText); const cleanedCanvasContent = canvasContent.replace(/```(python|javascript|c|cpp|java)\n/g, '```\n'); setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, canvasContent: cleanedCanvasContent, hasCanvas: true, isTextCanvas: isText, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'final') { setAnalysisStatus(null); setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, promptTokens: eventData.prompt_tokens, completionTokens: eventData.completion_tokens, totalTokens: eventData.total_tokens }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'error') { setChat(prevChat => { const lastIndex = prevChat.length - 1; return [ ...prevChat.slice(0, lastIndex), { sender: 'AI', text: `Erreur IA: ${eventData.content}`, analysisStatus: 'Erreur IA' } ]; }); setIsSending(false); setAnalysisStatus('Erreur de l\'IA.'); return; } else if (eventData.type === 'analysis_status') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, analysisStatus: eventData.content, isCodeExecuting: false }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); setAnalysisStatus(eventData.content); } else if (eventData.type === 'executed_code_error') { setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, executedCodeError: eventData.content, isCodeExecuting: false, codeExecutionOutput: eventData.content }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } else if (eventData.type === 'web_search_status') { if (eventData.content === 'start') { setAnalysisStatus('Recherche internet en cours...'); } else if (eventData.content === 'end') { setAnalysisStatus('Réponse en cours...'); } } else if (eventData.type === 'web_sources') { setWebSources(eventData.content); setChat(prevChat => { const lastIndex = prevChat.length - 1; const lastMessage = prevChat[lastIndex]; if (lastMessage) { const updatedMessage = { ...lastMessage, webSources: eventData.content }; return [...prevChat.slice(0, lastIndex), updatedMessage]; } return prevChat; }); } } } } catch (error) { console.error("Erreur lors de la communication avec le backend:", error); setChat(prevChat => { const lastIndex = prevChat.length - 1; return [ ...prevChat.slice(0, lastIndex), { sender: 'AI', text: "Erreur de communication avec l'IA.", analysisStatus: 'Erreur réseau' } ]; }); setAnalysisStatus('Erreur réseau.'); } finally { setIsSending(false); if (!analysisStatus) { setAnalysisStatus('Terminé'); setTimeout(() => setAnalysisStatus(null), 3000); } } }; const toggleCodeBlockCollapse = (index: number) => { setChat(prevChat => prevChat.map((msg, i) => i === index ? { ...msg, isCodeCollapsed: !msg.isCodeCollapsed } : msg ) ); }; const handleCanvasChange = (newContent: string) => { const cleanedContent = newContent.replace(/```(python|javascript|c|cpp|java)\n/g, '```\n'); setCurrentCanvasContent(cleanedContent); }; const handleCanvasMessageSubmit = async (e: React.FormEvent) => { e.preventDefault(); if (canvasMessage.trim() === '') return; const userCanvasMessage = canvasMessage; setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'User', text: userCanvasMessage }]); setCanvasMessage(''); try { const response = await fetch('https://pl1nkochat.onrender.com/canvas_modify', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ message: userCanvasMessage, canvas_content: currentCanvasContent, model: selectedModel }) }); if (!response.ok) { console.error("Erreur Canvas Chat:", response.status, await response.text()); setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'AI', text: `Erreur: ${response.status} - Chat Canvas` }]); return; } const result = await response.json(); if (result && result.updated_canvas_content) { const cleanedContent = result.updated_canvas_content.replace(/```(python|javascript|c|cpp|java)\n/g, '```\n'); setCurrentCanvasContent(cleanedContent); setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'AI', text: result.response_message || "Canvas mis à jour." }]); } else if (result && result.error) { setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'AI', text: `Erreur IA Canvas: ${result.error}` }]); } } catch (error) { console.error("Erreur Canvas Chat Backend:", error); setCanvasChatMessages(prevMessages => [...prevMessages, { sender: 'AI', text: "Erreur de communication avec l'IA (Canvas Chat)." }]); } }; const handleLanguageChange = (e: React.ChangeEvent<HTMLSelectElement>) => { setSelectedLanguage(e.target.value as typeof selectedLanguage); }; const handleCodeExecution = async () => { setCodeOutput('Execution en cours...'); try { const response = await fetch("https://pl1nkochat.onrender.com/run", { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify({ language: selectedLanguage, code: currentCanvasContent, stdin: "" }) }); const result = await response.json(); setCodeOutput(result.run?.stdout || result.error || "Erreur d'exécution du code!"); } catch (error) { console.error("Erreur Exécution Code:", error); setCodeOutput("Erreur lors de l'exécution du code."); } }; const handleStartNewChat = () => { setIsCanvasActive(false); setCanvasChatMessages([]); setCurrentCanvasContent(''); setCodeOutput(''); setActiveCanvasMessageIndex(null); setIsTextCanvas(false); setChat([]); setWebSources([]); }; const handleOpenCanvas = (index: number, canvasContent: string, isTextCanvasProp: boolean) => { setIsCanvasActive(true); const cleanedContent = canvasContent.replace(/```(python|javascript|c|cpp|java)\n/g, '```\n'); setCurrentCanvasContent(cleanedContent); setCanvasChatMessages([]); setActiveCanvasMessageIndex(index); setIsTextCanvas(isTextCanvasProp); }; const handleModelChange = (e: React.ChangeEvent<HTMLSelectElement>) => { setSelectedModel(e.target.value); }; const handleWebSearchToggle = (e: React.ChangeEvent<HTMLInputElement>) => { setIsWebSearchEnabled(e.target.checked); }; const toggleSourcesVisibility = () => { setShowSources(!showSources); }; return ( <div style={{ margin: '20px', display: 'flex', flexDirection: 'column', height: '95vh' }}> {!isCanvasActive && ( <center> <h1>PLINKO CHATTE</h1> </center> )} <div style={{ display: 'flex', flexDirection: 'row', width: '100%', height: 'calc(100% - 100px)' }}> {!isCanvasActive && ( <div style={{ flex: 1, height: '100%' }}> <div ref={chatDisplayRef} style={{ flexGrow: 1, overflowY: 'auto', marginBottom: '20px', border: '1px solid #ccc', padding: '10px', height: 'calc(100% - 100px)' }} > {chat.map((msg, index) => ( <div key={index} style={{ marginBottom: '15px', textAlign: msg.sender === 'User' ? 'right' : 'left' }}> <strong>{msg.sender}:</strong> <div> {msg.analysisStatus && <div style={{ fontSize: '0.8em', color: 'grey' }}>{msg.analysisStatus}</div>} <div style={{ whiteSpace: 'pre-wrap', wordWrap: 'break-word', position: 'relative' }}> {msg.sender === 'AI' ? ( <> <ReactMarkdown components={{ code: ({ className, children, ...props }) => { const language = className?.match(/language-(\w+)/)?.[1] || ''; return ( <SyntaxHighlighterLight style={atomDark as any} language={language} PreTag="div" children={String(children).replace(/\n$/, '')} {...props} /> ); } }} > {msg.codeBlock ? msg.text.replace(/```python\s*([\s\S]*?)```/g, '') : msg.text} </ReactMarkdown> {msg.webSources && msg.webSources.length > 0 && ( <div style={{ display: 'inline-block', marginLeft: '5px' }}> <button onClick={toggleSourcesVisibility} style={{ background: 'none', border: 'none', color: '#007bff', padding: 0, cursor: 'pointer', fontSize: '0.8em', textDecoration: 'underline' }} > {showSources ? 'Cacher Sources' : 'Afficher Sources'} </button> {showSources && ( <div style={{ position: 'absolute', zIndex: 10, backgroundColor: '#f9f9f9', border: '1px solid #ccc', padding: '10px', borderRadius: '5px', left: '0', top: '100%', marginTop: '5px', width: '250px', textAlign: 'left', boxShadow: '2px 2px 5px rgba(0,0,0,0.3)' }}> <strong>Sources:</strong> <ul> {msg.webSources.map((source, index) => ( <li key={index} style={{ fontSize: '0.8em', wordWrap: 'break-word' }}> <a href={source} target="_blank" rel="noopener noreferrer">{source}</a> </li> ))} </ul> </div> )} </div> )} </> ) : ( msg.text )} </div> {msg.sender === 'AI' && msg.codeBlock && ( <div style={{ marginTop: '10px', border: '1px solid #ccc', borderRadius: '5px', overflow: 'hidden' }}> <div style={{ backgroundColor: '#f0f0f0', padding: '8px', cursor: 'pointer', display: 'flex', justifyContent: 'space-between', alignItems: 'center' }} onClick={() => toggleCodeBlockCollapse(index)} > <strong>Code Python Généré</strong> <span>{msg.isCodeCollapsed ? '▼' : '▲'}</span> </div> <div style={{ padding: '10px', display: msg.isCodeCollapsed ? 'none' : 'block', backgroundColor: '#f9f9f9' }} > <SyntaxHighlighterLight style={atomDark as any} language="python" PreTag="div" > {msg.codeBlock} </SyntaxHighlighterLight> </div> </div> )} {msg.sender === 'AI' && msg.isCodeExecuting && msg.codeExecutionOutput && ( <div style={{ marginTop: '10px', padding: '10px', backgroundColor: '#e0f7fa', borderRadius: '5px', border: '1px solid #80deea' }}> <strong>Code Execution:</strong> <pre style={{ whiteSpace: 'pre-wrap', wordWrap: 'break-word', marginTop: '5px', fontFamily: 'monospace', fontSize: '0.9em' }}> {msg.codeExecutionOutput} </pre> </div> )} {msg.sender === 'AI' && !msg.isCodeExecuting && msg.codeExecutionOutput && !msg.executedCodeError && ( <div style={{ marginTop: '10px', padding: '10px', backgroundColor: '#c8e6c9', borderRadius: '5px', border: '1px solid #81c784' }}> <strong>Code Output:</strong> <pre style={{ whiteSpace: 'pre-wrap', wordWrap: 'break-word', marginTop: '5px', fontFamily: 'monospace', fontSize: '0.9em' }}> {msg.codeExecutionOutput} </pre> </div> )} {msg.sender === 'AI' && msg.executedCodeError && ( <div style={{ marginTop: '10px', padding: '10px', backgroundColor: '#ffe0b2', borderRadius: '5px', border: '1px solid #ffb74d' }}> <strong>Erreur d'exécution du code:</strong> <pre style={{ whiteSpace: 'pre-wrap', wordWrap: 'break-word', marginTop: '5px', fontFamily: 'monospace', fontSize: '0.9em' }}> {msg.executedCodeError} </pre> </div> )} {msg.sender === 'AI' && msg.hasCanvas && !isCanvasActive && ( <button onClick={() => handleOpenCanvas(index, msg.canvasContent || '', msg.isTextCanvas || false)} style={{ marginTop: '10px', padding: '8px 15px', borderRadius: '5px', backgroundColor: '#007bff', color: 'white', border: 'none', cursor: 'pointer' }} > Ouvrir Canvas </button> )} {msg.sender === 'AI' && msg.totalTokens !== undefined && ( <div style={{ marginTop: '5px', fontSize: '0.8em', color: 'grey' }}> Tokens utilisés: {msg.totalTokens} (Prompt: {msg.promptTokens}, Completion: {msg.completionTokens}) </div> )} </div> </div> ))} </div> {analysisStatus && <div style={{ marginBottom: '10px', color: 'grey', textAlign: 'center' }}>{analysisStatus}</div>} <form onSubmit={handleSubmit} style={{ display: 'flex', flexDirection: 'column', marginTop: '10px' }}> <textarea value={message} onChange={e => setMessage(e.target.value)} placeholder="Tapez votre message... (Chat Principal)" rows={3} style={{ padding: '8px', borderRadius: '5px', border: '1px solid #ccc' }} disabled={isSending || isCanvasActive} /> <div style={{ marginBottom: '10px', display: 'flex', alignItems: 'center', justifyContent: 'space-between' }}> <div> <label htmlFor="model-select" style={{ display: 'block', marginBottom: '5px', color: '#ccc' }}> Select Model: </label> <select id="model-select" value={selectedModel} onChange={handleModelChange} style={{ width: '100%', padding: '8px', borderRadius: '5px', backgroundColor: '#333', color: 'white', border: '1px solid #555', }} > {availableModels.map((model) => ( <option key={model} value={model}> {model} </option> ))} </select> </div> <div style={{ display: 'flex', alignItems: 'center' }}> <label htmlFor="web-search-toggle" style={{ color: '#ccc', marginRight: '10px' }}> Recherche Web: </label> <input id="web-search-toggle" type="checkbox" checked={isWebSearchEnabled} onChange={handleWebSearchToggle} style={{ cursor: 'pointer' }} /> </div> </div> <button type="submit" style={{ padding: '10px', borderRadius: '5px', backgroundColor: '#4CAF50', color: 'white', border: 'none', cursor: 'pointer' }} disabled={isSending || isCanvasActive}> {isSending ? "Envoi en cours..." : "Envoyer"} </button> </form> <button onClick={handleStartNewChat} style={{ marginTop: '10px', padding: '10px 15px', borderRadius: '8px', backgroundColor: '#607d8b', color: 'white', border: 'none', cursor: 'pointer', fontWeight: 'bold', transition: 'background-color 0.2s ease', }} > Nouveau Chat </button> </div> )} {isCanvasActive && ( <div style={{ display: 'flex', width: '100%', height: '100%', gap: '20px', position: 'fixed', top: 0, left: 0, right: 0, bottom: 0, }}> <div style={{ width: '25%', background: 'linear-gradient(145deg, #3a3d49, #2d2f36)', borderRadius: '8px', padding: '15px', display: 'flex', flexDirection: 'column', boxShadow: '0 4px 12px rgba(0,0,0,0.3)' }}> <h3 style={{ marginBottom: '10px', color: '#ccc' }}>Canvas Chat</h3> <button onClick={handleStartNewChat} style={{ padding: '10px 15px', borderRadius: '8px', backgroundColor: '#0a84ff', color: 'white', border: 'none', cursor: 'pointer', marginBottom: '20px', fontWeight: 'bold', transition: 'background-color 0.2s ease', }} > Retour au Chat Principal </button> <div ref={canvasChatDisplayRef} style={{ flexGrow: 1, overflowY: 'auto', marginBottom: '10px', color: '#ececf1', height: 'calc(100% - 180px)' }} > {canvasChatMessages.map((msg, index) => ( <div key={index} style={{ textAlign: msg.sender === 'User' ? 'right' : 'left', marginBottom: '5px', color: msg.sender === 'User' ? '#90caf9' : '#c5e1a5', }} > <strong>{msg.sender}:</strong> {msg.text} </div> ))} </div> <form onSubmit={handleCanvasMessageSubmit} style={{ display: 'flex', marginTop: '10px' }}> <textarea value={canvasMessage} onChange={(e) => setCanvasMessage(e.target.value)} placeholder="Modifier le canvas ici..." rows={2} style={{ flexGrow: 1, padding: '8px', borderRadius: '5px', border: 'none', backgroundColor: '#4d4e5c', color: '#ececf1', marginRight: '8px', resize: 'none', }} /> <button type="submit" style={{ padding: '8px 15px', borderRadius: '5px', backgroundColor: '#0a84ff', color: 'white', border: 'none', cursor: 'pointer', }} > Send </button> </form> </div> <div style={{ flex: 1, display: 'flex', flexDirection: 'column', backgroundColor: '#1e1e1e', borderRadius: '8px', overflow: 'hidden', boxShadow: '0 4px 12px rgba(0,0,0,0.3)' }}> <div style={{ backgroundColor: '#2d2d2d', padding: '8px 10px', display: 'flex', alignItems: 'center', borderBottom: '1px solid #444' }}> <span style={{ **Warning:** The response is not complete, would not consume credits.

Partager cette Q&R