سلام من دو چت بات درست کردم در اسپیس هاگینگ فیس ای...

Created on: August 11, 2025

Answered using GPT-5 Thinking by Chat01

Question

سلام من دو چت بات درست کردم در اسپیس هاگینگ فیس این یکی جدید است اینم کد هایش FROM python:3.9-slim

WORKDIR /app

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

EXPOSE 7860

# START: OPTIMIZED GUNICORN COMMAND

CMD ["gunicorn", "--workers", "5", "--worker-class", "gevent", "--bind", "0.0.0.0:7860", "--timeout", "720", "app:app"]

# END: OPTIMIZED GUNICORN COMMAND
این یکی # app.py

import os
import re
import json
import logging
from pathlib import Path
from flask import Flask, render_template, request, Response, jsonify
from filelock import FileLock
import requests
from huggingface_hub import HfApi, CommitOperationAdd, CommitOperationDelete, create_repo, hf_hub_download

class PersianLogFormatter(logging.Formatter):
    """Log formatter that renders the record's level name in Persian."""

    LEVEL_MAP = {
        logging.DEBUG: "دیباگ",
        logging.INFO: "اطلاع",
        logging.WARNING: "هشدار",
        logging.ERROR: "خطا",
        logging.CRITICAL: "بحرانی",
    }

    def format(self, record):
        # Swap the level name for its Persian translation before the base
        # formatter assembles the final message string.
        record.levelname = self.LEVEL_MAP.get(record.levelno, record.levelname)
        return super().format(record)

def setup_logging():
    """Install a single Persian-levelname stream handler on the root logger."""
    formatter = PersianLogFormatter(
        '[%(asctime)s] [%(levelname)s]: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )
    root = logging.getLogger()
    # Drop any handlers a previous import/worker installed so logs are not duplicated.
    if root.hasHandlers():
        root.handlers.clear()
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    root.addHandler(handler)
    root.setLevel(logging.INFO)

setup_logging()

# Comma-separated pool of Gemini API keys; the service cannot start without it.
ALL_GEMINI_API_KEYS_STR = os.getenv('ALL_GEMINI_API_KEYS', '')
if not ALL_GEMINI_API_KEYS_STR:
    raise RuntimeError("ALL_GEMINI_API_KEYS not set")
MASTER_API_KEYS = [k.strip() for k in ALL_GEMINI_API_KEYS_STR.split(',') if k.strip()]
if not MASTER_API_KEYS:
    raise RuntimeError("No valid keys")

# Shared-memory scratch space (/dev/shm) survives across gunicorn workers:
# a key-rotation counter plus per-chat metadata / inline-file caches.
SHM = Path('/dev/shm')
SHM.mkdir(exist_ok=True)
COUNTER_FILE_PATH = str(SHM / 'gunicorn_key_counter.txt')
lock = FileLock(COUNTER_FILE_PATH + ".lock")
with lock:
    if not os.path.exists(COUNTER_FILE_PATH):
        with open(COUNTER_FILE_PATH, 'w') as f:
            f.write('0')
CACHE_DIR = SHM / 'file_cache'
CACHE_DIR.mkdir(parents=True, exist_ok=True)
META_DIR = SHM / 'chat_meta'
META_DIR.mkdir(parents=True, exist_ok=True)
HF_LOCAL_CACHE = SHM / 'hf_ds_cache'
HF_LOCAL_CACHE.mkdir(parents=True, exist_ok=True)

# Hugging Face dataset repo used as chat-history storage (best-effort create).
HF_TOKEN = os.getenv("HF_TOKEN", "")
HF_REPO_ID = os.getenv("HF_DATASET_REPO", "Asrasahar/alpha-chat-history")
api_hf = HfApi(token=HF_TOKEN if HF_TOKEN else None)
if HF_TOKEN:
    try:
        create_repo(repo_id=HF_REPO_ID, repo_type="dataset", token=HF_TOKEN, exist_ok=True)
    except Exception:
        # Repo may already exist or token may lack rights; storage stays best-effort.
        pass

# BUG FIX: the paste stripped the dunder underscores — Flask(name) raised NameError.
app = Flask(__name__)
GOOGLE_API_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/models'

BASE_RULES_FA = """
دستورالعمل‌های اجباری برای دستیار هوش مصنوعی آلفا:
هویت: تو «چت‌بات آلفا» هستی که توسط هوش مصنوعی آلفا توسعه داده شده‌ای.
تحلیل جامع چندرسانه‌ای: هر نوع فایل را تحلیل و محتوا را منظم شرح بده. پاسخ را با تشخیص نوع فایل شروع کن.
اگر کاربر فقط فایل فرستاد و صراحتاً ساخت/ویرایش تصویر نخواست، فقط تحلیل کن.
اولویت ابزارها: handle_specific_edit > change_image_aspect_ratio > regenerate_with_enhancement > generate_image > perform_internet_search
"""

def get_and_increment_key_index():
    """Atomically read the shared rotation counter and advance it by one.

    Returns the counter value modulo the key-pool size, i.e. the index of
    the next API key to hand out. A missing or corrupt counter file resets
    the rotation to zero.
    """
    counter_lock = FileLock(COUNTER_FILE_PATH + ".lock")
    with counter_lock:
        try:
            with open(COUNTER_FILE_PATH, 'r') as fh:
                current = int(fh.read().strip())
        except (FileNotFoundError, ValueError):
            current = 0
        with open(COUNTER_FILE_PATH, 'w') as fh:
            fh.write(str(current + 1))
        return current % len(MASTER_API_KEYS)

def get_keys_for_request():
    """Return the full key pool rotated so a fresh key is tried first."""
    start = get_and_increment_key_index()
    return MASTER_API_KEYS[start:] + MASTER_API_KEYS[:start]

def _meta_path(chat_id: str) -> Path:
    """Map a chat id to its metadata file inside META_DIR.

    Only alphanumerics, '-' and '_' are kept, so the id cannot escape the
    directory (path traversal) or yield an invalid filename.
    """
    # BUG FIX: the paste stripped the leading underscore (all callers use
    # _meta_path) and split the ('-', '_') tuple across two lines.
    safe = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_'))
    return META_DIR / f"{safe}.json"

def load_chat_meta(chat_id: str) -> dict:
    """Read a chat's metadata dict from disk; returns {} on any failure."""
    path = _meta_path(chat_id)
    with FileLock(str(path) + ".lock"):
        if not path.exists():
            return {}
        try:
            with open(path, 'r', encoding='utf-8') as fh:
                return json.load(fh)
        except Exception:
            # Corrupt or unreadable metadata is treated as empty.
            return {}

def save_chat_meta(chat_id: str, meta: dict):
    """Persist a chat's metadata dict; write failures are silently ignored."""
    path = _meta_path(chat_id)
    with FileLock(str(path) + ".lock"):
        try:
            with open(path, 'w', encoding='utf-8') as fh:
                json.dump(meta or {}, fh, ensure_ascii=False)
        except Exception:
            # Metadata is a best-effort cache; losing a write is acceptable.
            pass

def update_chat_meta(chat_id: str, **kwargs):
    """Merge non-None keyword values into the chat's stored metadata."""
    meta = load_chat_meta(chat_id)
    for key, value in kwargs.items():
        # None means "leave the stored value untouched", not "clear it".
        if value is not None:
            meta[key] = value
    save_chat_meta(chat_id, meta)

def _file_cache_path(chat_id: str) -> Path:
    """Map a chat id to its inline-file cache file inside CACHE_DIR.

    Only alphanumerics, '-' and '_' are kept to sanitize the filename.
    """
    # BUG FIX: the paste stripped the leading underscore (callers use
    # _file_cache_path) and split the ('-', '_') tuple across two lines.
    safe = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_'))
    return CACHE_DIR / f"{safe}.json"

def load_file_cache(chat_id: str) -> dict:
    """Load the chat's cached inline files; {"files": []} when absent or corrupt."""
    path = _file_cache_path(chat_id)
    if path.exists():
        try:
            with open(path, 'r', encoding='utf-8') as fh:
                return json.load(fh)
        except Exception:
            pass
    return {"files": []}

def save_file_cache(chat_id: str, cache: dict):
    """Persist the chat's inline-file cache; write failures are ignored."""
    path = _file_cache_path(chat_id)
    with FileLock(str(path) + ".lock"):
        try:
            with open(path, 'w', encoding='utf-8') as fh:
                json.dump(cache or {"files": []}, fh, ensure_ascii=False)
        except Exception:
            # Best-effort cache: a lost write only costs a re-upload later.
            pass

def update_file_cache_from_inline_files(chat_id: str, inline_files: list):
    """Merge freshly-uploaded inline files into the chat's on-disk cache.

    Entries are keyed by fileUrl; an entry is rewritten when its base64
    payload changed. The cache is only persisted if something changed.
    """
    if not inline_files:
        return
    cache = load_file_cache(chat_id)
    by_url = {f.get("fileUrl"): f for f in cache.get("files", []) if f.get("fileUrl")}
    dirty = False
    for item in inline_files:
        item = item or {}
        url = item.get("fileUrl")
        mime = item.get("mimeType")
        payload = item.get("data")
        if not (url and mime and payload):
            continue  # incomplete record — nothing useful to cache
        record = {"fileUrl": url, "inlineData": {"mimeType": mime, "data": payload}}
        if url not in by_url or by_url[url].get("inlineData", {}).get("data") != payload:
            by_url[url] = record
            dirty = True
    if dirty:
        cache["files"] = list(by_url.values())
        save_file_cache(chat_id, cache)

def inject_inline_data_into_history(chat_id: str, history: list) -> list:
    """Re-attach cached base64 payloads to user messages that reference files.

    For every user part carrying a known fileUrl, an inlineData part is
    inserted immediately before it; other messages pass through untouched.
    """
    cache = load_file_cache(chat_id)
    url_to_inline = {
        f.get("fileUrl"): f.get("inlineData")
        for f in cache.get("files", [])
        if f.get("fileUrl")
    }
    if not url_to_inline:
        return history
    result = []
    for message in history:
        if message.get("role") != "user":
            result.append(message)
            continue
        rebuilt = []
        for part in message.get("parts", []):
            if isinstance(part, dict) and part.get("fileUrl"):
                url = part["fileUrl"]
                if url in url_to_inline:
                    inline = url_to_inline[url]
                    rebuilt.append({"inlineData": {"mimeType": inline["mimeType"], "data": inline["data"]}})
            rebuilt.append(part)
        result.append({"role": message["role"], "parts": rebuilt})
    return result

def get_all_tools():
    """Assemble the Gemini function-declaration list (image tools + search)."""
    search_tool = {
        "name": "perform_internet_search",
        "description": "جستجوی به‌روز",
        "parameters": {
            "type": "OBJECT",
            "properties": {"query": {"type": "STRING"}},
            "required": ["query"],
        },
    }
    image_tools = [
        {
            "name": "generate_image",
            "description": "ساخت تصویر جدید",
            "parameters": {
                "type": "OBJECT",
                "properties": {
                    "english_prompt": {"type": "STRING"},
                    "aspect_ratio": {"type": "STRING"},
                    "initial_response_text": {"type": "STRING"},
                    "follow_up_text": {"type": "STRING"},
                },
                "required": ["english_prompt", "initial_response_text", "follow_up_text"],
            },
        },
        {
            "name": "handle_specific_edit",
            "description": "ویرایش مشخص",
            "parameters": {
                "type": "OBJECT",
                "properties": {"edit_request": {"type": "STRING"}},
                "required": ["edit_request"],
            },
        },
        {
            "name": "regenerate_with_enhancement",
            "description": "بازسازی با بهبود",
            "parameters": {
                "type": "OBJECT",
                "properties": {
                    "enhancement_request": {"type": "STRING"},
                    "previous_english_prompt": {"type": "STRING"},
                    "previous_aspect_ratio": {"type": "STRING"},
                },
                "required": ["enhancement_request", "previous_english_prompt", "previous_aspect_ratio"],
            },
        },
        {
            "name": "change_image_aspect_ratio",
            "description": "تغییر نسبت تصویر",
            "parameters": {
                "type": "OBJECT",
                "properties": {
                    "new_aspect_ratio": {"type": "STRING"},
                    "previous_english_prompt": {"type": "STRING"},
                },
                "required": ["new_aspect_ratio", "previous_english_prompt"],
            },
        },
    ]
    # Image tools come first so the model prefers them over search.
    return [{"function_declarations": image_tools + [search_tool]}]

def enhance_prompt(base_prompt, enhancement_request, model):
    """Ask Gemini to merge a base image prompt with a user's modification.

    Falls back to simple comma concatenation when the API call fails.
    """
    api_key = get_keys_for_request()[0]
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}"
    # BUG FIX: the original f-string wrapped the interpolations in unescaped
    # double quotes inside a double-quoted literal (SyntaxError).
    system_prompt = (
        "You are an expert prompt engineer. Merge the base English image prompt "
        "with the user's modification. Return only the final English prompt.\n\n"
        f'Base Prompt: "{base_prompt}"\nUser\'s Request: "{enhancement_request}"'
    )
    payload = {
        "contents": [{"role": "user", "parts": [{"text": system_prompt}]}],
        "generationConfig": {"temperature": 0.7, "maxOutputTokens": 2048},
    }
    try:
        r = requests.post(url, json=payload, timeout=60)
        r.raise_for_status()
        return r.json()["candidates"][0]["content"]["parts"][0]["text"].strip()
    except Exception:
        return f"{base_prompt}, {enhancement_request}"

def create_artistic_prompt(user_prompt, model):
    """Turn a user idea into a detailed English image prompt via Gemini.

    Falls back to the raw user prompt if the API call fails.
    """
    api_key = get_keys_for_request()[0]
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}"
    instruction = "Convert user's idea to a highly-detailed English prompt. Output ONLY the final English prompt."
    payload = {
        "contents": [{"role": "user", "parts": [{"text": user_prompt}]}],
        "systemInstruction": {"parts": [{"text": instruction}]},
        "generationConfig": {"temperature": 0.7, "maxOutputTokens": 2048},
    }
    try:
        resp = requests.post(url, json=payload, timeout=60)
        resp.raise_for_status()
        return resp.json()["candidates"][0]["content"]["parts"][0]["text"].strip()
    except Exception:
        return user_prompt

def stream_search_results(query):
    """Stream a Persian, Google-Search-grounded Gemini answer as SSE lines.

    Rotates through the key pool on 429 / network errors; yields a single
    SSE error frame when every key fails.
    """
    model = 'gemini-2.5-flash'
    url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse"
    payload = {
        "contents": [{"role": "user", "parts": [{"text": query}]}],
        "tools": [{"google_search": {}}],
        "systemInstruction": {"parts": [{"text": "Answer in Persian."}]},
    }
    for key in get_keys_for_request():
        try:
            with requests.post(url, params={'key': key}, json=payload, stream=True, timeout=180) as resp:
                if resp.status_code == 429:
                    continue  # quota-limited key: rotate to the next one
                resp.raise_for_status()
                for raw in resp.iter_lines():
                    if raw:
                        yield raw.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code': 'SEARCH_FAILED','message': 'سرویس جستجو موقتاً در دسترس نیست.'}})}\n\n"

def is_tool_like_text(txt: str) -> bool:
    """Heuristic: does this model text look like a leaked tool/function call?

    Used to suppress streamed chunks where the model narrates a function
    call (e.g. "generate_image(...)") instead of invoking the tool API.
    """
    if not txt:
        return False
    # BUG FIX: the '(' characters were unescaped, which makes every pattern
    # an invalid regex (unterminated group) and crashes re.search at runtime.
    patterns = [
        r'\bfunctionCall\s*:',
        r'\bhandle_specific_edit\s*\(',
        r'\bchange_image_aspect_ratio\s*\(',
        r'\bregenerate_with_enhancement\s*\(',
        r'\bgenerate_image\s*\(',
        r'print\s*\(\s*handle_specific_edit',
    ]
    return any(re.search(p, txt, flags=re.I) for p in patterns)

def sse_text_event(text: str) -> str:
    """Wrap plain text in a Gemini-style SSE 'data:' frame."""
    body = {'candidates': [{'content': {'parts': [{'text': text}]}}]}
    return f"data: {json.dumps(body)}\n\n"

def classify_user_intent(user_text: str) -> dict:
    """Classify the user's last message into a coarse intent via Gemini.

    Returns a dict with at least an "intent" key; {"intent": "NONE"} for
    empty input or when every API key fails.
    """
    if not user_text or not user_text.strip():
        return {"intent": "NONE"}
    keys = get_keys_for_request()
    model = 'gemini-1.5-flash'
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent"
    # BUG FIX: the JSON example contained unescaped double quotes inside a
    # double-quoted literal (SyntaxError); single-quoted literals restore it.
    system = ('Return strict JSON: {"intent":"...", "normalized_edit":"...", '
              '"new_aspect_ratio":"...", "code_language":"..."}. '
              'intents=[NONE,SPECIFIC_EDIT,ASPECT_RATIO_CHANGE,QUALITY_ENHANCEMENT,NEW_IMAGE,CODE_TASK].')
    payload = {
        "contents": [{"role": "user", "parts": [{"text": user_text}]}],
        "systemInstruction": {"parts": [{"text": system}]},
        "generationConfig": {"temperature": 0.0, "maxOutputTokens": 128},
    }
    for key in keys:
        try:
            r = requests.post(f"{url}?key={key}", json=payload, timeout=25)
            if r.status_code == 429:
                continue  # quota-limited key: rotate to the next one
            r.raise_for_status()
            txt = r.json()["candidates"][0]["content"]["parts"][0]["text"].strip().strip('`').strip()
            # Trim any prose/markdown fencing around the JSON object.
            s, e = txt.find('{'), txt.rfind('}')
            if s != -1 and e != -1:
                txt = txt[s:e + 1]
            parsed = json.loads(txt)
            if "intent" not in parsed:
                return {"intent": "NONE"}
            return parsed
        except Exception:
            continue
    return {"intent": "NONE"}

def stream_text_only(user_text: str, model_name: str):
    """Stream a plain-text Gemini reply (no tools) as SSE lines."""
    system_text = BASE_RULES_FA + "\n[TURN MODE: TEXT ONLY]\n"
    payload = {
        "contents": [{"role": "user", "parts": [{"text": user_text}]}],
        "tools": None,
        "systemInstruction": {"parts": [{"text": system_text}]},
        "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192},
    }
    for key in get_keys_for_request():
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={key}"
            with requests.post(url, json=payload, stream=True, timeout=720) as resp:
                if resp.status_code == 429:
                    continue  # quota-limited key: rotate to the next one
                resp.raise_for_status()
                # Forward only well-formed SSE data frames.
                for raw in resp.iter_lines():
                    if raw and raw.startswith(b"data: "):
                        yield raw.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code':'TEXT_STREAM_FAILED','message':'پاسخ متنی موقتاً در دسترس نیست.'}})}\n\n"

def stream_code_reply(user_text: str, model_name: str, code_language: str = None):
    """Stream a code-focused Gemini reply (no tools) as SSE lines.

    `code_language` is accepted for interface compatibility but is not
    forwarded to the model in the current implementation.
    """
    system_text = BASE_RULES_FA + "\n[TURN MODE: CODE]\n"
    payload = {
        "contents": [{"role": "user", "parts": [{"text": user_text}]}],
        "tools": None,
        "systemInstruction": {"parts": [{"text": system_text}]},
        "generationConfig": {"temperature": 0.2, "maxOutputTokens": 4096},
    }
    for key in get_keys_for_request():
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={key}"
            with requests.post(url, json=payload, stream=True, timeout=720) as resp:
                if resp.status_code == 429:
                    continue  # quota-limited key: rotate to the next one
                resp.raise_for_status()
                for raw in resp.iter_lines():
                    if raw:
                        yield raw.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code':'CODE_STREAM_FAILED','message':'تولید کد موقتاً در دسترس نیست.'}})}\n\n"

def stream_file_analysis(history_for_gemini, model_name: str):
    """Stream a Gemini file-analysis reply (no tools) over the full history."""
    system_text = BASE_RULES_FA + "\n[TURN MODE: FILE ANALYSIS]\n"
    payload = {
        "contents": history_for_gemini,
        "tools": None,
        "systemInstruction": {"parts": [{"text": system_text}]},
        "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192},
    }
    for key in get_keys_for_request():
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={key}"
            with requests.post(url, json=payload, stream=True, timeout=720) as resp:
                if resp.status_code == 429:
                    continue  # quota-limited key: rotate to the next one
                resp.raise_for_status()
                for raw in resp.iter_lines():
                    if raw:
                        yield raw.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code':'FILE_ANALYSIS_FAILED','message':'تحلیل فایل موقتاً در دسترس نیست.'}})}\n\n"

@app.route('/')
def index():
    """Serve the single-page chat UI."""
    return render_template('index.html')

@app.route('/storage/index', methods=['GET'])
def storage_index():
    """Return the chat-session index from the HF dataset; empty on failure."""
    try:
        try:
            path = hf_hub_download(
                repo_id=HF_REPO_ID,
                repo_type="dataset",
                filename="index.json",
                token=HF_TOKEN,
                local_dir=str(HF_LOCAL_CACHE),
                local_dir_use_symlinks=False,
            )
            with open(path, 'r', encoding='utf-8') as fh:
                data = json.load(fh)
        except Exception:
            # Missing/unreadable index is not an error for the client.
            data = {"items": []}
        return jsonify(data)
    except Exception:
        return jsonify({"items": []})

@app.route('/storage/session/<chat_id>', methods=['GET', 'DELETE'])
def storage_session_get_delete(chat_id):
    """GET: fetch one stored session. DELETE: remove it and update the index."""
    if request.method == 'GET':
        try:
            path = hf_hub_download(
                repo_id=HF_REPO_ID, repo_type="dataset",
                filename=f"sessions/{chat_id}.json", token=HF_TOKEN,
                local_dir=str(HF_LOCAL_CACHE), local_dir_use_symlinks=False,
            )
            with open(path, 'r', encoding='utf-8') as fh:
                return jsonify(json.load(fh))
        except Exception:
            return jsonify({"error": "not_found"}), 404
    # DELETE: drop the session file and rewrite index.json without its entry.
    try:
        try:
            idx_path = hf_hub_download(
                repo_id=HF_REPO_ID, repo_type="dataset", filename="index.json",
                token=HF_TOKEN, local_dir=str(HF_LOCAL_CACHE), local_dir_use_symlinks=False,
            )
            with open(idx_path, 'r', encoding='utf-8') as fh:
                index_data = json.load(fh)
        except Exception:
            index_data = {"items": []}
        index_data["items"] = [it for it in index_data.get("items", []) if it.get("id") != chat_id]
        ops = [
            CommitOperationDelete(path_in_repo=f"sessions/{chat_id}.json"),
            CommitOperationAdd(
                path_in_repo="index.json",
                path_or_fileobj=json.dumps(index_data, ensure_ascii=False).encode('utf-8'),
            ),
        ]
        if HF_TOKEN:
            api_hf.create_commit(
                repo_id=HF_REPO_ID, repo_type="dataset", operations=ops,
                commit_message=f"delete session {chat_id}", token=HF_TOKEN,
            )
        return jsonify({"ok": True})
    except Exception as e:
        return jsonify({"ok": False, "error": str(e)}), 500

@app.route('/storage/session', methods=['POST'])
def storage_session_upsert():
    """Create or replace a session file and refresh its entry in index.json."""
    try:
        from datetime import datetime, timezone

        session = request.get_json(force=True)
        if not session or "id" not in session:
            return jsonify({"ok": False, "error": "invalid session"}), 400
        chat_id = str(session["id"])
        title = session.get("title", "")
        updated_at = datetime.now(timezone.utc).isoformat()
        try:
            idx_path = hf_hub_download(
                repo_id=HF_REPO_ID, repo_type="dataset", filename="index.json",
                token=HF_TOKEN, local_dir=str(HF_LOCAL_CACHE), local_dir_use_symlinks=False,
            )
            with open(idx_path, 'r', encoding='utf-8') as fh:
                index_data = json.load(fh)
        except Exception:
            index_data = {"items": []}
        # Replace any previous entry for this chat id, then append the new one.
        items = [it for it in index_data.get("items", []) if it.get("id") != chat_id]
        items.append({"id": chat_id, "title": title or f"chat-{chat_id}", "updated_at": updated_at})
        index_data["items"] = items
        ops = [
            CommitOperationAdd(
                path_in_repo=f"sessions/{chat_id}.json",
                path_or_fileobj=json.dumps(session, ensure_ascii=False).encode('utf-8'),
            ),
            CommitOperationAdd(
                path_in_repo="index.json",
                path_or_fileobj=json.dumps(index_data, ensure_ascii=False).encode('utf-8'),
            ),
        ]
        if HF_TOKEN:
            api_hf.create_commit(
                repo_id=HF_REPO_ID, repo_type="dataset", operations=ops,
                commit_message=f"upsert session {chat_id}", token=HF_TOKEN,
            )
        return jsonify({"ok": True})
    except Exception as e:
        return jsonify({"ok": False, "error": str(e)}), 500

@app.route('/chat', methods=['POST'])
def chat():
    """Main SSE chat endpoint.

    Routes the turn to one of: a client-requested image regeneration, a
    code reply, a text-only reply, file analysis, locally-resolved image
    follow-up intents, or a general tool-enabled model call whose streamed
    function calls are translated into client events.

    NOTE(review): the pasted source had this function's body collapsed onto
    a few run-on lines (plus a stray "text" artifact); it was re-indented
    during review — confirm against the original deployment before shipping.
    """
    data = request.json or {}
    model = data.get('model', 'gemini-2.5-flash')
    history = data.get('history', [])
    action_payload = data.get('action')
    chat_id = data.get('chatId')
    inline_files = data.get('inlineFiles', [])
    if not chat_id:
        return Response(json.dumps({"error": "chatId is required"}), status=400, mimetype='application/json')

    # Cache freshly-uploaded inline files; failures must not kill the turn.
    try:
        update_file_cache_from_inline_files(chat_id, inline_files)
    except Exception:
        pass

    # Explicit client action: regenerate a previous image with an enhancement.
    if action_payload and action_payload.get('intent') == 'regenerate_with_enhancement':
        def stream_action_result():
            try:
                base_prompt = action_payload.get("base_english_prompt")
                enhancement_request = action_payload.get("enhancement_request")
                aspect_ratio = action_payload.get("aspect_ratio", "9:16")
                if not base_prompt or not enhancement_request:
                    yield f"data: {json.dumps({'error': {'code':'MISSING_ARGS','message':'پارامترهای لازم موجود نیست.'}})}\n\n"
                    return
                new_prompt = enhance_prompt(base_prompt, enhancement_request, model)
                yield f"data: {json.dumps({'intent':'generate_image','english_prompt': new_prompt,'aspect_ratio': aspect_ratio})}\n\n"
            except Exception as e:
                yield f"data: {json.dumps({'error': {'code':'ACTION_EXECUTION_FAILED','message': str(e)}})}\n\n"
        return Response(stream_action_result(), mimetype='text/event-stream')

    def stream_events():
        def send_event(event_data):
            return f"data: {json.dumps(event_data)}\n\n"

        # Rebuild a Gemini-safe history: keep text/inlineData parts, replace
        # generated images and non-image files with textual notes.
        history_for_gemini = []
        known_keys = ['text', 'inlineData', 'functionCall', 'functionResponse',
                      'image_url', 'edited_images', 'fileUrl', 'mimeType', 'name']
        for msg in history:
            if 'role' not in msg:
                continue
            new_msg = {'role': msg['role'], 'parts': []}
            for part in (msg.get('parts') or []):
                if isinstance(part, dict) and any(k in part for k in known_keys):
                    if 'image_url' in part or 'edited_images' in part:
                        new_msg['parts'].append({'text': '(توضیح: تصویر در پیام‌های قبلی ساخته یا ویرایش شده است.)'})
                    elif 'fileUrl' in part and 'mimeType' in part and not part.get('mimeType', '').startswith('image/'):
                        new_msg['parts'].append({'text': f"(یادداشت: فایلی با نوع {part.get('mimeType')} قبلاً ارسال شده است.)"})
                    elif 'text' in part or 'inlineData' in part:
                        new_msg['parts'].append(part)
            if new_msg['parts']:
                history_for_gemini.append(new_msg)
        history_for_gemini = inject_inline_data_into_history(chat_id, history_for_gemini)

        # Locate the last user turn and what it carried.
        last_user_idx_local = -1
        for i in range(len(history_for_gemini) - 1, -1, -1):
            if history_for_gemini[i].get('role') == 'user':
                last_user_idx_local = i
                break
        last_user_text = ""
        has_inline_file = False
        if last_user_idx_local != -1:
            lu = history_for_gemini[last_user_idx_local]
            last_user_text = ''.join(p.get('text', '') for p in (lu.get('parts') or []) if p.get('text')).strip()
            has_inline_file = any('inlineData' in p for p in (lu.get('parts') or []))
        image_in_recent_history = any(
            m.get('role') == 'model' and any('image_url' in p or 'edited_images' in p for p in (m.get('parts') or []))
            for m in history[-4:]
        )

        intent_info = classify_user_intent(last_user_text) if last_user_text else {"intent": "NONE"}
        intent = (intent_info.get("intent") or "NONE").upper()

        # Fast-path routes that bypass the tool-calling model.
        if intent == "CODE_TASK":
            yield from stream_code_reply(last_user_text, model, intent_info.get("code_language"))
            return
        if image_in_recent_history and last_user_text and intent == "NONE":
            yield from stream_text_only(last_user_text, model)
            return
        if has_inline_file and intent in {"NONE"}:
            yield from stream_file_analysis(history_for_gemini, model)
            return

        # Image follow-up intents resolved locally from chat metadata.
        if image_in_recent_history and last_user_text:
            if intent == "SPECIFIC_EDIT":
                merged = intent_info.get("normalized_edit") or last_user_text
                update_chat_meta(chat_id, last_edit_prompt=merged)
                yield send_event({"intent": "edit_image", "prompt": merged})
                return
            elif intent == "ASPECT_RATIO_CHANGE":
                meta = load_chat_meta(chat_id)
                ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                new_ar = intent_info.get("new_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                yield send_event({"intent": "generate_image", "english_prompt": ep, "aspect_ratio": new_ar})
                return
            elif intent == "QUALITY_ENHANCEMENT":
                meta = load_chat_meta(chat_id)
                prev_ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                prev_ar = meta.get("last_aspect_ratio") or "9:16"
                enh = intent_info.get("normalized_edit") or last_user_text
                yield send_event({
                    "intent": "clarify_action",
                    "question": "بسیار خب! تصویر فعلی را ویرایش کنم یا یک تصویر جدید بسازم؟",
                    "options": {
                        "edit": {"label": "ویرایش همین تصویر", "intent": "edit_image", "prompt": enh},
                        "regenerate": {"label": "ساخت تصویر جدید", "intent": "regenerate_with_enhancement",
                                       "base_english_prompt": prev_ep, "enhancement_request": enh,
                                       "aspect_ratio": prev_ar},
                    },
                })
                return
            elif intent == "NEW_IMAGE":
                ep = create_artistic_prompt(last_user_text, model)
                update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio="9:16", last_edit_prompt=None)
                yield send_event({
                    "intent": "generate_image_with_text",
                    "text": "در حال ساخت تصویر جدید برای شما...",
                    "image_generation_payload": {"english_prompt": ep, "aspect_ratio": "9:16"},
                    "follow_up_text": "تصویر شما آماده شد!",
                })
                return

        # General turn: let the model decide, with all tools available.
        tools_for_request = get_all_tools()
        final_system_prompt = BASE_RULES_FA + "\n[TURN MODE: GENERAL]\n"
        keys = get_keys_for_request()
        accumulated_args = None
        fn_name = None
        for k in keys:
            try:
                url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse&key={k}"
                payload = {
                    "contents": history_for_gemini,
                    "tools": tools_for_request,
                    "systemInstruction": {"parts": [{"text": final_system_prompt}]},
                    "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192},
                }
                with requests.post(url, json=payload, stream=True, timeout=720) as r:
                    if r.status_code == 429:
                        continue  # quota-limited key: rotate to the next one
                    r.raise_for_status()
                    for line in r.iter_lines():
                        if line and line.startswith(b'data: '):
                            chunk_str = line.decode('utf-8')[6:]
                            try:
                                data_chunk = json.loads(chunk_str)
                                part = data_chunk.get("candidates", [{}])[0].get("content", {}).get("parts", [{}])[0]
                                if "functionCall" in part:
                                    # Function-call args may stream in pieces; merge them.
                                    if not accumulated_args:
                                        accumulated_args = {}
                                        fn_name = part["functionCall"].get("name")
                                    args_chunk = part["functionCall"].get("args", {})
                                    for kk, vv in (args_chunk or {}).items():
                                        if kk not in accumulated_args:
                                            accumulated_args[kk] = vv
                                        elif isinstance(accumulated_args.get(kk), str):
                                            accumulated_args[kk] += str(vv)
                                elif "text" in part:
                                    # Suppress chunks that narrate a tool call as text.
                                    if not is_tool_like_text(part["text"]):
                                        yield sse_text_event(part["text"])
                            except Exception:
                                continue
                if accumulated_args:
                    args = accumulated_args
                    if fn_name == "handle_specific_edit":
                        raw_req = (args.get("edit_request") or "").strip()
                        if not raw_req:
                            yield send_event({"error": {"code": "EMPTY_EDIT_REQUEST", "message": "دستور ویرایش دریافت نشد."}})
                        else:
                            merged = raw_req
                            update_chat_meta(chat_id, last_edit_prompt=merged)
                            yield send_event({"intent": "edit_image", "prompt": merged})
                    elif fn_name == "regenerate_with_enhancement":
                        meta = load_chat_meta(chat_id)
                        prev_ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                        prev_ar = args.get("previous_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                        enhancement_request = args.get("enhancement_request", "")
                        yield send_event({
                            "intent": "clarify_action",
                            "question": "بسیار خب! با این تغییرات چه کنم؟",
                            "options": {
                                "edit": {"label": "ویرایش همین تصویر", "intent": "edit_image", "prompt": enhancement_request},
                                "regenerate": {"label": "ساخت تصویر جدید", "intent": "regenerate_with_enhancement",
                                               "base_english_prompt": prev_ep,
                                               "enhancement_request": enhancement_request,
                                               "aspect_ratio": prev_ar},
                            },
                        })
                    elif fn_name == "change_image_aspect_ratio":
                        meta = load_chat_meta(chat_id)
                        ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                        new_ar = args.get("new_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                        yield send_event({"intent": "generate_image", "english_prompt": ep, "aspect_ratio": new_ar})
                    elif fn_name == "generate_image":
                        ep = args.get("english_prompt")
                        ar = args.get("aspect_ratio", "9:16")
                        update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio=ar, last_edit_prompt=None)
                        yield send_event({
                            "intent": "generate_image_with_text",
                            "text": args.get("initial_response_text"),
                            "image_generation_payload": {"english_prompt": ep, "aspect_ratio": ar},
                            "follow_up_text": args.get("follow_up_text"),
                        })
                    elif fn_name == "perform_internet_search":
                        yield from stream_search_results(args.get('query'))
                return
            except requests.exceptions.RequestException:
                continue
        yield send_event({"error": {"code": "ALL_KEYS_FAILED", "message": "تمام کلیدهای API خطا دادند یا درخواست نامعتبر بود."}})

    return Response(stream_events(), mimetype='text/event-stream')

if name == 'main':
app.run(debug=True, host='0.0.0.0', port=os.environ.get("PORT", 7860)) این یکی Flask==2.2.2
Werkzeug==2.2.2
gunicorn==20.1.0
requests==2.28.1
gevent==22.10.2
sseclient-py==1.8.0
filelock==3.13.1
huggingface_hub==0.24.6 این یکی // api.js
import * as state from './state.js';
import * as ui from './ui.js';

const UPLOADER_API_URL = 'https://asrasahar-ok-uploader-bot.hf.space/upload';
const YOUR_IMAGE_UPLOAD_SERVER_URL = 'https://www.aisada.ir/hamed/upload.php';
const CONVERTER_API_URL = 'https://texttopdf-5irq.onrender.com/';
const HF_IMAGE_GEN_URL_BASE = "https://black-forest-labs-flux-1-schnell.hf.space";
const FN_INDEX_GEN_IMAGE = 2;
const TRIGGER_ID_GEN_IMAGE = 5;
const STORAGE_INDEX_URL = '/storage/index';
const STORAGE_SESSION_URL = '/storage/session';

function getDimensionsFromRatio(ratioString) {
    // Map a human aspect-ratio string (e.g. "9:16", "16x9", or Persian "۹ به ۱۶"
    // separators) to generation pixel dimensions; defaults to portrait 9:16.
    const DEFAULT = { width: 768, height: 1344 };
    if (!ratioString || typeof ratioString !== 'string') return DEFAULT;
    // Strip whitespace, ':', 'x' and the letters of the Persian words "در"/"به".
    const key = ratioString.toLowerCase().replace(/[\sدر:بهx]/g, '');
    const TABLE = {
        '11':  { width: 1024, height: 1024 },
        '169': { width: 1344, height: 768 },
        '916': { width: 768,  height: 1344 },
        '43':  { width: 1152, height: 864 },
        '34':  { width: 864,  height: 1152 },
        '32':  { width: 1216, height: 832 },
        '23':  { width: 832,  height: 1216 },
    };
    return TABLE[key] || DEFAULT;
}

function windowHistory(messages, maxCount = 40, maxChars = 8000) {
    // Keep at most maxCount trailing messages, further trimmed from the
    // front so the total text length stays within maxChars.
    const all = [...messages];
    let start = Math.max(0, all.length - maxCount);
    const tail = all.slice(start);
    let usedChars = 0;
    // Walk backwards accumulating text length; cut where the budget overflows.
    for (let i = tail.length - 1; i >= 0; i--) {
        const text = (tail[i].parts || []).map(p => p.text || '').join('');
        usedChars += text.length;
        if (usedChars > maxChars) {
            start = all.length - (tail.length - i);
            break;
        }
    }
    return all.slice(start);
}

export async function convertTextToFile(content, format, buttonElement) {
    // Convert text to a downloadable file via the converter service, re-host
    // the result, then ask the host page to open its URL.
    // BUG FIX: every template literal below had lost its backticks in the paste.
    if (buttonElement) ui.showLoadingOnButton(buttonElement, true);
    try {
        const form1 = new FormData();
        form1.append('content', content);
        form1.append('format', format);
        const r1 = await fetch(CONVERTER_API_URL, { method: 'POST', body: form1 });
        if (!r1.ok) throw new Error(`HTTP ${r1.status}`);
        const blob = await r1.blob();
        const fileName = `alpha-export-${Date.now()}.${format}`;
        const form2 = new FormData();
        form2.append('image', blob, fileName);
        const r2 = await fetch(YOUR_IMAGE_UPLOAD_SERVER_URL, { method: 'POST', body: form2 });
        if (!r2.ok) throw new Error(await r2.text().catch(() => `HTTP ${r2.status}`));
        const j = await r2.json();
        if (j.success && j.url) window.parent.postMessage({ type: 'OPEN_EXTERNAL_URL', url: j.url }, '*');
        else throw new Error(j.message || 'upload failed');
    } catch (e) {
        alert(`خطا: ${e.message}`);
    } finally {
        if (buttonElement) ui.showLoadingOnButton(buttonElement, false);
    }
}

function summarizeHistoryForApi(history) {
    // Produce a trimmed, upload-safe copy of the history for the backend:
    // drops raw base64 payloads, annotates non-image files with an analysis
    // instruction, and replays the English prompt used for generated images.
    // BUG FIX: all template literals below had lost their backticks in the paste.
    return windowHistory(history).map(msg => {
        if (msg.isTemporary) return null;
        const newMsg = { ...msg };
        const newParts = [];
        let has = false;
        if (newMsg.parts && newMsg.parts.length > 0) {
            for (const part of newMsg.parts) {
                const p = { ...part };
                delete p.base64Data;
                delete p.inlineData;
                if (p.fileUrl && p.mimeType && !p.mimeType.startsWith('image/')) {
                    let t = "این فایل";
                    if (p.mimeType.startsWith('video/')) t = "این ویدیو";
                    else if (p.mimeType.startsWith('audio/')) t = "این فایل صوتی";
                    else t = `این سند (${p.name})`;
                    let tp = newParts.find(x => x.text);
                    const note = `\n(دستور: ${t} را تحلیل کن و محتوای آن را شرح بده.)`;
                    if (tp) tp.text += note; else newParts.push({ text: note });
                }
                if (p.text || p.fileUrl || p.image_url || p.edited_images) {
                    newParts.push(p);
                    has = true;
                }
            }
        }
        if (msg.role === 'user' && msg.english_prompt_used) {
            const pn = `پرامپت انگلیسی استفاده شده: "${msg.english_prompt_used}"`;
            const rn = msg.aspect_ratio_used ? `, نسبت تصویر: "${msg.aspect_ratio_used}"` : "";
            const mem = `\n[یادآوری برای مدل: این پیام کاربر منجر به ساخت تصویر شد. ${pn}${rn}]`;
            let tp = newParts.find(p => p.text);
            if (tp) tp.text += mem; else newParts.push({ text: mem });
            has = true;
        }
        newMsg.parts = newParts;
        return has ? newMsg : null;
    }).filter(Boolean);
}

function uploadFileToHuggingFace(file, onProgress) {
    // POST a file to the uploader space with progress reporting; resolves
    // with the parsed JSON response (which must contain hf_url).
    return new Promise((resolve, reject) => {
        const xhr = new XMLHttpRequest();
        state.setCurrentUploadXHR(xhr);
        xhr.open('POST', UPLOADER_API_URL, true);
        xhr.onload = function () {
            state.setCurrentUploadXHR(null);
            try {
                const res = JSON.parse(xhr.responseText);
                if (xhr.status >= 200 && xhr.status < 300) {
                    if (res.hf_url) resolve(res); else reject(new Error("invalid response"));
                // BUG FIX: backticks restored around the `HTTP ${...}` template literal.
                } else reject(new Error(res.error || `HTTP ${xhr.status}`));
            } catch { reject(new Error("bad response")); }
        };
        xhr.onerror = function () {
            state.setCurrentUploadXHR(null);
            // status 0 with no statusText indicates a client-side abort.
            if (xhr.status === 0 && !xhr.statusText) reject(new Error('آپلود لغو شد.'));
            else reject(new Error('خطای شبکه.'));
        };
        xhr.upload.onprogress = function (e) {
            if (e.lengthComputable && onProgress) onProgress(Math.round((e.loaded / e.total) * 100));
        };
        const fd = new FormData();
        fd.append('file', file);
        xhr.send(fd);
    });
}

async function uploadUrlToHuggingFace(url) {
    // Ask the uploader space to fetch and re-host a remote URL.
    return new Promise((resolve, reject) => {
        const xhr = new XMLHttpRequest();
        xhr.open('POST', UPLOADER_API_URL, true);
        xhr.setRequestHeader('Content-Type', 'application/json');
        xhr.onload = function () {
            try {
                const res = JSON.parse(xhr.responseText);
                if (xhr.status >= 200 && xhr.status < 300 && res.hf_url) resolve(res);
                // BUG FIX: backticks restored around the `HTTP ${...}` template literal.
                else reject(new Error(res.error || `HTTP ${xhr.status}`));
            } catch { reject(new Error("bad response")); }
        };
        xhr.onerror = function () { reject(new Error('خطای شبکه.')); };
        xhr.send(JSON.stringify({ url }));
    });
}

export async function processAndUploadFile(file, onProgress) {
    // Upload the file and read its base64 payload concurrently.
    const toBase64 = blob => new Promise((resolve, reject) => {
        const reader = new FileReader();
        // result is a data: URL — keep only the base64 part after the comma.
        reader.onload = () => resolve(reader.result.split(',')[1]);
        reader.onerror = err => reject(err);
        reader.readAsDataURL(blob);
    });
    const [uploaded, base64] = await Promise.all([
        uploadFileToHuggingFace(file, onProgress),
        toBase64(file),
    ]);
    return { url: uploaded.hf_url, name: file.name, mimeType: file.type, base64Data: base64 };
}

export async function uploadToAISADAAndOpenAlpha(imageUrlFromSpace, button) {
    // Re-host a generated image on the AISADA server and ask the parent
    // frame to open the resulting URL.
    // BUG FIX: every template literal / innerHTML string below had lost its
    // backticks in the paste.
    if (!imageUrlFromSpace) { if (button) button.textContent = "خطا"; return; }
    if (button) {
        button.disabled = true;
        button.innerHTML = `<div class="typing-indicator" style="height:16px; gap: 3px;"><span style="width:6px; height:6px; background-color:currentColor;"></span><span style="width:6px; height:6px; background-color:currentColor;"></span><span style="width:6px; height:6px; background-color:currentColor;"></span></div>`;
    }
    try {
        const r = await fetch(imageUrlFromSpace);
        if (!r.ok) throw new Error(`HTTP ${r.status}`);
        let blob = await r.blob();
        const fd = new FormData();
        let filename = `generated_alpha_image_${Date.now()}.webp`;
        try {
            // Prefer the original filename from the URL when it has an extension.
            const p = new URL(imageUrlFromSpace).pathname.split('/').pop();
            if (p && p.includes('.')) filename = p.split('?')[0];
        } catch {}
        fd.append('image', blob, filename);
        const up = await fetch(YOUR_IMAGE_UPLOAD_SERVER_URL, { method: 'POST', body: fd });
        if (!up.ok) throw new Error(await up.text().catch(() => `HTTP ${up.status}`));
        const j = await up.json();
        if (j.success && j.url) window.parent.postMessage({ type: 'OPEN_EXTERNAL_URL', url: j.url }, '*');
        else throw new Error(j.message || 'upload failed');
    } catch (e) {
        if (button) button.textContent = "خطا";
        alert(`خطا: ${e.message}`);
    } finally {
        if (button) {
            setTimeout(() => {
                button.disabled = false;
                button.innerHTML = `<svg class="w-4 h-4" fill="currentColor" viewBox="0 0 24 24"><path d="M19 9h-4V3H9v6H5l7 7 7-7zM5 18v2h14v-2H5z"/></svg>`;
            }, 2000);
        }
    }
}

// Re-encode an image blob as PNG via an offscreen canvas.
// Fix: canvas.toBlob may call back with null (encoding failure / zero-sized
// canvas); the original resolved with null and downstream code would break —
// now that case rejects, and the object URL is always revoked.
function convertImageBlobToPng(imageBlob) {
  return new Promise((resolve, reject) => {
    const objectUrl = URL.createObjectURL(imageBlob);
    const img = new Image();
    img.onload = () => {
      const canvas = document.createElement('canvas');
      canvas.width = img.width; canvas.height = img.height;
      const ctx = canvas.getContext('2d');
      ctx.drawImage(img, 0, 0);
      canvas.toBlob(blob => {
        URL.revokeObjectURL(objectUrl);
        if (blob) resolve(blob);
        else reject(new Error('خطا در تبدیل تصویر.'));
      }, 'image/png');
    };
    img.onerror = () => { URL.revokeObjectURL(objectUrl); reject(new Error('خطا در تبدیل تصویر.')); };
    img.src = objectUrl;
  });
}

// Send an image (downloaded from `fileUrl`) plus an edit `prompt` to the
// external editor worker, persist the returned images on HF, and replace
// the message at `messageIndex` with the edited-image result.
export async function runExternalImageEditor(prompt, fileUrl, el, messageIndex) {
  const activeChat = state.getActiveChat();
  const RENDER_API_URL = 'https://alfa-editor-worker.onrender.com/api/edit';
  try {
    if (!fileUrl) throw new Error("فایل یافت نشد.");
    ui.showStatusUpdate("در حال دانلود فایل...", el);
    const r = await fetch(fileUrl);
    if (!r.ok) throw new Error(`HTTP ${r.status}`);
    let blob = await r.blob();
    // The editor only accepts PNG/JPEG; convert anything else first.
    const mt = blob.type || r.headers.get('content-type');
    if (mt && mt !== 'image/png' && mt !== 'image/jpeg') blob = await convertImageBlobToPng(blob);
    ui.showStatusUpdate("در حال ارسال برای ویرایش...", el);
    const fd = new FormData();
    fd.append("image", blob, 'image.png');
    fd.append("prompt", prompt);
    const resp = await fetch(RENDER_API_URL, { method: 'POST', body: fd, signal: state.globalAbortController.signal });
    const result = await resp.json();
    if (!resp.ok) throw new Error(result.error || `HTTP ${resp.status}`);
    if (!result.image_urls || result.image_urls.length === 0) throw new Error("تصویری برنگشت.");
    // Worker URLs are temporary — mirror them to permanent HF links.
    ui.showStatusUpdate("در حال دائمی‌سازی لینک‌ها...", el);
    const urls = await Promise.all(result.image_urls.map(u => uploadUrlToHuggingFace(u).then(x => x.hf_url)));
    const finalMessage = { role: 'model', parts: [{ edited_images: urls }] };
    if (typeof messageIndex !== 'undefined') {
      activeChat.messages[messageIndex] = finalMessage;
      state.saveSessions();
      await saveSession(activeChat);
      ui.addMessageToUI(finalMessage, messageIndex, { isLastModel: true, animate: false }, el);
    }
  } catch (e) {
    if (e.name !== 'AbortError') ui.displayError(el, e.message || String(e));
    else el.querySelector('.message-content-area').innerHTML += '<p class="text-xs text-slate-500 mt-2 text-center">-- عملیات ویرایش متوقف شد --</p>';
  } finally {
    ui.resetState();
  }
}

// Queue an image-generation job on the HF Gradio space, follow progress over
// its SSE stream, mirror the finished image to a permanent HF URL, and write
// the result as the chat's final model message.
export async function runExternalImageGenerator(englishPrompt, aspectRatio, el, followUpText = null) {
  const activeChat = state.getActiveChat();
  const sessionHash = Math.random().toString(36).substring(2, 15);
  try {
    const dim = getDimensionsFromRatio(aspectRatio);
    ui.showStatusUpdate("ارسال درخواست تصویر...", el);
    // Positional Gradio payload: [prompt, seed, randomize, width, height, steps].
    const payload = [ englishPrompt, Math.floor(Math.random() * 2147483647), true, dim.width, dim.height, 4 ];
    const joinPayload = { fn_index: FN_INDEX_GEN_IMAGE, data: payload, event_data: null, session_hash: sessionHash, trigger_id: TRIGGER_ID_GEN_IMAGE };
    const joinResponse = await fetch(`${HF_IMAGE_GEN_URL_BASE}/queue/join`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(joinPayload), signal: state.globalAbortController.signal });
    if (!joinResponse.ok) {
      let t = `HTTP ${joinResponse.status}`;
      try { const j = await joinResponse.json(); t = j.error || j.detail || t; } catch {}
      throw new Error(t);
    }
    const jr = await joinResponse.json();
    if (!jr.event_id) throw new Error("event_id دریافت نشد.");
    // Only one image stream at a time: close any stale EventSource first.
    if (state.currentImageEventSource) state.currentImageEventSource.close();
    const es = new EventSource(`${HF_IMAGE_GEN_URL_BASE}/queue/data?session_hash=${sessionHash}`);
    state.setCurrentImageEventSource(es);
    await new Promise((resolve, reject) => {
      es.onmessage = async (event) => {
        const data = JSON.parse(event.data);
        if (data.msg === "process_starts") ui.showStatusUpdate("پردازش تصویر آغاز شد...", el);
        if (data.msg === "progress") {
          if (data.progress_data && data.progress_data.length > 0) {
            const p = data.progress_data[0];
            ui.showStatusUpdate(`در حال ساخت... (${p.index}/${p.length})`, el);
          }
        }
        if (data.msg === "process_completed") {
          es.close();
          state.setCurrentImageEventSource(null);
          if (data.success && data.output?.data?.[0]?.url) {
            const tempUrl = data.output.data[0].url;
            ui.showStatusUpdate("در حال دائمی‌سازی لینک...", el);
            try {
              const up = await uploadUrlToHuggingFace(tempUrl);
              const permanentUrl = up.hf_url;
              const newParts = [{ image_url: permanentUrl }];
              if (followUpText) newParts.push({ text: followUpText });
              const newModelMessage = { role: 'model', parts: newParts };
              // Overwrite the temporary placeholder message in place.
              activeChat.messages[activeChat.messages.length - 1] = newModelMessage;
              state.saveSessions();
              await saveSession(activeChat);
              ui.addMessageToUI(newModelMessage, activeChat.messages.length - 1, { isLastModel: true, animate: false }, el);
              resolve();
            } catch (e) { reject(new Error(`خطای آپلود: ${e.message}`)); }
          } else reject(new Error(data.output?.error || "URL نامعتبر."));
        }
        if (data.msg === 'queue_full') { es.close(); reject(new Error("صف پر است.")); }
      };
      es.onerror = () => { es.close(); state.setCurrentImageEventSource(null); reject(new Error("قطع ارتباط با سرور تصویر.")); };
    });
  } catch (e) {
    if (e.name !== 'AbortError') ui.displayError(el, e.message);
    else el.querySelector('.message-content-area').innerHTML += '<p class="text-xs text-slate-500 mt-2 text-center">-- تولید پاسخ متوقف شد --</p>';
  } finally {
    ui.resetState();
  }
}

// Stream a model response from /chat over SSE, dispatching special `intent`
// events (image generation/editing, clarification) and accumulating plain
// text. Auto-continues up to 3 rounds when the model stops on MAX_TOKENS.
// NOTE: the transcript had collapsed this body onto two lines with a stray
// "text" fence token and stripped template-literal backticks; restored here.
export async function streamResponse(modelBubbleOuterDivElement, incomingHistory, chatId, actionPayload = null) {
  state.setGlobalAbortController(new AbortController());
  let fullResponseText = "";
  let lastFinishReason = null;
  const activeChat = state.getActiveChat();
  let finalMessageObject = { role: 'model', parts: [] };

  // Runs one streaming round; returns 'break' when an intent handler took
  // over the conversation, 'done' when the stream ended normally.
  async function doStream(payload, { appendToSameMessage = true } = {}) {
    const response = await fetch('/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      signal: state.globalAbortController.signal,
      body: JSON.stringify(payload)
    });
    if (!response.ok) {
      const err = await response.json().catch(() => ({ error: { message: `HTTP ${response.status}` } }));
      throw new Error(err.error?.message || `HTTP ${response.status}`);
    }
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";

    // Parse one raw SSE event (the text between blank-line separators).
    async function processEvent(rawEvent) {
      const lines = rawEvent.split('\n');
      const dataPayload = lines.filter(l => l.startsWith('data:')).map(l => l.slice(5).trim()).filter(Boolean).join('\n');
      if (!dataPayload) return;
      const data = JSON.parse(dataPayload);
      const lastUserMessage = activeChat.messages[activeChat.messages.length - 2];
      if (data.intent === 'generate_image') {
        reader.cancel();
        if (lastUserMessage && lastUserMessage.role === 'user') {
          lastUserMessage.english_prompt_used = data.english_prompt;
          lastUserMessage.aspect_ratio_used = data.aspect_ratio;
          state.saveSessions();
          await saveSession(activeChat);
        }
        await runExternalImageGenerator(data.english_prompt, data.aspect_ratio, modelBubbleOuterDivElement);
        return 'break';
      }
      if (data.intent === 'generate_image_with_text') {
        reader.cancel();
        const { text, image_generation_payload, follow_up_text } = data;
        if (lastUserMessage && lastUserMessage.role === 'user') {
          lastUserMessage.english_prompt_used = image_generation_payload.english_prompt;
          lastUserMessage.aspect_ratio_used = image_generation_payload.aspect_ratio;
          state.saveSessions();
          await saveSession(activeChat);
        }
        ui.streamFinalText(text, modelBubbleOuterDivElement);
        await runExternalImageGenerator(image_generation_payload.english_prompt, image_generation_payload.aspect_ratio, modelBubbleOuterDivElement, follow_up_text);
        return 'break';
      }
      if (data.intent === 'clarify_action') {
        reader.cancel();
        const commandMessage = { role: 'model', clarification: data.options, question: data.question, parts: [] };
        activeChat.messages[activeChat.messages.length - 1] = commandMessage;
        state.saveSessions();
        await saveSession(activeChat);
        ui.addMessageToUI(commandMessage, activeChat.messages.length - 1, { isLastModel: true }, modelBubbleOuterDivElement);
        ui.resetState();
        return 'break';
      }
      if (data.intent === 'edit_image') {
        reader.cancel();
        // Walk history backwards (skipping the placeholder) for the most
        // recent generated/edited/uploaded image to edit.
        let fileUrlForEditing = null;
        for (let i = incomingHistory.length - 2; i >= 0; i--) {
          const msg = incomingHistory[i];
          if (!msg?.parts) continue;
          const p1 = msg.parts.find(p => p.image_url || (p.edited_images && p.edited_images.length > 0));
          if (p1) { fileUrlForEditing = p1.image_url || p1.edited_images[0]; break; }
          const p2 = msg.parts.find(p => p.fileUrl);
          if (p2) { fileUrlForEditing = p2.fileUrl; break; }
        }
        if (!fileUrlForEditing) {
          ui.displayError(modelBubbleOuterDivElement, "فایلی برای ویرایش پیدا نشد.");
          ui.resetState();
          return 'break';
        }
        await runExternalImageEditor(data.prompt, fileUrlForEditing, modelBubbleOuterDivElement, activeChat.messages.length - 1);
        return 'break';
      }
      if (data.error) throw new Error(data.error.message || JSON.stringify(data.error));
      if (data.candidates) {
        const c = data.candidates[0] || {};
        if (typeof c.finishReason !== 'undefined' && c.finishReason !== null) {
          lastFinishReason = c.finishReason;
        }
        const parts = (c.content && c.content.parts) || [];
        const textPart = parts.find(p => typeof p.text === 'string');
        if (textPart && textPart.text) {
          fullResponseText += textPart.text;
          ui.streamFinalText(fullResponseText, modelBubbleOuterDivElement, false);
        }
      }
    }

    // Read the body, splitting the buffer on the SSE event separator "\n\n".
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      let sepIndex;
      while ((sepIndex = buffer.indexOf('\n\n')) !== -1) {
        const rawEvent = buffer.slice(0, sepIndex).trim();
        buffer = buffer.slice(sepIndex + 2);
        if (!rawEvent) continue;
        const res = await processEvent(rawEvent);
        if (res === 'break') return 'break';
      }
    }
    // Flush any trailing event left in the buffer after EOF.
    if (buffer.trim().length) {
      const res = await processEvent(buffer.trim());
      if (res === 'break') return 'break';
    }
    return 'done';
  }

  function makePayload(actionPayloadOverride = null) {
    const historyForApi = summarizeHistoryForApi(actionPayload ? incomingHistory.slice(0, -1) : incomingHistory);
    const inlineFiles = [];
    for (const msg of incomingHistory) {
      if (!msg || !msg.parts) continue;
      for (const p of msg.parts) {
        if (p.base64Data && p.mimeType) inlineFiles.push({ fileUrl: p.fileUrl || null, mimeType: p.mimeType, data: p.base64Data });
      }
    }
    return { history: historyForApi, model: state.getActiveChat().model, chatId, action: actionPayloadOverride || actionPayload, inlineFiles };
  }

  try {
    const firstRun = await doStream(makePayload());
    if (firstRun !== 'break') {
      // Auto-continue truncated responses, bounded to 3 extra rounds.
      let rounds = 0;
      while (lastFinishReason === 'MAX_TOKENS' && rounds < 3) {
        rounds += 1;
        const payload = makePayload({ intent: 'auto_continue' });
        const r = await doStream(payload, { appendToSameMessage: true });
        if (r === 'break') break;
        if (lastFinishReason !== 'MAX_TOKENS') break;
      }
    }
    if (fullResponseText) finalMessageObject.parts.push({ text: fullResponseText });
    if (finalMessageObject.parts.length > 0) activeChat.messages[activeChat.messages.length - 1] = finalMessageObject;
    else activeChat.messages.pop();
    state.saveSessions();
    await saveSession(activeChat);
    ui.updateMessageActions(modelBubbleOuterDivElement, finalMessageObject, false, true);
  } catch (e) {
    if (e.name === 'AbortError') {
      modelBubbleOuterDivElement.querySelector('.message-content-area').innerHTML += '<p class="text-xs text-slate-500 mt-2 text-center">-- عملیات متوقف شد --</p>';
    } else {
      ui.displayError(modelBubbleOuterDivElement, e.message || 'خطا');
    }
  } finally {
    ui.resetState();
  }
}

// Fetch the session index, then each session body, newest first.
// Returns [] when the index is unavailable; individual session fetch
// failures are skipped silently.
export async function loadAllSessionsFromHF() {
  const r = await fetch(STORAGE_INDEX_URL, { method: 'GET' });
  if (!r.ok) return [];
  const idx = await r.json();
  const ids = (idx.items || []).sort((a, b) => new Date(b.updated_at) - new Date(a.updated_at)).map(x => x.id);
  const sessions = [];
  for (const id of ids) {
    const sRes = await fetch(`${STORAGE_SESSION_URL}/${encodeURIComponent(id)}`, { method: 'GET' });
    if (sRes.ok) sessions.push(await sRes.json());
  }
  return sessions;
}

// Persist a single chat session to the server-side store.
export async function saveSession(session) {
  const options = {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(session),
  };
  await fetch(STORAGE_SESSION_URL, options);
}

export async function deleteSession(id) {
await fetch(${STORAGE_SESSION_URL}/${encodeURIComponent(id)}, { method: 'DELETE' });
} این یکی // main.js
import * as state from './state.js';
import * as ui from './ui.js';
import * as api from './api.js';

// Handle a file chosen from either file input: upload it, remember the
// result as the pending attachment, and refresh the preview UI.
async function handleFileSelection(event) {
  const file = event.target.files[0];
  if (!file) return;
  ui.showFileUploading(file.name);
  ui.dom.submitButton.disabled = true;
  try {
    const uploaded = await api.processAndUploadFile(file, (p) => ui.updateUploadProgress(p));
    state.setAttachedFile(uploaded);
    ui.showFileReady(file.name, uploaded.mimeType, uploaded.url);
  } catch (err) {
    // A user-initiated cancel just clears the preview; real errors are shown.
    if (err.message === 'آپلود توسط کاربر لغو شد.') ui.hideFilePreview();
    else ui.showFileError(err.message);
  } finally {
    event.target.value = '';
    ui.toggleFilePopupMenu(false);
    ui.dom.submitButton.disabled = false;
  }
}

// Save the currently active chat session (if any) to remote storage.
async function persistActive() {
  const active = state.getActiveChat();
  if (!active) return;
  await api.saveSession(active);
}

// Create a fresh session, make it active, persist, and re-render.
async function handleNewChat() {
  const session = {
    id: Date.now().toString(),
    title: 'چت جدید',
    model: 'gemini-1.5-pro-latest',
    messages: [],
  };
  // Newest sessions live at the front of the list.
  state.chatSessions.unshift(session);
  state.setActiveChatId(session.id);
  state.saveSessions();
  await api.saveSession(session);
  ui.renderActiveChat();
  ui.renderHistoryList();
}

// Render a whole session as plain text ("کاربر:/مدل:" blocks separated by
// "---") for export. Non-text parts become a placeholder marker.
function getFullChatText(session) {
  if (!session || !session.messages) return "";
  return session.messages.map(msg => {
    const prefix = msg.role === 'user' ? 'کاربر' : 'مدل';
    const t = msg.parts?.find(p => p.text)?.text || '[محتوای غیر متنی]';
    return `${prefix}:\n${t}`;
  }).join('\n\n---\n\n');
}

// Application bootstrap: restore sessions, render the UI, and wire every
// event handler. Runs once the DOM is ready. Fix: four template literals
// had lost their backticks in the transcript (querySelector selectors and
// the delete-confirm message), which is a syntax error — restored.
document.addEventListener('DOMContentLoaded', async () => {
  ui.initTheme();
  state.loadSessions();
  // Prefer the remote copy of history when one exists.
  try {
    const remote = await api.loadAllSessionsFromHF();
    if (remote.length > 0) {
      state.setChatSessions(remote);
      state.setActiveChatId(remote[0].id);
      state.saveSessions();
    }
  } catch {}
  if (state.chatSessions.length === 0 || !state.getActiveChat()) {
    await handleNewChat();
  } else {
    state.setActiveChatId(state.activeChatId || state.chatSessions[0].id);
    ui.renderActiveChat();
    ui.renderHistoryList();
  }
  ui.setupMobileKeyboardFix();
  ui.setMinTextareaHeight(ui.dom.messageInput.scrollHeight);
  ui.dom.messageForm.style.borderRadius = '28px';

  // --- Sidebar & chat management ---
  ui.dom.newChatButton.addEventListener('click', handleNewChat);
  ui.dom.menuButton.addEventListener('click', () => ui.toggleSidebar(true));
  ui.dom.sidebarOverlay.addEventListener('click', () => ui.toggleSidebar(false));
  ui.dom.deleteAllChatsButton.addEventListener('click', async () => {
    ui.showConfirmModal('آیا از حذف تمام چت‌ها مطمئن هستید؟', async () => {
      const ids = state.chatSessions.map(s => s.id);
      state.setChatSessions([]);
      state.setActiveChatId(null);
      state.saveSessions();
      for (const id of ids) await api.deleteSession(id);
      await handleNewChat();
      ui.toggleSidebar(false);
    });
  });

  // --- Model selection ---
  ui.dom.modelSelectButton.addEventListener('click', () => ui.toggleModal(true));
  ui.dom.modelSelectModal.addEventListener('click', (e) => { if (e.target === ui.dom.modelSelectModal) ui.toggleModal(false); });
  ui.dom.modelOptionCards.forEach(card => {
    card.addEventListener('click', async () => {
      const s = state.getActiveChat();
      if (!s) return;
      s.model = card.dataset.model;
      ui.dom.currentModelName.textContent = card.dataset.name;
      ui.updateRadioButtons();
      state.saveSessions();
      await persistActive();
      ui.toggleModal(false);
    });
  });

  // --- Settings & theme ---
  ui.dom.settingsButton.addEventListener('click', () => ui.toggleSettingsModal(true));
  ui.dom.settingsModal.addEventListener('click', (e) => { if (e.target === ui.dom.settingsModal) ui.toggleSettingsModal(false); });
  ui.dom.themeToggle.addEventListener('change', (e) => {
    const t = e.target.checked ? 'dark' : 'light';
    localStorage.setItem('theme', t);
    ui.applyTheme(t);
  });

  // --- File attachment popup & inputs ---
  ui.dom.attachFileButton.addEventListener('click', (e) => {
    e.stopPropagation();
    const v = !ui.dom.filePopupMenu.classList.contains('hidden');
    ui.toggleFilePopupMenu(!v);
  });
  window.addEventListener('click', (e) => {
    if (!ui.dom.filePopupMenu.classList.contains('hidden') && !ui.dom.filePopupMenu.contains(e.target) && e.target !== ui.dom.attachFileButton && !ui.dom.attachFileButton.contains(e.target)) ui.toggleFilePopupMenu(false);
  });
  ui.dom.selectImageOption.addEventListener('click', () => { ui.dom.imageFileInput.click(); });
  ui.dom.selectFileOption.addEventListener('click', () => { ui.dom.generalFileInput.click(); });
  ui.dom.imageFileInput.addEventListener('change', handleFileSelection);
  ui.dom.generalFileInput.addEventListener('change', handleFileSelection);
  ui.dom.removeImageButton.addEventListener('click', () => {
    if (state.currentUploadXHR) state.currentUploadXHR.abort();
    state.setAttachedFile(null);
    ui.hideFilePreview();
    ui.dom.submitButton.disabled = false;
  });
  ui.dom.htmlPreviewCloseBtn.addEventListener('click', () => ui.toggleHtmlPreviewModal(false));
  ui.dom.htmlPreviewOverlay.addEventListener('click', () => ui.toggleHtmlPreviewModal(false));
  ui.dom.galleryDownloadBtn.addEventListener('click', function() {
    const url = ui.getCurrentGalleryImageUrl();
    if (url) api.uploadToAISADAAndOpenAlpha(url, this);
  });

  // --- Message submission ---
  ui.dom.messageForm.addEventListener('submit', async (e) => {
    e.preventDefault();
    // While a response is streaming, the submit button acts as "stop".
    if (state.isGenerating) {
      if (state.globalAbortController) state.globalAbortController.abort();
      if (state.currentImageEventSource) state.currentImageEventSource.close();
      return;
    }
    const s = state.getActiveChat();
    if (!s) return;
    const txt = ui.dom.messageInput.value.trim();
    if (!txt && !state.attachedFile) return;
    ui.setGeneratingState(true);
    const first = s.messages.length === 0;
    if (first) {
      const w = ui.dom.chatWindow.querySelector('.welcome-screen');
      if (w) w.remove();
    }
    // Demote action buttons on the previous user/model bubbles.
    const prevUserIdx = state.findLastIndex(s.messages, m => m.role === 'user');
    if (prevUserIdx !== -1) {
      const el = ui.dom.chatWindow.querySelector(`.message-entry[data-index="${prevUserIdx}"]`);
      if (el) ui.updateMessageActions(el, s.messages[prevUserIdx], false, false);
    }
    const prevModelIdx = state.findLastIndex(s.messages, m => m.role === 'model');
    if (prevModelIdx !== -1) {
      const last = prevModelIdx === s.messages.length - 1;
      if (last) {
        const el = ui.dom.chatWindow.querySelector(`.message-entry[data-index="${prevModelIdx}"]`);
        if (el) ui.updateMessageActions(el, s.messages[prevModelIdx], false, false);
      }
    }
    const parts = [];
    if (state.attachedFile) {
      parts.push({ fileUrl: state.attachedFile.url, mimeType: state.attachedFile.mimeType, name: state.attachedFile.name, base64Data: state.attachedFile.base64Data });
      ui.hideFilePreview();
    }
    if (txt) parts.push({ text: txt });
    const userMsg = { role: 'user', parts };
    s.messages.push(userMsg);
    ui.addMessageToUI(userMsg, s.messages.length - 1, { isLastUser: true, animate: true });
    const modelPlaceholder = { role: 'model', isTemporary: true, parts: [] };
    s.messages.push(modelPlaceholder);
    const modelBubble = ui.addMessageToUI(modelPlaceholder, s.messages.length - 1, { animate: true });
    if (first && txt) {
      // First message seeds the chat title.
      s.title = txt.substring(0, 30) + (txt.length > 30 ? '...' : '');
      ui.renderHistoryList();
    }
    ui.dom.messageInput.value = '';
    ui.dom.messageInput.dispatchEvent(new Event('input'));
    state.saveSessions();
    await persistActive();
    try {
      await api.streamResponse(modelBubble, s.messages, s.id);
    } finally {
      if (state.attachedFile) state.setAttachedFile(null);
    }
  });

  // --- Clicks inside the chat window (gallery, clarifications, actions) ---
  ui.dom.chatWindow.addEventListener('click', async (e) => {
    const galleryItem = e.target.closest('.js-image-gallery-grid [data-index]');
    if (galleryItem) {
      const grid = galleryItem.closest('.js-image-gallery-grid');
      const urls = grid.dataset.urls;
      const index = galleryItem.dataset.index;
      if (urls && index !== undefined) {
        ui.openImageGallery(urls, parseInt(index, 10));
        return;
      }
    }
    const clarificationButton = e.target.closest('.clarification-button');
    if (clarificationButton) {
      const payloadString = clarificationButton.dataset.actionPayload;
      if (!payloadString || state.isGenerating) return;
      const actionPayload = JSON.parse(payloadString);
      const s = state.getActiveChat();
      if (!s) return;
      ui.setGeneratingState(true);
      const clarificationMessageElement = clarificationButton.closest('.message-entry');
      const clarificationMessageIndex = parseInt(clarificationMessageElement.dataset.index, 10);
      if (isNaN(clarificationMessageIndex)) { ui.resetState(); return; }
      // Replace the clarification prompt with a fresh model placeholder.
      const modelPlaceholderMessage = { role: 'model', isTemporary: true, parts: [] };
      s.messages[clarificationMessageIndex] = modelPlaceholderMessage;
      const newModelBubble = ui.addMessageToUI(modelPlaceholderMessage, clarificationMessageIndex, {}, clarificationMessageElement);
      const historyBeforeAction = s.messages.slice(0, clarificationMessageIndex);
      if (actionPayload.intent === 'edit_image') {
        // Find the most recent image in history to edit.
        let fileUrlForEditing = null;
        for (let i = historyBeforeAction.length - 1; i >= 0; i--) {
          const msg = historyBeforeAction[i];
          if (msg.parts) {
            const p1 = msg.parts.find(p => p.image_url || (p.edited_images && p.edited_images.length > 0));
            if (p1) { fileUrlForEditing = p1.image_url || p1.edited_images[0]; break; }
            const p2 = msg.parts.find(p => p.fileUrl);
            if (p2) { fileUrlForEditing = p2.fileUrl; break; }
          }
        }
        if (fileUrlForEditing) await api.runExternalImageEditor(actionPayload.prompt, fileUrlForEditing, newModelBubble, clarificationMessageIndex);
        else { ui.displayError(newModelBubble, "تصویری برای ویرایش پیدا نشد."); ui.resetState(); }
      } else if (actionPayload.intent === 'regenerate_with_enhancement') {
        await api.streamResponse(newModelBubble, historyBeforeAction, s.id, actionPayload);
      }
      return;
    }
    const button = e.target.closest('.action-button');
    if (!button) return;
    const action = button.dataset.action;
    const messageEntry = button.closest('.message-entry');
    if (!messageEntry) return;
    const messageIndex = parseInt(messageEntry.dataset.index, 10);
    const s = state.getActiveChat();
    if (!s || isNaN(messageIndex)) return;
    const message = s.messages[messageIndex];
    if (action === 'download-image') {
      const url = message.parts.find(p => p.image_url)?.image_url;
      if (url) api.uploadToAISADAAndOpenAlpha(url, button);
    } else if (action === 'copy') {
      const textToCopy = message.parts?.find(p => p.text)?.text || '';
      if (textToCopy) navigator.clipboard.writeText(textToCopy).then(() => { ui.showCopyFeedback(button); });
    } else if (action === 'like' || action === 'dislike') {
      ui.handleLikeDislike(button, messageEntry);
    } else if (action === 'regenerate') {
      if (state.isGenerating) return;
      ui.setGeneratingState(true);
      state.setGpuGuideState(false);
      // Only the most recent model message can be regenerated.
      const lastModelMessageIndex = state.findLastIndex(s.messages, msg => msg.role === 'model');
      if (messageIndex === lastModelMessageIndex) {
        s.messages.length = messageIndex;
        messageEntry.remove();
        const lastUserMessageIndex = state.findLastIndex(s.messages, msg => msg.role === 'user');
        if (lastUserMessageIndex !== -1) {
          const el = ui.dom.chatWindow.querySelector(`.message-entry[data-index="${lastUserMessageIndex}"]`);
          if (el) ui.updateMessageActions(el, s.messages[lastUserMessageIndex], true, false);
        }
        const modelPlaceholderMessage = { role: 'model', isTemporary: true, parts: [] };
        s.messages.push(modelPlaceholderMessage);
        const newModelBubble = ui.addMessageToUI(modelPlaceholderMessage, s.messages.length - 1, { animate: true });
        state.saveSessions();
        await persistActive();
        await api.streamResponse(newModelBubble, s.messages, s.id);
      } else ui.resetState();
    } else if (action === 'edit') {
      if (state.isGenerating) return;
      // Only the most recent user message can be edited.
      const lastUserMessageIndex = state.findLastIndex(s.messages, msg => msg.role === 'user');
      if (messageIndex === lastUserMessageIndex) {
        const textPart = message.parts.find(p => p.text);
        const filePart = message.parts.find(p => p.fileUrl);
        if (textPart || filePart) {
          ui.showEditModal(textPart ? textPart.text : '', async (newText) => {
            ui.setGeneratingState(true);
            // Drop this message and everything after it from the DOM.
            const all = ui.dom.chatWindow.querySelectorAll('.message-entry');
            all.forEach(el => {
              const idx = parseInt(el.dataset.index, 10);
              if (idx >= messageIndex) el.remove();
            });
            s.messages.length = messageIndex;
            const newParts = [];
            if (filePart) newParts.push(filePart);
            if (newText.trim()) newParts.push({ text: newText });
            if (newParts.length > 0) {
              const editedUserMessage = { role: 'user', parts: newParts };
              s.messages.push(editedUserMessage);
              ui.addMessageToUI(editedUserMessage, s.messages.length - 1, { isLastUser: true, animate: true });
            }
            const modelPlaceholderMessage = { role: 'model', isTemporary: true, parts: [] };
            s.messages.push(modelPlaceholderMessage);
            state.saveSessions();
            await persistActive();
            const newModelBubble = ui.addMessageToUI(modelPlaceholderMessage, s.messages.length - 1, { animate: true });
            await api.streamResponse(newModelBubble, s.messages, s.id);
          });
        }
      }
    } else if (action === 'show-message-menu') {
      ui.showMessageMenu(e, messageIndex);
    }
  });

  // --- History item context menu ---
  ui.dom.historyItemMenu.addEventListener('click', async (e) => {
    const btn = e.target.closest('.menu-item');
    if (!btn) return;
    const action = btn.dataset.action;
    const format = btn.dataset.format;
    const sessionId = ui.dom.historyItemMenu.dataset.sessionId;
    const s = state.chatSessions.find(x => x.id === sessionId);
    if (!s) return;
    if (action === 'rename') {
      ui.showRenameModal(s.title, async (t) => {
        s.title = t;
        state.saveSessions();
        await api.saveSession(s);
        ui.renderHistoryList();
      });
    } else if (action === 'delete') {
      ui.showConfirmModal(`آیا از حذف گفتگوی "${s.title}" مطمئن هستید؟`, async () => {
        state.setChatSessions(state.chatSessions.filter(x => x.id !== sessionId));
        state.saveSessions();
        await api.deleteSession(sessionId);
        if (state.activeChatId === sessionId) {
          if (state.chatSessions.length > 0) {
            state.setActiveChatId(state.chatSessions[0].id);
            ui.renderActiveChat();
          } else {
            await handleNewChat();
          }
        }
        ui.renderHistoryList();
      });
    } else if (action === 'convert-chat') {
      const text = getFullChatText(s);
      await api.convertTextToFile(text, format, btn);
    }
    ui.dom.historyItemMenu.classList.remove('visible');
  });

  // --- Per-message context menu ---
  ui.dom.messageItemMenu.addEventListener('click', async (e) => {
    const menu = ui.dom.messageItemMenu;
    const closeMenu = () => { menu.classList.remove('visible'); setTimeout(() => { menu.classList.add('hidden'); }, 300); };
    if (e.target === ui.dom.messageItemMenuOverlay) { closeMenu(); return; }
    const btn = e.target.closest('.menu-item');
    if (!btn) return;
    const action = btn.dataset.action;
    const format = btn.dataset.format;
    const messageIndex = parseInt(menu.dataset.messageIndex, 10);
    const s = state.getActiveChat();
    if (!s || isNaN(messageIndex)) { closeMenu(); return; }
    const message = s.messages[messageIndex];
    if (action === 'delete-message') {
      ui.showConfirmModal('آیا از حذف این پیام مطمئن هستید؟', async () => {
        state.deleteMessage(s.id, messageIndex);
        state.saveSessions();
        await api.saveSession(s);
        ui.renderActiveChat();
      });
    } else if (action === 'convert-message') {
      const text = message.parts?.find(p => p.text)?.text || '';
      if (text) await api.convertTextToFile(text, format, btn);
      else alert('محتوای متنی ندارد.');
    }
    closeMenu();
  });

  // --- Textarea autosize ---
  ui.dom.messageInput.addEventListener('input', () => { ui.adjustTextareaHeight(ui.dom.messageInput, ui.dom.messageForm); });
  ui.dom.editInput.addEventListener('input', () => { ui.adjustTextareaHeight(ui.dom.editInput); });
});

// Expose these handlers globally so inline HTML attributes can invoke them.
window.handleSuggestionClick = ui.handleSuggestionClick;
window.uploadToAISADAAndOpenAlpha = api.uploadToAISADAAndOpenAlpha; این یکی // This file manages all of the application's state

// Hard cap on how many chat sessions are retained in history.
export const MAX_CHAT_SESSIONS = 150;

// --- Mutable module-level state (read via exports, written via the setters below) ---
export let chatSessions = [];
export let activeChatId = null;
export let attachedFile = null;
export let isGenerating = false;
export let globalAbortController = null;
export let currentImageEventSource = null;
export let currentUploadXHR = null;
export let isGpuGuideActive = false;

// Setters — ES module exports are live bindings, but importers cannot
// reassign them directly, hence these mutator functions.
export function setChatSessions(newSessions) { chatSessions = newSessions; }
export function setActiveChatId(id) { activeChatId = id; }
export function setAttachedFile(file) { attachedFile = file; }
export function setGenerating(status) { isGenerating = status; }
export function setGlobalAbortController(controller) { globalAbortController = controller; }
export function setCurrentImageEventSource(source) { currentImageEventSource = source; }
export function setCurrentUploadXHR(xhr) { currentUploadXHR = xhr; }
export function setGpuGuideState(isActive) { isGpuGuideActive = isActive; }

// Return the session whose id matches activeChatId, or undefined if none.
export function getActiveChat() {
  for (const session of chatSessions) {
    if (session.id === activeChatId) return session;
  }
  return undefined;
}

// Trim the in-memory session list to MAX_CHAT_SESSIONS, dropping the oldest
// entries (the list is kept newest-first).
function enforceSessionLimit() {
  if (chatSessions.length > MAX_CHAT_SESSIONS) {
    chatSessions.splice(MAX_CHAT_SESSIONS);
    console.log(`تعداد تاریخچه به ${MAX_CHAT_SESSIONS} محدود شد. قدیمی‌ترین‌ها حذف شدند.`);
  }
}

// Best-effort push of all sessions to the backend store. Failures are
// logged but never thrown, so saving cannot break the UI flow.
async function saveToHF(sessions) {
  try {
    const response = await fetch('/storage/save', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({ sessions }),
    });
    const payload = await response.json();
    if (!payload.ok) {
      console.warn('ذخیره در HF ناموفق بود:', payload.error);
    }
  } catch (err) {
    console.warn('ذخیره در HF ناموفق بود:', err);
  }
}

// Persist all sessions: strip inline base64 payloads (too large to store),
// push to the HF-backed store, and mirror to localStorage as a fallback.
// NOTE: the transcript had collapsed this body onto one line behind a stray
// "text" fence token (a syntax error); restored to structured code.
export function saveSessions() {
  enforceSessionLimit();
  // Deep-copy so stripping base64Data never mutates the live sessions.
  const sessionsToSave = JSON.parse(JSON.stringify(chatSessions));
  sessionsToSave.forEach(session => {
    session.messages.forEach(message => {
      (message.parts || []).forEach(part => { delete part.base64Data; });
    });
  });
  // 1) Primary save to HF (fire-and-forget).
  saveToHF(sessionsToSave);
  // 2) Local fallback (optional; quota errors are ignored).
  try { localStorage.setItem('alphaChatSessions', JSON.stringify(sessionsToSave)); } catch {}
}

// Load sessions from the backend, falling back to localStorage on failure.
// Fix: the original wrapped an async callback in `new Promise(...)` (the
// Promise-constructor antipattern — a rejection inside the async executor
// would be lost). Rewritten as a plain async function; still returns a
// Promise<void>, so callers are unaffected.
export async function loadSessions() {
  try {
    const resp = await fetch('/storage/load');
    const data = await resp.json();
    if (data.ok && Array.isArray(data.sessions)) {
      chatSessions = data.sessions;
      enforceSessionLimit();
      // Default the active chat to the newest session.
      if (!activeChatId && chatSessions.length) activeChatId = chatSessions[0].id;
      return;
    }
    throw new Error(data.error || 'load failed');
  } catch (e) {
    console.warn('بارگذاری از HF ناموفق بود، از localStorage استفاده می‌شود.', e);
    try {
      const saved = localStorage.getItem('alphaChatSessions');
      chatSessions = saved ? JSON.parse(saved) : [];
      enforceSessionLimit();
    } catch { chatSessions = []; }
    if (!activeChatId && chatSessions.length) activeChatId = chatSessions[0].id;
  }
}

// Return the index of the last element satisfying `predicate`, or -1.
export function findLastIndex(array, predicate) {
  let i = array.length;
  while (i--) {
    if (predicate(array[i])) return i;
  }
  return -1;
}

export function deleteMessage(chatId, messageIndex) {
const chat = chatSessions.find(s => s.id === chatId);
if (chat && chat.messages[messageIndex]) {
chat.messages.splice(messageIndex, 1);
saveSessions();
}
} این یکی الان در ادامه کد های اسپیس چت قدیمی رو میفرستم FROM python:3.9-slim

# START: اصلاحیه نهایی برای حل مشکل Permission denied

# مسیر کش را تنظیم کرده و سپس دسترسی کامل به آن را می‌دهیم

ENV HF_HOME="/app/huggingface_cache"
RUN mkdir -p $HF_HOME && chmod -R 777 $HF_HOME

# END: اصلاحیه نهایی

WORKDIR /app

COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

EXPOSE 7860

CMD ["gunicorn", "--workers", "5", "--worker-class", "gevent", "--bind", "0.0.0.0:7860", "--timeout", "720", "app:app"] این یکی import os
import requests
import json
import logging
from filelock import FileLock
from flask import Flask, render_template, request, Response, jsonify
from datetime import datetime
import pandas as pd
import io

from huggingface_hub import HfApi
from huggingface_hub.utils import HfHubHTTPError

class PersianLogFormatter(logging.Formatter):
    """Log formatter that renders the level name in Persian."""

    # Numeric log level -> Persian label; unknown levels keep their name.
    LEVEL_MAP = {logging.DEBUG: "دیباگ", logging.INFO: "اطلاع", logging.WARNING: "هشدار", logging.ERROR: "خطا", logging.CRITICAL: "بحرانی"}

    def format(self, record):
        # NOTE: mutates record.levelname in place, so any other handler that
        # formats the same record also sees the Persian label.
        record.levelname = self.LEVEL_MAP.get(record.levelno, record.levelname)
        return super().format(record)

def setup_logging():
    """Install one stream handler with Persian level names on the root logger."""
    log_format = '[%(asctime)s] [%(levelname)s]: %(message)s'
    date_format = '%Y-%m-%d %H:%M:%S'
    formatter = PersianLogFormatter(log_format, datefmt=date_format)
    root_logger = logging.getLogger()
    # Drop handlers left over from a previous setup (e.g. under gunicorn)
    # so log lines are not duplicated.
    if root_logger.hasHandlers():
        root_logger.handlers.clear()
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)
    root_logger.setLevel(logging.INFO)


setup_logging()

try:
    # --- Gemini API keys (comma-separated in one env var) -----------------
    ALL_GEMINI_API_KEYS_STR = os.getenv('ALL_GEMINI_API_KEYS')
    if not ALL_GEMINI_API_KEYS_STR:
        raise RuntimeError("متغیر ALL_GEMINI_API_KEYS تنظیم نشده است.")
    MASTER_API_KEYS = [key.strip() for key in ALL_GEMINI_API_KEYS_STR.split(',') if key.strip()]
    if not MASTER_API_KEYS:
        raise RuntimeError("هیچ کلید معتبری در ALL_GEMINI_API_KEYS یافت نشد.")

    # --- Hugging Face access ---------------------------------------------
    HF_TOKEN = os.getenv('HF_TOKEN')
    if not HF_TOKEN:
        raise RuntimeError("توکن هاگینگ فیس (HF_TOKEN) تنظیم نشده است.")
    CHAT_HISTORY_REPO_ID = "Asrasahar/alpha-chat-history"
    hf_api = HfApi(token=HF_TOKEN)
    logging.info(f"✅ با موفقیت به هاگینگ فیس متصل شد. دیتاست تاریخچه چت: {CHAT_HISTORY_REPO_ID}")

    # --- Key-rotation counter shared by all gunicorn workers --------------
    # /dev/shm is in-memory and shared between worker processes; a FileLock
    # guards concurrent initialization.
    COUNTER_FILE_PATH = '/dev/shm/gunicorn_key_counter.txt'
    with FileLock(COUNTER_FILE_PATH + ".lock"):
        if not os.path.exists(COUNTER_FILE_PATH):
            with open(COUNTER_FILE_PATH, 'w') as f:
                f.write('0')
            logging.info("شمارنده چرخش کلیدها مقداردهی اولیه شد.")
except Exception as e:
    logging.critical(f"خطای بحرانی در هنگام راه‌اندازی: {e}")
    raise

# BUG FIX: the paste had `Flask(name)`; Flask requires the module import name.
app = Flask(__name__)
GOOGLE_API_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/models'

# System instruction sent with every Gemini request (Persian persona rules).
BASE_RULES_FA = """
هویت تو «چت‌بات آلفا» است. وظیفه‌ات کمک به کاربر در همه‌ی زمینه‌هاست. همیشه به زبان فارسی پاسخ بده.
"""

def get_chat_history_df(chat_id):
    """Download this chat's CSV history from the HF dataset as a DataFrame.

    Returns an empty DataFrame with the canonical columns when the file does
    not exist yet (404) or any hub HTTP error occurs.
    """
    # Whitelist filename characters to avoid path tricks in repo paths.
    safe_chat_id = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_'))
    filename = f"{safe_chat_id}.csv"
    try:
        file_path = hf_api.hf_hub_download(
            repo_id=CHAT_HISTORY_REPO_ID,
            filename=filename,
            repo_type="dataset",
            force_download=True,  # always fetch the latest commit, never a stale cache
            etag_timeout=10,
            token=HF_TOKEN,  # key fix: authenticate the download explicitly
        )
        return pd.read_csv(file_path)
    except HfHubHTTPError as e:
        # 404 simply means "no history yet"; other hub errors also degrade
        # to an empty history instead of failing the request.
        if e.response.status_code == 404:
            return pd.DataFrame(columns=['timestamp', 'role', 'parts_json'])
        return pd.DataFrame(columns=['timestamp', 'role', 'parts_json'])

def append_rows_to_history(chat_id, rows_df):
    """Append *rows_df* to the chat's CSV in the HF dataset (read-modify-write).

    Errors are logged and swallowed deliberately so a failed save never
    breaks the in-flight chat request.
    """
    safe_chat_id = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_'))
    filename = f"{safe_chat_id}.csv"
    try:
        chat_history_df = get_chat_history_df(chat_id)
        updated_df = pd.concat([chat_history_df, rows_df], ignore_index=True)
        csv_buffer = io.StringIO()
        updated_df.to_csv(csv_buffer, index=False)
        hf_api.upload_file(
            path_or_fileobj=io.BytesIO(csv_buffer.getvalue().encode()),
            path_in_repo=filename,
            repo_id=CHAT_HISTORY_REPO_ID,
            repo_type="dataset",
            token=HF_TOKEN,  # key fix: authenticate the upload explicitly
            commit_message=f"Update chat {safe_chat_id}"
        )
    except Exception as e:
        logging.error(f"خطا در ذخیره تاریخچه برای چت {chat_id} در HF: {e}")

def get_keys_for_request():
    """Return MASTER_API_KEYS rotated so each request starts on a fresh key.

    A counter file in /dev/shm, guarded by a FileLock, keeps the rotation
    consistent across gunicorn worker processes.
    """
    with FileLock(COUNTER_FILE_PATH + ".lock"):
        try:
            with open(COUNTER_FILE_PATH, 'r') as f:
                current_index = int(f.read().strip())
        except (FileNotFoundError, ValueError):
            current_index = 0
        # Store the next index already reduced modulo the key count so the
        # counter cannot grow without bound (rotation result is unchanged).
        with open(COUNTER_FILE_PATH, 'w') as f:
            f.write(str((current_index + 1) % len(MASTER_API_KEYS)))
    start_index = current_index % len(MASTER_API_KEYS)
    return MASTER_API_KEYS[start_index:] + MASTER_API_KEYS[:start_index]

def sse_event(data):
    """Serialize *data* as a single Server-Sent-Events message frame."""
    return f"data: {json.dumps(data)}\n\n"

@app.route('/')
def index():
    """Serve the single-page chat UI."""
    return render_template('index.html')

@app.route('/history/<chat_id>', methods=['GET'])
def get_history_route(chat_id):
    """Return the full message history of one chat as a JSON list."""
    if not chat_id:
        return jsonify({"error": "chatId is required"}), 400
    chat_df = get_chat_history_df(chat_id)
    # Each CSV row stores the Gemini "parts" list as a JSON string.
    history = [{"role": row['role'], "parts": json.loads(row['parts_json'])} for _, row in chat_df.iterrows()]
    return jsonify(history)

@app.route('/chat', methods=['POST'])
def chat():
    """Stream a Gemini answer over SSE, persisting both sides to the HF dataset."""
    data = request.json
    logging.info(f"درخواست جدید به /chat رسید. Payload: {json.dumps(data, ensure_ascii=False, indent=2)}")
    if not data:
        return Response(sse_event({"error": "Invalid request"}), status=400, mimetype='application/json')

    model = data.get('model', 'gemini-1.5-pro-latest')
    chat_id = data.get('chatId')
    new_user_message = data.get('newUserMessage')
    if not chat_id or not new_user_message:
        logging.error(f"درخواست نامعتبر برای chat_id={chat_id}. فیلدهای لازم وجود ندارد.")
        return Response(sse_event({"error": "Request is missing required fields"}), status=400, mimetype='application/json')

    # Persist only the text parts of the user message (no inline file data).
    sanitized_parts = [{'text': part['text']} for part in new_user_message.get('parts', []) if 'text' in part]
    user_message_to_save = pd.DataFrame([{
        'timestamp': datetime.utcnow().isoformat(),
        'role': new_user_message['role'],
        'parts_json': json.dumps(sanitized_parts, ensure_ascii=False),
    }])
    append_rows_to_history(chat_id, user_message_to_save)
    logging.info(f"پیام کاربر برای chat_id={chat_id} با موفقیت در دیتاست ذخیره شد.")

    # Re-read the stored history so Gemini sees the full conversation.
    chat_df = get_chat_history_df(chat_id)
    history_for_gemini = [{"role": row['role'], "parts": json.loads(row['parts_json'])} for _, row in chat_df.iterrows()]
    logging.info(f"تاریخچه کامل برای ارسال به Gemini آماده شد. تعداد پیام‌ها: {len(history_for_gemini)}")

    def stream_events():
        # Proxy Gemini's SSE stream to the browser, rotating API keys on
        # failure; the finally-block saves the final model reply exactly once.
        final_model_message = {}
        try:
            keys_to_try = get_keys_for_request()
            accumulated_text = ""
            for api_key in keys_to_try:
                try:
                    url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse&key={api_key}"
                    payload = {"contents": history_for_gemini, "systemInstruction": {"parts": [{"text": BASE_RULES_FA}]}}
                    logging.info(f"ارسال درخواست به Gemini با کلید ...{api_key[-4:]}")
                    with requests.post(url, json=payload, stream=True, timeout=120) as response:
                        if response.status_code == 429:
                            logging.warning("کلید API محدود شده است.")
                            continue  # quota hit: try the next key
                        response.raise_for_status()
                        for line in response.iter_lines():
                            if not line.startswith(b'data: '):
                                continue
                            # Forward the raw SSE line to the client as-is...
                            yield f"{line.decode('utf-8')}\n\n"
                            # ...and accumulate the text for later persistence.
                            chunk_str = line.decode('utf-8')[6:]
                            try:
                                data_chunk = json.loads(chunk_str)
                                part = data_chunk.get("candidates", [{}])[0].get("content", {}).get("parts", [{}])[0]
                                if "text" in part:
                                    accumulated_text += part["text"]
                            except (json.JSONDecodeError, IndexError, KeyError):
                                continue
                    logging.info(f"استریم از Gemini با موفقیت به پایان رسید. پاسخ: {accumulated_text[:100]}...")
                    if accumulated_text:
                        final_model_message = {"role": "model", "parts": [{"text": accumulated_text}]}
                    else:
                        logging.warning("پاسخ متنی از Gemini دریافت نشد.")
                        final_model_message = {"role": "model", "parts": [{"text": "متاسفانه قادر به پردازش درخواست شما نیستم."}]}
                    return  # success: finally-block still runs and saves the reply
                except requests.exceptions.RequestException as e:
                    logging.warning(f"درخواست ناموفق بود، تلاش با کلید بعدی: {e}")
                    continue
            # Every key failed: emit a synthetic SSE frame with the apology.
            logging.error("تمام کلیدهای API ناموفق بودند.")
            final_model_message = {"role": "model", "parts": [{"text": "تمام سرویس‌ها مشغول هستند. لطفاً بعداً تلاش کنید."}]}
            yield sse_event({"candidates": [{"content": final_model_message}]})
        finally:
            if final_model_message:
                model_message_to_save = pd.DataFrame([{
                    'timestamp': datetime.utcnow().isoformat(),
                    'role': final_model_message.get('role', 'model'),
                    'parts_json': json.dumps(final_model_message.get('parts', []), ensure_ascii=False),
                }])
                append_rows_to_history(chat_id, model_message_to_save)
                logging.info(f"پاسخ مدل برای chat_id={chat_id} با موفقیت در دیتاست ذخیره شد.")

    return Response(stream_events(), mimetype='text/event-stream')

if name == 'main':
port = int(os.environ.get("PORT", 7860))
app.run(host='0.0.0.0', port=port, debug=False) این یکی Flask==2.2.2
Werkzeug==2.2.2
gunicorn==20.1.0

# START: نسخه requests حذف شد تا pip بتواند نسخه سازگار را انتخاب کند

requests

# END: تغییر اعمال شد

gevent==22.10.2
sseclient-py==1.8.0
filelock==3.13.1
huggingface_hub==0.23.4
datasets==2.20.0
pandas==2.2.2
pyarrow==16.1.0 این یکی import * as state from './state.js';
import * as ui from './ui.js';

const UPLOADER_API_URL = 'https://asrasahar-ok-uploader-bot.hf.space/upload';
const YOUR_IMAGE_UPLOAD_SERVER_URL = 'https://www.aisada.ir/hamed/upload.php';
const CONVERTER_API_URL = 'https://texttopdf-5irq.onrender.com/';

const HF_IMAGE_GEN_URL_BASE = "https://black-forest-labs-flux-1-schnell.hf.space";
const FN_INDEX_GEN_IMAGE = 2;
const TRIGGER_ID_GEN_IMAGE = 5;

function getDimensionsFromRatio(ratioString) {
    // Map a human-entered aspect ratio (Persian words or "w:h" style) onto one
    // of the fixed resolutions supported by the FLUX image-generation Space.
    const defaultSize = { width: 768, height: 1344 };
    if (!ratioString || typeof ratioString !== 'string') return defaultSize;
    // Strip whitespace, separators and the Persian connector words.
    const r = ratioString.toLowerCase().replace(/[\sدر:بهx]/g, '');
    if (r === '11' || r.includes('مربع')) return { width: 1024, height: 1024 };
    if (r === '169' || r.includes('افقی') || r.includes('لندسکیپ')) return { width: 1344, height: 768 };
    if (r === '916' || r.includes('عمودی') || r.includes('پرتره') || r.includes('موبایل')) return { width: 768, height: 1344 };
    if (r === '43') return { width: 1152, height: 864 };
    if (r === '34') return { width: 864, height: 1152 };
    if (r === '32') return { width: 1216, height: 832 };
    if (r === '23') return { width: 832, height: 1216 };
    // BUG FIX: this template literal had lost its backticks in the paste.
    console.warn(`نسبت تصویر '${ratioString}' شناسایی نشد. از اندازه پیش‌فرض استفاده می‌شود.`);
    return defaultSize;
}

export async function convertTextToFile(content, format, buttonElement) {
    // Convert chat text into a downloadable file via the converter service,
    // re-upload the result to the AISADA server, then open the final URL in
    // the parent window.
    if (buttonElement) ui.showLoadingOnButton(buttonElement, true);
    try {
        const convertFormData = new FormData();
        convertFormData.append('content', content);
        convertFormData.append('format', format);
        const convertResponse = await fetch(CONVERTER_API_URL, { method: 'POST', body: convertFormData });
        if (!convertResponse.ok) {
            // BUG FIX: every template literal in this function had lost its backticks.
            throw new Error(`خطا در ارتباط با سرور تبدیل: ${convertResponse.statusText}`);
        }
        const fileBlob = await convertResponse.blob();
        const fileName = `alpha-export-${Date.now()}.${format}`;
        const uploadFormData = new FormData();
        // NOTE(review): field name 'image' looks odd for a document — the
        // upload.php endpoint apparently expects it; confirm before renaming.
        uploadFormData.append('image', fileBlob, fileName);
        const uploadResponse = await fetch(YOUR_IMAGE_UPLOAD_SERVER_URL, { method: 'POST', body: uploadFormData });
        if (!uploadResponse.ok) {
            const errorText = await uploadResponse.text().catch(() => `HTTP ${uploadResponse.status}`);
            throw new Error(`آپلود فایل ساخته شده به سرور شما ناموفق بود: ${errorText}`);
        }
        const uploadData = await uploadResponse.json();
        if (uploadData.success && uploadData.url) {
            window.parent.postMessage({ type: 'OPEN_EXTERNAL_URL', url: uploadData.url }, '*');
        } else {
            throw new Error(uploadData.message || 'پاسخ سرور آپلود شما پس از ساخت فایل، نامعتبر بود.');
        }
    } catch (error) {
        console.error('خطا در فرآیند تبدیل و آپلود فایل:', error);
        alert(`متاسفانه در آماده‌سازی فایل برای دانلود خطایی رخ داد: ${error.message}`);
    } finally {
        if (buttonElement) ui.showLoadingOnButton(buttonElement, false);
    }
}

function uploadFileToHuggingFace(file, onProgress) {
    // POST the file to the uploader Space via XHR (XHR exposes upload
    // progress events, which fetch does not).
    return new Promise((resolve, reject) => {
        const xhr = new XMLHttpRequest();
        state.setCurrentUploadXHR(xhr);
        xhr.open('POST', UPLOADER_API_URL, true);
        xhr.onload = function () {
            state.setCurrentUploadXHR(null);
            try {
                const response = JSON.parse(xhr.responseText);
                if (xhr.status >= 200 && xhr.status < 300) {
                    if (response.hf_url) {
                        resolve(response);
                    } else {
                        reject(new Error("پاسخ سرور فاقد 'hf_url' بود."));
                    }
                } else {
                    // BUG FIX: both template literals below had lost their backticks.
                    reject(new Error(response.error || `خطای سرور: ${xhr.statusText}`));
                }
            } catch (e) {
                reject(new Error(`خطای ناشناخته در پاسخ سرور آپلود. ${xhr.statusText}`));
            }
        };
        xhr.onerror = function () {
            state.setCurrentUploadXHR(null);
            // status 0 with an empty statusText is what abort() looks like here.
            if (xhr.status === 0 && !xhr.statusText) {
                reject(new Error('آپلود توسط کاربر لغو شد.'));
            } else {
                reject(new Error('خطای ارتباط با شبکه هنگام آپلود. لطفاً اتصال اینترنت خود را بررسی کنید.'));
            }
        };
        xhr.upload.onprogress = function (event) {
            if (event.lengthComputable && onProgress) {
                const percentComplete = Math.round((event.loaded / event.total) * 100);
                onProgress(percentComplete);
            }
        };
        const formData = new FormData();
        formData.append('file', file);
        xhr.send(formData);
    });
}
async function uploadUrlToHuggingFace(url) {
    // Ask the uploader Space to fetch a remote URL and re-host it on HF.
    return new Promise((resolve, reject) => {
        const xhr = new XMLHttpRequest();
        xhr.open('POST', UPLOADER_API_URL, true);
        xhr.setRequestHeader('Content-Type', 'application/json');
        xhr.onload = function () {
            try {
                const response = JSON.parse(xhr.responseText);
                if (xhr.status >= 200 && xhr.status < 300 && response.hf_url) {
                    resolve(response);
                } else {
                    // BUG FIX: both template literals below had lost their backticks.
                    reject(new Error(response.error || `خطای سرور: ${xhr.statusText}`));
                }
            } catch (e) {
                reject(new Error(`خطای ناشناخته در پاسخ سرور آپلود. ${xhr.statusText}`));
            }
        };
        xhr.onerror = function () { reject(new Error('خطای ارتباط با شبکه هنگام آپلود URL.')); };
        xhr.send(JSON.stringify({ url: url }));
    });
}
export async function processAndUploadFile(file, onProgress) {
    // Read the file as base64 (for inline model payloads) while the HF upload
    // runs in parallel; resolves with both the hosted URL and the data.
    const toBase64 = (blob) =>
        new Promise((resolve, reject) => {
            const reader = new FileReader();
            reader.onerror = (err) => reject(err);
            reader.onload = () => resolve(reader.result.split(',')[1]);
            reader.readAsDataURL(blob);
        });
    try {
        const [uploadResult, base64Data] = await Promise.all([
            uploadFileToHuggingFace(file, onProgress),
            toBase64(file),
        ]);
        return {
            url: uploadResult.hf_url,
            name: file.name,
            mimeType: file.type,
            base64Data,
        };
    } catch (error) {
        console.error("خطا در پردازش و آپلود فایل:", error);
        throw error;
    }
}
export async function uploadToAISADAAndOpenAlpha(imageUrlFromSpace, downloadButtonElement) {
    // Fetch a generated image from the HF Space, re-upload it to the AISADA
    // server, then ask the parent window to open the permanent URL.
    if (!imageUrlFromSpace) {
        if (downloadButtonElement) downloadButtonElement.textContent = "خطا: URL نامعتبر";
        return;
    }
    if (downloadButtonElement) {
        downloadButtonElement.disabled = true;
        // BUG FIX: this innerHTML template (and every template literal below)
        // had lost its backticks in the paste.
        downloadButtonElement.innerHTML = `<div class="typing-indicator" style="height:16px; gap: 3px;"><span style="width:6px; height:6px; background-color:currentColor;"></span><span style="width:6px; height:6px; background-color:currentColor;"></span><span style="width:6px; height:6px; background-color:currentColor;"></span></div>`;
    }
    try {
        const imageResponse = await fetch(imageUrlFromSpace);
        if (!imageResponse.ok) throw new Error(`خطا در دریافت فایل از هاگینگ فیس: ${imageResponse.status}`);
        const imageBlob = await imageResponse.blob();
        const formData = new FormData();
        let filename = `generated_alpha_image_${Date.now()}.webp`;
        try {
            // Prefer the original filename from the HF URL when one is present.
            const urlPath = new URL(imageUrlFromSpace).pathname.split('/').pop();
            if (urlPath && urlPath.includes('.')) filename = urlPath.split('?')[0];
        } catch (e) { console.warn("Could not parse filename from HF URL:", e); }
        formData.append('image', imageBlob, filename);
        const uploadResponse = await fetch(YOUR_IMAGE_UPLOAD_SERVER_URL, { method: 'POST', body: formData });
        if (!uploadResponse.ok) {
            const errorText = await uploadResponse.text().catch(() => `HTTP ${uploadResponse.status}`);
            throw new Error(`آپلود به سرور شما ناموفق بود: ${errorText}`);
        }
        const uploadData = await uploadResponse.json();
        if (uploadData.success && uploadData.url) {
            window.parent.postMessage({
                type: 'OPEN_EXTERNAL_URL',
                url: uploadData.url
            }, '*');
        } else {
            throw new Error(uploadData.message || 'پاسخ سرور آپلود شما نامعتبر بود.');
        }
    } catch (error) {
        console.error("خطا در فرآیند دانلود و آپلود تصویر:", error);
        if (downloadButtonElement) downloadButtonElement.textContent = "خطا";
        alert(`خطا در آماده‌سازی دانلود: ${error.message}`);
    } finally {
        if (downloadButtonElement) {
            // Restore the download icon after a short cool-down.
            setTimeout(() => {
                downloadButtonElement.disabled = false;
                downloadButtonElement.innerHTML = `<svg class="w-4 h-4" fill="currentColor" viewBox="0 0 24 24"><path d="M19 9h-4V3H9v6H5l7 7 7-7zM5 18v2h14v-2H5z"/></svg>`;
            }, 2000);
        }
    }
}
function convertImageBlobToPng(imageBlob) {
    // Re-encode an arbitrary image blob to PNG by drawing it onto a canvas.
    return new Promise((resolve, reject) => {
        const objectUrl = URL.createObjectURL(imageBlob);
        const img = new Image();
        img.onerror = () => {
            reject(new Error('خطا در بارگذاری تصویر برای تبدیل فرمت.'));
            URL.revokeObjectURL(objectUrl);
        };
        img.onload = () => {
            const canvas = document.createElement('canvas');
            canvas.width = img.width;
            canvas.height = img.height;
            canvas.getContext('2d').drawImage(img, 0, 0);
            canvas.toBlob((pngBlob) => {
                resolve(pngBlob);
                URL.revokeObjectURL(objectUrl);
            }, 'image/png');
        };
        img.src = objectUrl;
    });
}

export async function runExternalImageEditor(prompt, fileUrl, modelBubbleOuterDivElement, messageIndex) {
    // Download the source image, normalize it to PNG if needed, send it to the
    // external edit worker, re-host the results on HF and update the chat.
    const activeChat = state.getActiveChat();
    const RENDER_API_URL = 'https://alfa-editor-worker.onrender.com/api/edit';

    const showError = (error) => {
        ui.displayError(modelBubbleOuterDivElement, error.message || String(error));
    };
    try {
        if (!fileUrl) throw new Error("آدرس فایلی برای ویرایش یافت نشد.");
        ui.showStatusUpdate("در حال دانلود فایل برای ویرایش...", modelBubbleOuterDivElement);
        const response = await fetch(fileUrl);
        if (!response.ok) throw new Error(`خطا در دانلود فایل برای ویرایش: ${response.statusText}`);
        let imageBlob = await response.blob();
        const originalMimeType = imageBlob.type || response.headers.get('content-type');
        // The edit worker only accepts PNG/JPEG; convert anything else.
        if (originalMimeType && originalMimeType !== 'image/png' && originalMimeType !== 'image/jpeg') {
            const format = originalMimeType.split('/')[1] || 'ناشناخته';
            ui.showStatusUpdate(`فرمت تصویر (${format.toUpperCase()}) نامناسب است. در حال تبدیل به PNG...`, modelBubbleOuterDivElement);
            imageBlob = await convertImageBlobToPng(imageBlob);
        }
        ui.showStatusUpdate("ارسال درخواست به سرور ویرایش...", modelBubbleOuterDivElement);
        const formData = new FormData();
        formData.append("image", imageBlob, 'image.png');
        formData.append("prompt", prompt);
        const editResponse = await fetch(RENDER_API_URL, { method: 'POST', body: formData, signal: state.globalAbortController.signal });
        const result = await editResponse.json();
        if (!editResponse.ok) throw new Error(result.error || `خطای سرور ویرایش: ${editResponse.status}`);
        if (!result.image_urls || result.image_urls.length === 0) {
            throw new Error("پردازش کامل شد اما تصویری از سرور ویرایش دریافت نشد.");
        }
        // The worker's URLs are temporary — re-host each one on HF.
        ui.showStatusUpdate("در حال دائمی‌سازی لینک‌های تصاویر ویرایش شده...", modelBubbleOuterDivElement);
        const permanentUrls = await Promise.all(
            result.image_urls.map(tempUrl => uploadUrlToHuggingFace(tempUrl).then(res => res.hf_url))
        );
        const finalMessage = { role: 'model', parts: [{ edited_images: permanentUrls }] };
        if (typeof messageIndex !== 'undefined') {
            activeChat.messages[messageIndex] = finalMessage;
            state.saveSessions();
            ui.addMessageToUI(finalMessage, messageIndex, { isLastModel: true, animate: false }, modelBubbleOuterDivElement);
        }
    } catch (error) {
        console.error("خطا در فرآیند ویرایش تصویر:", error);
        if (error.name !== 'AbortError') showError(error);
        else modelBubbleOuterDivElement.querySelector('.message-content-area').innerHTML += '<p class="text-xs text-slate-500 mt-2 text-center">-- عملیات ویرایش متوقف شد --</p>';
    } finally {
        ui.resetState();
    }
}

export async function runExternalImageGenerator(englishPrompt, aspectRatio, modelBubbleOuterDivElement, followUpText = null) {
    // Drive the Gradio queue of the FLUX Space: join the queue, follow its SSE
    // progress stream, then re-host the produced image on HF and update the chat.
    const activeChat = state.getActiveChat();
    const sessionHash = Math.random().toString(36).substring(2, 15);

    const showError = (error) => {
        const errorMessage = error.message || String(error);
        // GPU-quota errors get a dedicated UI treatment.
        if (errorMessage.toLowerCase().includes("gpu") || errorMessage.toLowerCase().includes("quota")) {
            ui.displayError(modelBubbleOuterDivElement, errorMessage, 'gpu_quota_exceeded');
        } else {
            ui.displayError(modelBubbleOuterDivElement, errorMessage);
        }
    };
    try {
        const dimensions = getDimensionsFromRatio(aspectRatio);
        ui.showStatusUpdate("ارسال درخواست به سرور تصویر...", modelBubbleOuterDivElement);
        // Positional Gradio payload: prompt, seed, randomize, width, height, steps.
        const payload = [englishPrompt, Math.floor(Math.random() * 2147483647), true, dimensions.width, dimensions.height, 4];
        const joinPayload = { "fn_index": FN_INDEX_GEN_IMAGE, "data": payload, "event_data": null, "session_hash": sessionHash, "trigger_id": TRIGGER_ID_GEN_IMAGE };
        const joinResponse = await fetch(`${HF_IMAGE_GEN_URL_BASE}/queue/join`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(joinPayload),
            signal: state.globalAbortController.signal
        });
        if (!joinResponse.ok) {
            let errorText = `خطای سرور: ${joinResponse.status}`;
            try {
                const errorJson = await joinResponse.json();
                errorText = errorJson.error || errorJson.detail || errorText;
            } catch (e) {}
            throw new Error(errorText);
        }
        const joinResult = await joinResponse.json();
        if (!joinResult.event_id) throw new Error("event_id از Gradio دریافت نشد.");
        // Only one generation stream may be open at a time.
        if (state.currentImageEventSource) { state.currentImageEventSource.close(); }
        const eventSource = new EventSource(`${HF_IMAGE_GEN_URL_BASE}/queue/data?session_hash=${sessionHash}`);
        state.setCurrentImageEventSource(eventSource);
        await new Promise((resolve, reject) => {
            eventSource.onmessage = async (event) => {
                const data = JSON.parse(event.data);
                switch (data.msg) {
                    case "process_starts":
                        ui.showStatusUpdate("پردازش تصویر در سرور آغاز شد...", modelBubbleOuterDivElement);
                        break;
                    case "progress":
                        if (data.progress_data && data.progress_data.length > 0) {
                            const p = data.progress_data[0];
                            ui.showStatusUpdate(`در حال ساخت... (${p.index}/${p.length})`, modelBubbleOuterDivElement);
                        }
                        break;
                    case "process_completed":
                        eventSource.close();
                        state.setCurrentImageEventSource(null);
                        if (data.success && data.output?.data?.[0]?.url) {
                            const tempImageUrl = data.output.data[0].url;
                            ui.showStatusUpdate("تصویر ساخته شد. در حال دائمی‌سازی لینک...", modelBubbleOuterDivElement);
                            try {
                                const uploadResult = await uploadUrlToHuggingFace(tempImageUrl);
                                const permanentUrl = uploadResult.hf_url;
                                const newModelMessageParts = [{ image_url: permanentUrl }];
                                if (followUpText) { newModelMessageParts.push({ text: followUpText }); }
                                const newModelMessage = { role: 'model', parts: newModelMessageParts };
                                // Replace the temporary model placeholder message.
                                activeChat.messages[activeChat.messages.length - 1] = newModelMessage;
                                state.saveSessions();
                                ui.addMessageToUI(newModelMessage, activeChat.messages.length - 1, { isLastModel: true, animate: false }, modelBubbleOuterDivElement);
                                resolve();
                            } catch (uploadError) {
                                reject(new Error(`تصویر ساخته شد اما در آپلود به سرور دائمی خطا رخ داد: ${uploadError.message}`));
                            }
                        } else {
                            reject(new Error(data.output?.error || "پاسخ سرور ساخت تصویر، فاقد URL معتبر بود."));
                        }
                        break;
                    case 'queue_full':
                        eventSource.close();
                        reject(new Error("صف پردازش تصویر پر است. لطفاً چند لحظه دیگر تلاش کنید."));
                        break;
                }
            };
            eventSource.onerror = () => {
                eventSource.close();
                state.setCurrentImageEventSource(null);
                reject(new Error("ارتباط با سرور ساخت تصویر قطع شد."));
            };
        });
    } catch (error) {
        if (error.name !== 'AbortError') {
            showError(error);
        } else {
            modelBubbleOuterDivElement.querySelector('.message-content-area').innerHTML += '<p class="text-xs text-slate-500 mt-2 text-center">-- تولید پاسخ متوقف شد --</p>';
        }
    } finally {
        ui.resetState();
    }
}
export async function streamResponse(modelBubbleOuterDivElement, activeChat, newUserMessage, historyOverride = null, actionPayload = null) {
state.setGlobalAbortController(new AbortController());
let fullResponseText = "";
let finalMessageObject = { role: 'model', parts: [] };

text
const bodyPayload = { model: activeChat.model, chatId: activeChat.id, action: actionPayload }; if (historyOverride) { bodyPayload.history = historyOverride; } else { bodyPayload.newUserMessage = newUserMessage; } if (bodyPayload.newUserMessage && bodyPayload.newUserMessage.parts) { bodyPayload.newUserMessage.parts.forEach(p => { if (p.base64Data) { p.inlineData = { mimeType: p.mimeType, data: p.base64Data }; delete p.base64Data; } }); } try { const response = await fetch('/chat', { method: 'POST', headers: { 'Content-Type': 'application/json' }, signal: state.globalAbortController.signal, body: JSON.stringify(bodyPayload), }); if (!response.ok) { const errorData = await response.json().catch(() => ({error: {message: `خطای سرور: ${response.statusText}`}})); throw new Error(errorData.error.message || `خطای سرور: ${response.status}`); } const reader = response.body.getReader(); const decoder = new TextDecoder(); while (true) { const { done, value } = await reader.read(); if (done) break; const chunk = decoder.decode(value); const lines = chunk.split('\n'); for (const line of lines) { if (line.startsWith('data: ')) { try { const jsonStr = line.substring(6); if (jsonStr.trim() === '') continue; const data = JSON.parse(jsonStr); const lastUserMessage = activeChat.messages.find(msg => msg.role === 'user'); if (data.intent === 'generate_image') { reader.cancel(); if(lastUserMessage) { state.updateLastUserMessageWithImageGenData(activeChat.id, data.english_prompt, data.aspect_ratio); } await runExternalImageGenerator(data.english_prompt, data.aspect_ratio, modelBubbleOuterDivElement); return; } else if (data.intent === 'generate_image_with_text') { reader.cancel(); const { text, image_generation_payload, follow_up_text } = data; if (lastUserMessage) { state.updateLastUserMessageWithImageGenData(activeChat.id, image_generation_payload.english_prompt, image_generation_payload.aspect_ratio); } ui.streamFinalText(text, modelBubbleOuterDivElement); await 
runExternalImageGenerator( image_generation_payload.english_prompt, image_generation_payload.aspect_ratio, modelBubbleOuterDivElement, follow_up_text ); return; } else if (data.intent === 'clarify_action') { reader.cancel(); const commandMessage = { role: 'model', clarification: data.options, question: data.question, parts: [] }; activeChat.messages[activeChat.messages.length - 1] = commandMessage; state.saveSessions(); ui.addMessageToUI(commandMessage, activeChat.messages.length - 1, { isLastModel: true }, modelBubbleOuterDivElement); ui.resetState(); return; } else if (data.intent === 'edit_image') { reader.cancel(); const historyForSearch = historyOverride || activeChat.messages; let fileUrlForEditing = null; for (let i = historyForSearch.length - 1; i >= 0; i--) { const msg = historyForSearch[i]; if (msg.parts) { const imagePart = msg.parts.find(p => p.image_url || (p.edited_images && p.edited_images.length > 0)); if(imagePart) { fileUrlForEditing = imagePart.image_url || imagePart.edited_images[0]; break; } const filePart = msg.parts.find(p => p.fileUrl); if(filePart) { fileUrlForEditing = filePart.fileUrl; break; } } } if (!fileUrlForEditing) { throw new Error("قصد ویرایش تشخیص داده شد اما هیچ فایلی برای ویرایش یافت نشد."); } await runExternalImageEditor(data.prompt, fileUrlForEditing, modelBubbleOuterDivElement, activeChat.messages.length - 1); return; } else if (data.error) { throw new Error(data.error.message || JSON.stringify(data.error)); } else if (data.candidates) { const chunkText = data.candidates[0].content.parts[0].text; fullResponseText += chunkText; ui.streamFinalText(fullResponseText, modelBubbleOuterDivElement, false); } } catch (e) { console.warn("خطا در پردازش SSE chunk:", e, "Chunk:", line); } } } } if (fullResponseText) { finalMessageObject.parts.push({ text: fullResponseText }); } if (finalMessageObject.parts.length > 0) { activeChat.messages[activeChat.messages.length - 1] = finalMessageObject; } else { activeChat.messages.pop(); } 
ui.updateMessageActions(modelBubbleOuterDivElement, finalMessageObject, false, true); } catch (error) { if (error.name === 'AbortError') { modelBubbleOuterDivElement.querySelector('.message-content-area').innerHTML += '<p class="text-xs text-slate-500 mt-2 text-center">-- عملیات متوقف شد --</p>'; } else { ui.displayError(modelBubbleOuterDivElement, error.message || 'یک خطای ناشناخته رخ داد.'); } } finally { ui.resetState(); }

} این یکی import * as state from './state.js';
import * as ui from './ui.js';
import * as api from './api.js';

async function handleFileSelection(event) {
    // Upload the picked file, stash the result in app state, and keep the
    // submit button locked while the upload is in flight.
    const file = event.target.files[0];
    if (!file) return;

    ui.showFileUploading(file.name);
    ui.dom.submitButton.disabled = true;
    try {
        const onProgress = (percent) => { ui.updateUploadProgress(percent); };
        const uploadedFileData = await api.processAndUploadFile(file, onProgress);
        state.setAttachedFile(uploadedFileData);
        ui.showFileReady(file.name, file.type, uploadedFileData.url);
    } catch (error) {
        console.error("خطا در آپلود فایل:", error);
        // A user-initiated abort is not an error worth surfacing.
        if (error.message !== 'آپلود توسط کاربر لغو شد.') {
            ui.showFileError(error.message);
        } else {
            ui.hideFilePreview();
        }
    } finally {
        // Reset the <input> so re-selecting the same file fires "change" again.
        event.target.value = '';
        ui.toggleFilePopupMenu(false);
        ui.dom.submitButton.disabled = false;
    }
}

async function loadAndRenderActiveChat() {
    // Hydrate the active chat's messages from the server when needed, then
    // repaint the chat window and the history sidebar.
    const activeChat = state.getActiveChat();
    if (!activeChat) {
        handleNewChat();
        return;
    }

    try {
        // Only fetch when there are no local messages and the chat has been
        // persisted before (isNew chats have no server-side history yet).
        if (activeChat.messages.length === 0 && !activeChat.isNew) {
            const response = await fetch(`/history/${activeChat.id}`);
            if (response.ok) {
                activeChat.messages = await response.json();
            } else {
                console.error("Failed to load chat history from server.");
            }
        }
    } catch (error) {
        console.error("Error fetching chat history:", error);
    } finally {
        ui.renderActiveChat();
        ui.renderHistoryList();
    }
}

function handleNewChat() {
    // Create a fresh empty session, make it the active one, and repaint.
    const newSession = {
        id: Date.now().toString(),
        title: 'چت جدید',
        model: 'gemini-1.5-pro-latest',
        messages: [],
        isNew: true,
    };
    state.chatSessions.unshift(newSession);
    state.setActiveChatId(newSession.id);
    state.saveSessions();
    ui.renderActiveChat();
    ui.renderHistoryList();
}

function getFullChatText(session) {
    // Flatten a session into plain text: "role:\ntext" entries joined by ---.
    if (!session || !session.messages) return "";
    return session.messages
        .map(msg => {
            const prefix = msg.role === 'user' ? 'کاربر' : 'مدل';
            const textContent = msg.parts?.find(p => p.text)?.text || '[محتوای غیر متنی]';
            // BUG FIX: the backticks of this template literal were lost in the paste.
            return `${prefix}:\n${textContent}`;
        })
        .join('\n\n---\n\n');
}

document.addEventListener('DOMContentLoaded', () => {
ui.initTheme();
state.loadSessions();

text
if (state.chatSessions.length === 0 || !state.getActiveChat()) { handleNewChat(); } else { state.setActiveChatId(state.activeChatId || state.chatSessions[0].id); loadAndRenderActiveChat(); } ui.setupMobileKeyboardFix(); ui.setMinTextareaHeight(ui.dom.messageInput.scrollHeight); ui.dom.messageForm.style.borderRadius = '28px'; ui.dom.newChatButton.addEventListener('click', handleNewChat); ui.dom.menuButton.addEventListener('click', () => ui.toggleSidebar(true)); ui.dom.sidebarOverlay.addEventListener('click', () => ui.toggleSidebar(false)); ui.dom.deleteAllChatsButton.addEventListener('click', () => { ui.showConfirmModal('آیا از حذف تمام چت‌ها مطمئن هستید؟ این عمل غیرقابل بازگشت است.', () => { state.setChatSessions([]); state.setActiveChatId(null); state.saveSessions(); handleNewChat(); ui.toggleSidebar(false); }); }); ui.dom.modelSelectButton.addEventListener('click', () => ui.toggleModal(true)); ui.dom.modelSelectModal.addEventListener('click', (e) => { if (e.target === ui.dom.modelSelectModal) ui.toggleModal(false); }); ui.dom.modelOptionCards.forEach(card => { card.addEventListener('click', () => { const activeChat = state.getActiveChat(); if (!activeChat) return; activeChat.model = card.dataset.model; ui.dom.currentModelName.textContent = card.dataset.name; ui.updateRadioButtons(); state.saveSessions(); ui.toggleModal(false); }); }); ui.dom.settingsButton.addEventListener('click', () => ui.toggleSettingsModal(true)); ui.dom.settingsModal.addEventListener('click', (e) => { if (e.target === ui.dom.settingsModal) ui.toggleSettingsModal(false); }); ui.dom.themeToggle.addEventListener('change', (e) => { const newTheme = e.target.checked ? 
'dark' : 'light'; localStorage.setItem('theme', newTheme); ui.applyTheme(newTheme); }); ui.dom.attachFileButton.addEventListener('click', (e) => { e.stopPropagation(); const isVisible = !ui.dom.filePopupMenu.classList.contains('hidden'); ui.toggleFilePopupMenu(!isVisible); }); window.addEventListener('click', (e) => { if (!ui.dom.filePopupMenu.classList.contains('hidden') && !ui.dom.filePopupMenu.contains(e.target) && e.target !== ui.dom.attachFileButton && !ui.dom.attachFileButton.contains(e.target)) { ui.toggleFilePopupMenu(false); } }); ui.dom.selectImageOption.addEventListener('click', () => { ui.dom.imageFileInput.click(); }); ui.dom.selectFileOption.addEventListener('click', () => { ui.dom.generalFileInput.click(); }); ui.dom.imageFileInput.addEventListener('change', handleFileSelection); ui.dom.generalFileInput.addEventListener('change', handleFileSelection); ui.dom.removeImageButton.addEventListener('click', () => { if (state.currentUploadXHR) { state.currentUploadXHR.abort(); console.log("آپلود توسط کاربر لغو شد."); } state.setAttachedFile(null); ui.hideFilePreview(); ui.dom.submitButton.disabled = false; }); ui.dom.htmlPreviewCloseBtn.addEventListener('click', () => ui.toggleHtmlPreviewModal(false)); ui.dom.htmlPreviewOverlay.addEventListener('click', () => ui.toggleHtmlPreviewModal(false)); ui.dom.galleryDownloadBtn.addEventListener('click', function() { const url = ui.getCurrentGalleryImageUrl(); if (url) { api.uploadToAISADAAndOpenAlpha(url, this); } }); ui.dom.messageForm.addEventListener('submit', async (e) => { e.preventDefault(); if (state.isGenerating) { if (state.globalAbortController) state.globalAbortController.abort(); if (state.currentImageEventSource) state.currentImageEventSource.close(); return; } const activeChat = state.getActiveChat(); if (!activeChat) return; const userMessageText = ui.dom.messageInput.value.trim(); if (!userMessageText && !state.attachedFile) return; ui.setGeneratingState(true); const isFirstMessageOfChat = 
activeChat.messages.length === 0; if (isFirstMessageOfChat) { const welcomeScreen = ui.dom.chatWindow.querySelector('.welcome-screen'); if (welcomeScreen) welcomeScreen.remove(); } const previousLastUserIndex = state.findLastIndex(activeChat.messages, msg => msg.role === 'user'); if (previousLastUserIndex !== -1) { const previousUserElement = ui.dom.chatWindow.querySelector(`.message-entry[data-index="${previousLastUserIndex}"]`); if (previousUserElement) { ui.updateMessageActions(previousUserElement, activeChat.messages[previousLastUserIndex], false, false); } } const previousLastModelIndex = state.findLastIndex(activeChat.messages, msg => msg.role === 'model'); if (previousLastModelIndex !== -1) { const isItTheLastMessageOverall = previousLastModelIndex === activeChat.messages.length - 1; if (isItTheLastMessageOverall) { const previousModelElement = ui.dom.chatWindow.querySelector(`.message-entry[data-index="${previousLastModelIndex}"]`); if (previousModelElement) { ui.updateMessageActions(previousModelElement, activeChat.messages[previousLastModelIndex], false, false); } } } const userParts = []; if (state.attachedFile) { userParts.push({ fileUrl: state.attachedFile.url, mimeType: state.attachedFile.mimeType, name: state.attachedFile.name, base64Data: state.attachedFile.base64Data }); ui.hideFilePreview(); } if (userMessageText) { userParts.push({ text: userMessageText }); } const newUserMessage = { role: 'user', parts: userParts }; activeChat.messages.push(newUserMessage); ui.addMessageToUI(newUserMessage, activeChat.messages.length - 1, {isLastUser: true, animate: true}); const modelPlaceholderMessage = { role: 'model', isTemporary: true, parts: [] }; activeChat.messages.push(modelPlaceholderMessage); const modelBubbleOuterDiv = ui.addMessageToUI(modelPlaceholderMessage, activeChat.messages.length - 1, {animate: true}); if (isFirstMessageOfChat) { if (userMessageText) { activeChat.title = userMessageText.substring(0, 30) + (userMessageText.length > 30 ? '...' 
: ''); } else if (state.attachedFile) { activeChat.title = `فایل: ${state.attachedFile.name}`; } delete activeChat.isNew; state.saveSessions(); ui.renderHistoryList(); } ui.dom.messageInput.value = ''; ui.dom.messageInput.dispatchEvent(new Event('input')); try { await api.streamResponse(modelBubbleOuterDiv, activeChat, newUserMessage); } finally { if (state.attachedFile) { state.setAttachedFile(null); } } }); ui.dom.chatWindow.addEventListener('click', async (e) => { const galleryItem = e.target.closest('.js-image-gallery-grid [data-index]'); if (galleryItem) { const grid = galleryItem.closest('.js-image-gallery-grid'); const urls = grid.dataset.urls; const index = galleryItem.dataset.index; if (urls && index !== undefined) { ui.openImageGallery(urls, parseInt(index, 10)); return; } } const clarificationButton = e.target.closest('.clarification-button'); if (clarificationButton) { const payloadString = clarificationButton.dataset.actionPayload; if (!payloadString || state.isGenerating) return; const actionPayload = JSON.parse(payloadString); const activeChat = state.getActiveChat(); if (!activeChat) return; ui.setGeneratingState(true); const clarificationMessageElement = clarificationButton.closest('.message-entry'); const clarificationMessageIndex = parseInt(clarificationMessageElement.dataset.index, 10); if (isNaN(clarificationMessageIndex)) { ui.resetState(); return; } const modelPlaceholderMessage = { role: 'model', isTemporary: true, parts: [] }; activeChat.messages[clarificationMessageIndex] = modelPlaceholderMessage; const newModelBubble = ui.addMessageToUI(modelPlaceholderMessage, clarificationMessageIndex, {}, clarificationMessageElement); const historyBeforeAction = activeChat.messages.slice(0, clarificationMessageIndex); if (actionPayload.intent === 'edit_image') { let fileUrlForEditing = null; for (let i = historyBeforeAction.length - 1; i >= 0; i--) { const msg = historyBeforeAction[i]; if (msg.parts) { const imagePart = msg.parts.find(p => p.image_url 
|| (p.edited_images && p.edited_images.length > 0)); if(imagePart) { fileUrlForEditing = imagePart.image_url || imagePart.edited_images[0]; break; } const filePart = msg.parts.find(p => p.fileUrl); if(filePart) { fileUrlForEditing = filePart.fileUrl; break; } } } if (fileUrlForEditing) { await api.runExternalImageEditor(actionPayload.prompt, fileUrlForEditing, newModelBubble, clarificationMessageIndex); } else { ui.displayError(newModelBubble, "متاسفانه تصویری برای ویرایش پیدا نشد."); ui.resetState(); } } else if (actionPayload.intent === 'regenerate_with_enhancement') { await api.streamResponse(newModelBubble, activeChat, null, historyBeforeAction, actionPayload); } return; } const button = e.target.closest('.action-button'); if (!button) return; const action = button.dataset.action; const messageEntry = button.closest('.message-entry'); if (!messageEntry) return; const messageIndex = parseInt(messageEntry.dataset.index, 10); const activeChat = state.getActiveChat(); if (!activeChat || isNaN(messageIndex)) return; const message = activeChat.messages[messageIndex]; if (action === 'download-image') { const imageUrl = message.parts.find(p => p.image_url)?.image_url; if (imageUrl) { api.uploadToAISADAAndOpenAlpha(imageUrl, button); } } else if (action === 'copy') { const textToCopy = message.parts?.find(p => p.text)?.text || ''; if (textToCopy) { navigator.clipboard.writeText(textToCopy).then(() => { ui.showCopyFeedback(button); }); } } else if (action === 'like' || action === 'dislike') { ui.handleLikeDislike(button, messageEntry); } else if (action === 'regenerate') { if (state.isGenerating) return; ui.setGeneratingState(true); state.setGpuGuideState(false); const lastModelMessageIndex = state.findLastIndex(activeChat.messages, msg => msg.role === 'model'); if (messageIndex === lastModelMessageIndex) { activeChat.messages.length = messageIndex; messageEntry.remove(); const lastUserMessageIndex = state.findLastIndex(activeChat.messages, msg => msg.role === 'user'); 
if (lastUserMessageIndex !== -1) { const lastUserMessageElement = ui.dom.chatWindow.querySelector(`.message-entry[data-index="${lastUserMessageIndex}"]`); if (lastUserMessageElement) { ui.updateMessageActions(lastUserMessageElement, activeChat.messages[lastUserMessageIndex], true, false); } } const modelPlaceholderMessage = { role: 'model', isTemporary: true, parts: [] }; activeChat.messages.push(modelPlaceholderMessage); const newModelBubble = ui.addMessageToUI(modelPlaceholderMessage, activeChat.messages.length - 1, { animate: true }); await api.streamResponse(newModelBubble, activeChat, null, activeChat.messages.slice(0, -1)); } else { ui.resetState(); } } else if (action === 'edit') { if (state.isGenerating) return; const lastUserMessageIndex = state.findLastIndex(activeChat.messages, msg => msg.role === 'user'); if (messageIndex === lastUserMessageIndex) { const textPart = message.parts.find(p => p.text); const filePart = message.parts.find(p => p.fileUrl); if (textPart || filePart) { ui.showEditModal(textPart ? 
textPart.text : '', async (newText) => { ui.setGeneratingState(true); const allMessagesInDOM = ui.dom.chatWindow.querySelectorAll('.message-entry'); allMessagesInDOM.forEach(msgEl => { const idx = parseInt(msgEl.dataset.index, 10); if (idx >= messageIndex) { msgEl.remove(); } }); const historyUpToEdit = activeChat.messages.slice(0, messageIndex); activeChat.messages.length = messageIndex; const newParts = []; if (filePart) { newParts.push(filePart); } if (newText.trim()) newParts.push({ text: newText }); const editedUserMessage = { role: 'user', parts: newParts }; const historyForApi = [...historyUpToEdit, editedUserMessage]; activeChat.messages.push(editedUserMessage); ui.addMessageToUI(editedUserMessage, activeChat.messages.length - 1, { isLastUser: true, animate: true }); const modelPlaceholderMessage = { role: 'model', isTemporary: true, parts: [] }; activeChat.messages.push(modelPlaceholderMessage); state.saveSessions(); const newModelBubble = ui.addMessageToUI(modelPlaceholderMessage, activeChat.messages.length - 1, { animate: true }); await api.streamResponse(newModelBubble, activeChat, null, historyForApi); }); } } } else if (action === 'show-message-menu') { ui.showMessageMenu(e, messageIndex); } }); ui.dom.historyItemMenu.addEventListener('click', (e) => { const button = e.target.closest('.menu-item'); if (!button) return; const action = button.dataset.action; const format = button.dataset.format; const sessionId = ui.dom.historyItemMenu.dataset.sessionId; const session = state.chatSessions.find(s => s.id === sessionId); if (!session) return; if (action === 'rename') { ui.showRenameModal(session.title, (newTitle) => { session.title = newTitle; state.saveSessions(); ui.renderHistoryList(); }); } else if (action === 'delete') { ui.showConfirmModal(`آیا از حذف گفتگوی "${session.title}" مطمئن هستید؟`, () => { state.deleteChat(sessionId); if (state.activeChatId === sessionId) { state.setActiveChatId(null); loadAndRenderActiveChat(); } else { 
ui.renderHistoryList(); } }); } else if (action === 'convert-chat') { const fullText = getFullChatText(session); api.convertTextToFile(fullText, format, button); } ui.dom.historyItemMenu.classList.remove('visible'); }); ui.dom.messageItemMenu.addEventListener('click', (e) => { const menu = ui.dom.messageItemMenu; const closeMenu = () => { menu.classList.remove('visible'); setTimeout(() => { menu.classList.add('hidden'); }, 300); }; if (e.target === ui.dom.messageItemMenuOverlay) { closeMenu(); return; } const button = e.target.closest('.menu-item'); if (!button) return; const action = button.dataset.action; const format = button.dataset.format; const messageIndex = parseInt(menu.dataset.messageIndex, 10); const activeChat = state.getActiveChat(); if (!activeChat || isNaN(messageIndex)) { closeMenu(); return; } const message = activeChat.messages[messageIndex]; if (action === 'delete-message') { ui.showConfirmModal('آیا از حذف این پیام و تمام پیام‌های بعد از آن مطمئن هستید؟', () => { state.deleteMessageAndFollowing(activeChat.id, messageIndex); ui.renderActiveChat(); }); } else if (action === 'convert-message') { const textContent = message.parts?.find(p => p.text)?.text || ''; if (textContent) { api.convertTextToFile(textContent, format, button); } else { alert('محتوای متنی برای تبدیل وجود ندارد.'); } } closeMenu(); }); ui.dom.messageInput.addEventListener('input', () => { ui.adjustTextareaHeight(ui.dom.messageInput, ui.dom.messageForm); }); ui.dom.editInput.addEventListener('input', () => { ui.adjustTextareaHeight(ui.dom.editInput); });

});

// Exposed on window so code outside this module (presumably inline handlers in generated markup) can call it — TODO confirm against templates.
window.handleSuggestionClick = ui.handleSuggestionClick;
window.uploadToAISADAAndOpenAlpha = api.uploadToAISADAAndOpenAlpha; این یکی // این فایل تمام وضعیت‌های برنامه را مدیریت می‌کند

// START: changes to cap the stored chat history
export const MAX_CHAT_SESSIONS = 150; // maximum number of chat sessions kept in history
// END: changes to cap the stored chat history

export let chatSessions = []; // all chat sessions; new chats are unshifted to the front (see enforceSessionLimit note)
export let activeChatId = null; // id of the currently open chat, or null when none is active
export let attachedFile = null; // file currently attached to the pending message, or null
export let isGenerating = false; // true while a model response is being generated
export let globalAbortController = null; // AbortController for the in-flight request, or null
export let currentImageEventSource = null; // active EventSource stream, or null
export let currentUploadXHR = null; // XHR of an in-progress upload (kept so it can be aborted), or null
export let isGpuGuideActive = false; // GPU-guide UI flag

// Replace the full list of chat sessions.
export function setChatSessions(sessions) {
  chatSessions = sessions;
}

// Record which chat session is currently active (null for none).
export function setActiveChatId(chatId) {
  activeChatId = chatId;
}

// Set (or clear, with null) the pending attached file.
export function setAttachedFile(newFile) {
  attachedFile = newFile;
}

// Flag whether a model response is currently being generated.
export function setGenerating(flag) {
  isGenerating = flag;
}

// Store the AbortController for the in-flight request (null to clear).
export function setGlobalAbortController(abortController) {
  globalAbortController = abortController;
}

// Track the active EventSource (null to clear).
export function setCurrentImageEventSource(eventSource) {
  currentImageEventSource = eventSource;
}

// Remember the XHR of an in-progress upload so callers can abort it later.
export function setCurrentUploadXHR(request) {
  currentUploadXHR = request;
}

// Toggle the GPU-guide flag.
export function setGpuGuideState(active) {
  isGpuGuideActive = active;
}

// Return the session whose id matches activeChatId, or undefined when none matches.
export function getActiveChat() {
  return chatSessions.find((session) => session.id === activeChatId);
}

// START: new function to enforce the session-history limit
/**
 * Keep the number of stored chat sessions within MAX_CHAT_SESSIONS.
 * If the limit is exceeded, the oldest sessions are removed.
 * New chats are added to the front of the array with unshift, so the
 * oldest sessions live at the end of the array.
 */
function enforceSessionLimit() {
  if (chatSessions.length > MAX_CHAT_SESSIONS) {
    // Drop the surplus (oldest) entries from the end of the array.
    chatSessions.splice(MAX_CHAT_SESSIONS);
    // Fix: the original line was missing the template-literal backticks,
    // which is a SyntaxError in JavaScript.
    console.log(`تعداد تاریخچه به ${MAX_CHAT_SESSIONS} محدود شد. قدیمی‌ترین‌ها حذف شدند.`);
  }
}
// END: new function to enforce the session-history limit

// *** fixed function ***
// Persist all chat sessions to localStorage, after trimming to the history limit.
export function saveSessions() {
  // START: changes to cap the stored chat history
  // Apply the limit before persisting.
  enforceSessionLimit();
  // END: changes to cap the stored chat history

  // Fix: removed a stray bare `text` statement (a leaked markdown fence label)
  // that would throw ReferenceError at runtime.
  try {
    // Deep copy so the live session objects are never mutated.
    const sessionsToSave = JSON.parse(JSON.stringify(chatSessions));
    // Strip base64Data from every message part before writing to storage.
    sessionsToSave.forEach(session => {
      session.messages.forEach(message => {
        if (message.parts) {
          message.parts.forEach(part => { delete part.base64Data; });
        }
      });
    });
    localStorage.setItem('alphaChatSessions', JSON.stringify(sessionsToSave));
  } catch (e) {
    console.error("Failed to save sessions to localStorage:", e);
  }
}

// *** fixed function ***
// Load chat sessions from localStorage, falling back to an empty list on error.
export function loadSessions() {
  // Fix: removed a stray bare `text` statement (a leaked markdown fence label)
  // that would throw ReferenceError at runtime.
  try {
    const saved = localStorage.getItem('alphaChatSessions');
    chatSessions = saved ? JSON.parse(saved) : [];

    // START: changes to cap the stored chat history
    // Apply the limit after loading so pre-existing surplus entries are removed.
    enforceSessionLimit();
    // END: changes to cap the stored chat history
  } catch (e) {
    console.error("Failed to load sessions from localStorage:", e);
    chatSessions = [];
  }
}

// Return the index of the LAST element satisfying predicate, or -1 if none does.
export function findLastIndex(array, predicate) {
  let index = array.length;
  while (index--) {
    if (predicate(array[index])) {
      return index;
    }
  }
  return -1;
}

// Remove a single message from the given chat and persist the change.
// No-op when the chat id is unknown or there is no (truthy) message at
// messageIndex — note a falsy message slot would also be skipped.
export function deleteMessage(chatId, messageIndex) {
const chat = chatSessions.find(s => s.id === chatId);
if (chat && chat.messages[messageIndex]) {
chat.messages.splice(messageIndex, 1);
saveSessions();
}
} الان اسپیس جدید من اون اولی که بهت دادم یک مشکلی داره که این یکی اسپیس قدیمی من این مشکل ندارد اسپیس جدید من پاسخ های طولانی رو جواب نمی‌ده اگر یک سابقه چت یکم طولانی بشه پاسخ های که میاد از طرف هوش مصنوعی نصفه نیمه میاد . الان میخوام اسپیس جدید درست کنم تا همیشه حتی اگر سابقه چت طولانی میشه پاسخ های هوش مصنوعی کامل و بدون مشکل بیاد باید کدوم کد تغییر بدم. کامل یکجا درست کن بفرست کد که نیاز به تغییر داره توضیحات داخل کد قرار نده کد کامل یکجا درست کن بفرست

Thought

Thought for 2m 57s

Answer

app.py

python
import os import re import json import logging from pathlib import Path from flask import Flask, render_template, request, Response, jsonify from filelock import FileLock import requests from huggingface_hub import HfApi, CommitOperationAdd, CommitOperationDelete, create_repo, hf_hub_download class PersianLogFormatter(logging.Formatter): LEVEL_MAP = {logging.DEBUG: "دیباگ", logging.INFO: "اطلاع", logging.WARNING: "هشدار", logging.ERROR: "خطا", logging.CRITICAL: "بحرانی"} def format(self, record): record.levelname = self.LEVEL_MAP.get(record.levelno, record.levelname) return super().format(record) def setup_logging(): fmt = '[%Y-%m-%d %H:%M:%S]' formatter = PersianLogFormatter('[%(asctime)s] [%(levelname)s]: %(message)s', datefmt=fmt.strip('[]')) root = logging.getLogger() if root.hasHandlers(): root.handlers.clear() h = logging.StreamHandler(); h.setFormatter(formatter) root.addHandler(h); root.setLevel(logging.INFO) setup_logging() ALL_GEMINI_API_KEYS_STR = os.getenv('ALL_GEMINI_API_KEYS', '') if not ALL_GEMINI_API_KEYS_STR: raise RuntimeError("ALL_GEMINI_API_KEYS not set") MASTER_API_KEYS = [k.strip() for k in ALL_GEMINI_API_KEYS_STR.split(',') if k.strip()] if not MASTER_API_KEYS: raise RuntimeError("No valid keys") SHM = Path('/dev/shm'); SHM.mkdir(exist_ok=True) COUNTER_FILE_PATH = str(SHM / 'gunicorn_key_counter.txt') lock = FileLock(COUNTER_FILE_PATH + ".lock") with lock: if not os.path.exists(COUNTER_FILE_PATH): with open(COUNTER_FILE_PATH, 'w') as f: f.write('0') CACHE_DIR = SHM / 'file_cache'; CACHE_DIR.mkdir(parents=True, exist_ok=True) META_DIR = SHM / 'chat_meta'; META_DIR.mkdir(parents=True, exist_ok=True) HF_LOCAL_CACHE = SHM / 'hf_ds_cache'; HF_LOCAL_CACHE.mkdir(parents=True, exist_ok=True) HF_TOKEN = os.getenv("HF_TOKEN", "") HF_REPO_ID = os.getenv("HF_DATASET_REPO", "Asrasahar/alpha-chat-history") api_hf = HfApi(token=HF_TOKEN if HF_TOKEN else None) if HF_TOKEN: try: create_repo(repo_id=HF_REPO_ID, repo_type="dataset", token=HF_TOKEN, 
exist_ok=True) except Exception: pass app = Flask(__name__) GOOGLE_API_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/models' BASE_RULES_FA = """ دستورالعمل‌های اجباری برای دستیار هوش مصنوعی آلفا: هویت: تو «چت‌بات آلفا» هستی که توسط هوش مصنوعی آلفا توسعه داده شده‌ای. تحلیل جامع چندرسانه‌ای: هر نوع فایل را تحلیل و محتوا را منظم شرح بده. پاسخ را با تشخیص نوع فایل شروع کن. اگر کاربر فقط فایل فرستاد و صراحتاً ساخت/ویرایش تصویر نخواست، فقط تحلیل کن. اولویت ابزارها: handle_specific_edit > change_image_aspect_ratio > regenerate_with_enhancement > generate_image > perform_internet_search """ def get_and_increment_key_index(): l = FileLock(COUNTER_FILE_PATH + ".lock") with l: try: with open(COUNTER_FILE_PATH, 'r') as f: cur = int(f.read().strip()) except (FileNotFoundError, ValueError): cur = 0 idx = cur % len(MASTER_API_KEYS) with open(COUNTER_FILE_PATH, 'w') as f: f.write(str(cur + 1)) return idx def get_keys_for_request(): s = get_and_increment_key_index() return MASTER_API_KEYS[s:] + MASTER_API_KEYS[:s] def _meta_path(chat_id: str) -> Path: safe = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_')) return META_DIR / f"{safe}.json" def load_chat_meta(chat_id: str) -> dict: p = _meta_path(chat_id); l = FileLock(str(p) + ".lock") with l: if p.exists(): try: return json.load(open(p, 'r', encoding='utf-8')) except Exception: return {} return {} def save_chat_meta(chat_id: str, meta: dict): p = _meta_path(chat_id); l = FileLock(str(p) + ".lock") with l: try: with open(p, 'w', encoding='utf-8') as f: json.dump(meta or {}, f, ensure_ascii=False) except Exception: pass def update_chat_meta(chat_id: str, **kwargs): m = load_chat_meta(chat_id) m.update({k: v for k, v in kwargs.items() if v is not None}) save_chat_meta(chat_id, m) def _file_cache_path(chat_id: str) -> Path: safe = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_')) return CACHE_DIR / f"{safe}.json" def load_file_cache(chat_id: str) -> dict: p = _file_cache_path(chat_id) 
if p.exists(): try: return json.load(open(p, 'r', encoding='utf-8')) except Exception: pass return {"files": []} def save_file_cache(chat_id: str, cache: dict): p = _file_cache_path(chat_id); l = FileLock(str(p) + ".lock") with l: try: with open(p, 'w', encoding='utf-8') as f: json.dump(cache or {"files": []}, f, ensure_ascii=False) except Exception: pass def update_file_cache_from_inline_files(chat_id: str, inline_files: list): if not inline_files: return cache = load_file_cache(chat_id) existing = {f.get("fileUrl"): f for f in cache.get("files", []) if f.get("fileUrl")} changed = False for it in inline_files: url = (it or {}).get("fileUrl") mime = (it or {}).get("mimeType") data = (it or {}).get("data") if not (url and mime and data): continue rec = {"fileUrl": url, "inlineData": {"mimeType": mime, "data": data}} if url not in existing or existing[url].get("inlineData", {}).get("data") != data: existing[url] = rec; changed = True if changed: cache["files"] = list(existing.values()) save_file_cache(chat_id, cache) def inject_inline_data_into_history(chat_id: str, history: list) -> list: cache = load_file_cache(chat_id) mapping = {f.get("fileUrl"): f.get("inlineData") for f in cache.get("files", []) if f.get("fileUrl")} if not mapping: return history out = [] for msg in history: if msg.get("role") != "user": out.append(msg); continue parts = msg.get("parts", []) new_parts = [] for p in parts: if isinstance(p, dict) and p.get("fileUrl"): url = p["fileUrl"] if url in mapping: new_parts.append({"inlineData": {"mimeType": mapping[url]["mimeType"], "data": mapping[url]["data"]}}) new_parts.append(p) msg2 = {"role": msg["role"], "parts": new_parts} out.append(msg2) return out def get_all_tools(): search_tool = {"name":"perform_internet_search","description":"جستجوی به‌روز","parameters":{"type":"OBJECT","properties":{"query":{"type":"STRING"}},"required":["query"]}} image_tools = [ {"name":"generate_image","description":"ساخت تصویر 
جدید","parameters":{"type":"OBJECT","properties":{"english_prompt":{"type":"STRING"},"aspect_ratio":{"type":"STRING"},"initial_response_text":{"type":"STRING"},"follow_up_text":{"type":"STRING"}},"required":["english_prompt","initial_response_text","follow_up_text"]}}, {"name":"handle_specific_edit","description":"ویرایش مشخص","parameters":{"type":"OBJECT","properties":{"edit_request":{"type":"STRING"}},"required":["edit_request"]}}, {"name":"regenerate_with_enhancement","description":"بازسازی با بهبود","parameters":{"type":"OBJECT","properties":{"enhancement_request":{"type":"STRING"},"previous_english_prompt":{"type":"STRING"},"previous_aspect_ratio":{"type":"STRING"}},"required":["enhancement_request","previous_english_prompt","previous_aspect_ratio"]}}, {"name":"change_image_aspect_ratio","description":"تغییر نسبت تصویر","parameters":{"type":"OBJECT","properties":{"new_aspect_ratio":{"type":"STRING"},"previous_english_prompt":{"type":"STRING"}},"required":["new_aspect_ratio","previous_english_prompt"]}} ] return [{"function_declarations": image_tools + [search_tool]}] def enhance_prompt(base_prompt, enhancement_request, model): api_key = get_keys_for_request()[0] url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}" system_prompt = f"You are an expert prompt engineer. Merge the base English image prompt with the user's modification. 
Return only the final English prompt.\n\nBase Prompt: \"{base_prompt}\"\nUser's Request: \"{enhancement_request}\"" payload = {"contents":[{"role":"user","parts":[{"text":system_prompt}]}],"generationConfig":{"temperature":0.7,"maxOutputTokens":2048}} try: r = requests.post(url, json=payload, timeout=60); r.raise_for_status() return r.json()["candidates"][0]["content"]["parts"][0]["text"].strip() except Exception: return f"{base_prompt}, {enhancement_request}" def create_artistic_prompt(user_prompt, model): api_key = get_keys_for_request()[0] url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}" system_instruction = "Convert user's idea to a highly-detailed English prompt. Output ONLY the final English prompt." payload = {"contents":[{"role":"user","parts":[{"text":user_prompt}]}],"systemInstruction":{"parts":[{"text":system_instruction}]},"generationConfig":{"temperature":0.7,"maxOutputTokens":2048}} try: r = requests.post(url, json=payload, timeout=60); r.raise_for_status() return r.json()["candidates"][0]["content"]["parts"][0]["text"].strip() except Exception: return user_prompt def stream_search_results(query): keys = get_keys_for_request() model = 'gemini-2.5-flash' url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse" payload = {"contents":[{"role":"user","parts":[{"text":query}]}],"tools":[{"google_search":{}}],"systemInstruction":{"parts":[{"text":"Answer in Persian."}]}} for k in keys: try: with requests.post(url, params={'key': k}, json=payload, stream=True, timeout=180) as r: if r.status_code == 429: continue r.raise_for_status() for line in r.iter_lines(): if line: yield line.decode('utf-8') + "\n\n" return except requests.exceptions.RequestException: continue yield f"data: {json.dumps({'error': {'code': 'SEARCH_FAILED','message': 'سرویس جستجو موقتاً در دسترس نیست.'}})}\n\n" def is_tool_like_text(txt: str) -> bool: if not txt: return False patterns = [r'\bfunctionCall\s*:', r'\bhandle_specific_edit\s*\(', 
r'\bchange_image_aspect_ratio\s*\(', r'\bregenerate_with_enhancement\s*\(', r'\bgenerate_image\s*\(', r'print\s*\(\s*handle_specific_edit'] return any(re.search(p, txt, flags=re.I) for p in patterns) def sse_text_event(text: str) -> str: return f"data: {json.dumps({'candidates':[{'content':{'parts':[{'text': text}]}}]})}\n\n" def classify_user_intent(user_text: str) -> dict: if not user_text or not user_text.strip(): return {"intent":"NONE"} keys = get_keys_for_request() model = 'gemini-1.5-flash' url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent" system = "Return strict JSON: {\"intent\":\"...\", \"normalized_edit\":\"...\", \"new_aspect_ratio\":\"...\", \"code_language\":\"...\"}. intents=[NONE,SPECIFIC_EDIT,ASPECT_RATIO_CHANGE,QUALITY_ENHANCEMENT,NEW_IMAGE,CODE_TASK]." payload = {"contents":[{"role":"user","parts":[{"text": user_text}]}],"systemInstruction":{"parts":[{"text": system}]},"generationConfig":{"temperature":0.0,"maxOutputTokens":128}} for k in keys: try: r = requests.post(f"{url}?key={k}", json=payload, timeout=25) if r.status_code == 429: continue r.raise_for_status() txt = r.json()["candidates"][0]["content"]["parts"][0]["text"].strip().strip('`').strip() s, e = txt.find('{'), txt.rfind('}') if s != -1 and e != -1: txt = txt[s:e+1] d = json.loads(txt) if "intent" not in d: return {"intent":"NONE"} return d except Exception: continue return {"intent":"NONE"} def stream_text_only(user_text: str, model_name: str): keys = get_keys_for_request() system_text = BASE_RULES_FA + "\n[TURN MODE: TEXT ONLY]\n" contents = [{"role":"user","parts":[{"text": user_text }]}] for k in keys: try: url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={k}" payload = {"contents": contents, "tools": None, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192}} with requests.post(url, json=payload, stream=True, timeout=720) as resp: if resp.status_code == 429: continue 
resp.raise_for_status() for line in resp.iter_lines(): if line and line.startswith(b"data: "): yield line.decode('utf-8') + "\n\n" return except requests.exceptions.RequestException: continue yield f"data: {json.dumps({'error': {'code':'TEXT_STREAM_FAILED','message':'پاسخ متنی موقتاً در دسترس نیست.'}})}\n\n" def stream_code_reply(user_text: str, model_name: str, code_language: str = None): keys = get_keys_for_request() system_text = BASE_RULES_FA + "\n[TURN MODE: CODE]\n" contents = [{"role":"user","parts":[{"text": user_text }]}] for k in keys: try: url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={k}" payload = {"contents": contents, "tools": None, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.2, "maxOutputTokens": 4096}} with requests.post(url, json=payload, stream=True, timeout=720) as r: if r.status_code == 429: continue r.raise_for_status() for line in r.iter_lines(): if line: yield line.decode('utf-8') + "\n\n" return except requests.exceptions.RequestException: continue yield f"data: {json.dumps({'error': {'code':'CODE_STREAM_FAILED','message':'تولید کد موقتاً در دسترس نیست.'}})}\n\n" def stream_file_analysis(history_for_gemini, model_name: str): keys = get_keys_for_request() system_text = BASE_RULES_FA + "\n[TURN MODE: FILE ANALYSIS]\n" for k in keys: try: url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={k}" payload = {"contents": history_for_gemini, "tools": None, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192}} with requests.post(url, json=payload, stream=True, timeout=720) as r: if r.status_code == 429: continue r.raise_for_status() for line in r.iter_lines(): if line: yield line.decode('utf-8') + "\n\n" return except requests.exceptions.RequestException: continue yield f"data: {json.dumps({'error': {'code':'FILE_ANALYSIS_FAILED','message':'تحلیل فایل موقتاً در دسترس 
نیست.'}})}\n\n" @app.route('/') def index(): return render_template('index.html') @app.route('/storage/index', methods=['GET']) def storage_index(): try: try: path = hf_hub_download(repo_id=HF_REPO_ID, repo_type="dataset", filename="index.json", token=HF_TOKEN, local_dir=str(HF_LOCAL_CACHE), local_dir_use_symlinks=False) data = json.load(open(path, 'r', encoding='utf-8')) except Exception: data = {"items": []} return jsonify(data) except Exception: return jsonify({"items": []}) @app.route('/storage/session/<chat_id>', methods=['GET', 'DELETE']) def storage_session_get_delete(chat_id): if request.method == 'GET': try: path = hf_hub_download(repo_id=HF_REPO_ID, repo_type="dataset", filename=f"sessions/{chat_id}.json", token=HF_TOKEN, local_dir=str(HF_LOCAL_CACHE), local_dir_use_symlinks=False) data = json.load(open(path, 'r', encoding='utf-8')) return jsonify(data) except Exception: return jsonify({"error":"not_found"}), 404 else: try: try: idx_path = hf_hub_download(repo_id=HF_REPO_ID, repo_type="dataset", filename="index.json", token=HF_TOKEN, local_dir=str(HF_LOCAL_CACHE), local_dir_use_symlinks=False) index_data = json.load(open(idx_path, 'r', encoding='utf-8')) except Exception: index_data = {"items": []} index_data["items"] = [it for it in index_data.get("items", []) if it.get("id") != chat_id] ops = [CommitOperationDelete(path_in_repo=f"sessions/{chat_id}.json"), CommitOperationAdd(path_in_repo="index.json", path_or_fileobj=json.dumps(index_data, ensure_ascii=False).encode('utf-8'))] if HF_TOKEN: api_hf.create_commit(repo_id=HF_REPO_ID, repo_type="dataset", operations=ops, commit_message=f"delete session {chat_id}", token=HF_TOKEN) return jsonify({"ok": True}) except Exception as e: return jsonify({"ok": False, "error": str(e)}), 500 @app.route('/storage/session', methods=['POST']) def storage_session_upsert(): try: session = request.get_json(force=True) if not session or "id" not in session: return jsonify({"ok": False, "error": "invalid session"}), 400 
chat_id = str(session["id"]) title = session.get("title","") from datetime import datetime, timezone updated_at = datetime.now(timezone.utc).isoformat() try: idx_path = hf_hub_download(repo_id=HF_REPO_ID, repo_type="dataset", filename="index.json", token=HF_TOKEN, local_dir=str(HF_LOCAL_CACHE), local_dir_use_symlinks=False) index_data = json.load(open(idx_path, 'r', encoding='utf-8')) except Exception: index_data = {"items": []} items = [it for it in index_data.get("items", []) if it.get("id") != chat_id] items.append({"id": chat_id, "title": title or f"chat-{chat_id}", "updated_at": updated_at}) index_data["items"] = items ops = [ CommitOperationAdd(path_in_repo=f"sessions/{chat_id}.json", path_or_fileobj=json.dumps(session, ensure_ascii=False).encode('utf-8')), CommitOperationAdd(path_in_repo="index.json", path_or_fileobj=json.dumps(index_data, ensure_ascii=False).encode('utf-8')) ] if HF_TOKEN: api_hf.create_commit(repo_id=HF_REPO_ID, repo_type="dataset", operations=ops, commit_message=f"upsert session {chat_id}", token=HF_TOKEN) return jsonify({"ok": True}) except Exception as e: return jsonify({"ok": False, "error": str(e)}), 500 @app.route('/chat', methods=['POST']) def chat(): data = request.json or {} model = data.get('model', 'gemini-2.5-flash') history = data.get('history', []) action_payload = data.get('action') chat_id = data.get('chatId') inline_files = data.get('inlineFiles', []) if not chat_id: return Response(json.dumps({"error": "chatId is required"}), status=400, mimetype='application/json') try: update_file_cache_from_inline_files(chat_id, inline_files) except Exception: pass if action_payload and action_payload.get('intent') == 'regenerate_with_enhancement': def stream_action_result(): try: base_prompt = action_payload.get("base_english_prompt") enhancement_request = action_payload.get("enhancement_request") aspect_ratio = action_payload.get("aspect_ratio", "9:16") if not base_prompt or not enhancement_request: yield f"data: 
{json.dumps({'error': {'code':'MISSING_ARGS','message':'پارامترهای لازم موجود نیست.'}})}\n\n"; return new_prompt = enhance_prompt(base_prompt, enhancement_request, model) yield f"data: {json.dumps({'intent':'generate_image','english_prompt': new_prompt,'aspect_ratio': aspect_ratio})}\n\n" except Exception as e: yield f"data: {json.dumps({'error': {'code':'ACTION_EXECUTION_FAILED','message': str(e)}})}\n\n" return Response(stream_action_result(), mimetype='text/event-stream', headers={'Cache-Control':'no-cache','X-Accel-Buffering':'no'}) def stream_events(): def send_event(event_data): return f"data: {json.dumps(event_data)}\n\n" history_for_gemini = [] for msg in history: if 'role' not in msg: continue new_msg = {'role': msg['role'], 'parts': []} for part in (msg.get('parts') or []): if isinstance(part, dict) and any(k in part for k in ['text','inlineData','functionCall','functionResponse','image_url','edited_images','fileUrl','mimeType','name']): if 'image_url' in part or 'edited_images' in part: new_msg['parts'].append({'text': '(توضیح: تصویر در پیام‌های قبلی ساخته یا ویرایش شده است.)'}) elif 'fileUrl' in part and 'mimeType' in part and not part.get('mimeType','').startswith('image/'): new_msg['parts'].append({'text': f"(یادداشت: فایلی با نوع {part.get('mimeType')} قبلاً ارسال شده است.)"}) elif 'text' in part or 'inlineData' in part: new_msg['parts'].append(part) if new_msg['parts']: history_for_gemini.append(new_msg) history_for_gemini = inject_inline_data_into_history(chat_id, history_for_gemini) if action_payload and action_payload.get('intent') == 'auto_continue': so_far = action_payload.get('partial_text', '').strip() if so_far: history_for_gemini.append({"role":"model","parts":[{"text": so_far}]}) history_for_gemini.append({"role":"user","parts":[{"text":"ادامه پاسخ را دقیقاً از همان‌جایی که قطع شد ادامه بده. 
متن قبلی را تکرار نکن و از همان کلمه/جمله ناتمام ادامه بده."}]}) last_user_idx_local = -1 for i in range(len(history_for_gemini) - 1, -1, -1): if history_for_gemini[i].get('role') == 'user': last_user_idx_local = i; break last_user_text = ""; has_inline_file = False if last_user_idx_local != -1: lu = history_for_gemini[last_user_idx_local] last_user_text = ''.join(p.get('text','') for p in (lu.get('parts') or []) if p.get('text')).strip() has_inline_file = any('inlineData' in p for p in (lu.get('parts') or [])) image_in_recent_history = any(m.get('role') == 'model' and any('image_url' in p or 'edited_images' in p for p in (m.get('parts') or [])) for m in history[-4:]) intent_info = classify_user_intent(last_user_text) if last_user_text and not (action_payload and action_payload.get('intent') == 'auto_continue') else {"intent":"NONE"} intent = (intent_info.get("intent") or "NONE").upper() if intent == "CODE_TASK": for chunk in stream_code_reply(last_user_text, model, intent_info.get("code_language")): yield chunk return if image_in_recent_history and last_user_text and intent == "NONE" and not (action_payload and action_payload.get('intent') == 'auto_continue'): for chunk in stream_text_only(last_user_text, model): yield chunk return if has_inline_file and intent in {"NONE"} and not (action_payload and action_payload.get('intent') == 'auto_continue'): for chunk in stream_file_analysis(history_for_gemini, model): yield chunk return if image_in_recent_history and last_user_text and not (action_payload and action_payload.get('intent') == 'auto_continue'): if intent == "SPECIFIC_EDIT": merged = (intent_info.get("normalized_edit") or last_user_text) update_chat_meta(chat_id, last_edit_prompt=merged) yield send_event({"intent": "edit_image", "prompt": merged}); return elif intent == "ASPECT_RATIO_CHANGE": meta = load_chat_meta(chat_id) ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or "" new_ar = intent_info.get("new_aspect_ratio") or 
meta.get("last_aspect_ratio") or "9:16" yield send_event({"intent": "generate_image", "english_prompt": ep, "aspect_ratio": new_ar}); return elif intent == "QUALITY_ENHANCEMENT": meta = load_chat_meta(chat_id) prev_ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or "" prev_ar = meta.get("last_aspect_ratio") or "9:16" enh = intent_info.get("normalized_edit") or last_user_text yield send_event({"intent": "clarify_action", "question": "بسیار خب! تصویر فعلی را ویرایش کنم یا یک تصویر جدید بسازم؟","options":{"edit":{"label":"ویرایش همین تصویر","intent":"edit_image","prompt": enh},"regenerate":{"label":"ساخت تصویر جدید","intent":"regenerate_with_enhancement","base_english_prompt": prev_ep,"enhancement_request": enh,"aspect_ratio": prev_ar}}}); return elif intent == "NEW_IMAGE": ep = create_artistic_prompt(last_user_text, model) update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio="9:16", last_edit_prompt=None) yield send_event({"intent": "generate_image_with_text", "text": "در حال ساخت تصویر جدید برای شما...","image_generation_payload": {"english_prompt": ep, "aspect_ratio": "9:16"}, "follow_up_text": "تصویر شما آماده شد!"}); return tools_for_request = get_all_tools() final_system_prompt = BASE_RULES_FA + "\n[TURN MODE: GENERAL]\n" keys = get_keys_for_request() accumulated_args = None fn_name = None for k in keys: try: url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse&key={k}" payload = {"contents": history_for_gemini, "tools": tools_for_request, "systemInstruction": {"parts": [{"text": final_system_prompt}]}, "generationConfig":{"temperature":0.3,"maxOutputTokens":8192}} with requests.post(url, json=payload, stream=True, timeout=720) as r: if r.status_code == 429: continue r.raise_for_status() for line in r.iter_lines(): if line and line.startswith(b'data: '): chunk_str = line.decode('utf-8')[6:] try: data_chunk = json.loads(chunk_str) part = data_chunk.get("candidates", [{}])[0].get("content", {}).get("parts", 
[{}])[0] if "functionCall" in part: if not accumulated_args: accumulated_args = {}; fn_name = part["functionCall"].get("name") args_chunk = part["functionCall"].get("args", {}) for kk, vv in (args_chunk or {}).items(): if kk not in accumulated_args: accumulated_args[kk] = vv elif isinstance(accumulated_args.get(kk), str): accumulated_args[kk] += str(vv) elif "text" in part: if not is_tool_like_text(part["text"]): yield sse_text_event(part["text"]) except Exception: continue if accumulated_args: args = accumulated_args if fn_name == "handle_specific_edit": raw_req = (args.get("edit_request") or "").strip() if not raw_req: yield send_event({"error": {"code":"EMPTY_EDIT_REQUEST","message":"دستور ویرایش دریافت نشد."}}) else: merged = raw_req update_chat_meta(chat_id, last_edit_prompt=merged) yield send_event({"intent":"edit_image","prompt": merged}) elif fn_name == "regenerate_with_enhancement": meta = load_chat_meta(chat_id) prev_ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or "" prev_ar = args.get("previous_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16" enhancement_request = args.get("enhancement_request", "") yield send_event({"intent":"clarify_action","question":"بسیار خب! 
با این تغییرات چه کنم؟","options":{"edit":{"label":"ویرایش همین تصویر","intent":"edit_image","prompt": enhancement_request},"regenerate":{"label":"ساخت تصویر جدید","intent":"regenerate_with_enhancement","base_english_prompt": prev_ep,"enhancement_request": enhancement_request,"aspect_ratio": prev_ar}}}) elif fn_name == "change_image_aspect_ratio": meta = load_chat_meta(chat_id) ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or "" new_ar = args.get("new_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16" yield send_event({"intent":"generate_image","english_prompt": ep,"aspect_ratio": new_ar}) elif fn_name == "generate_image": ep = args.get("english_prompt"); ar = args.get("aspect_ratio", "9:16") update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio=ar, last_edit_prompt=None) yield send_event({"intent":"generate_image_with_text","text": args.get("initial_response_text"),"image_generation_payload":{"english_prompt": ep,"aspect_ratio": ar},"follow_up_text": args.get("follow_up_text")}) elif fn_name == "perform_internet_search": for c in stream_search_results(args.get('query')): yield c return except requests.exceptions.RequestException: continue yield send_event({"error": {"code": "ALL_KEYS_FAILED", "message": "تمام کلیدهای API خطا دادند یا درخواست نامعتبر بود."}}) return Response(stream_events(), mimetype='text/event-stream', headers={'Cache-Control':'no-cache','X-Accel-Buffering':'no'}) if __name__ == '__main__': app.run(debug=True, host='0.0.0.0', port=os.environ.get("PORT", 7860))

api.js

javascript
import * as state from './state.js'; import * as ui from './ui.js'; const UPLOADER_API_URL = 'https://asrasahar-ok-uploader-bot.hf.space/upload'; const YOUR_IMAGE_UPLOAD_SERVER_URL = 'https://www.aisada.ir/hamed/upload.php'; const CONVERTER_API_URL = 'https://texttopdf-5irq.onrender.com/'; const HF_IMAGE_GEN_URL_BASE = "https://black-forest-labs-flux-1-schnell.hf.space"; const FN_INDEX_GEN_IMAGE = 2; const TRIGGER_ID_GEN_IMAGE = 5; const STORAGE_INDEX_URL = '/storage/index'; const STORAGE_SESSION_URL = '/storage/session'; function getDimensionsFromRatio(ratioString) { const d = { width: 768, height: 1344 }; if (!ratioString || typeof ratioString !== 'string') return d; const r = ratioString.toLowerCase().replace(/[\sدر:بهx]/g, ''); if (r === '11') return { width: 1024, height: 1024 }; if (r === '169') return { width: 1344, height: 768 }; if (r === '916') return { width: 768, height: 1344 }; if (r === '43') return { width: 1152, height: 864 }; if (r === '34') return { width: 864, height: 1152 }; if (r === '32') return { width: 1216, height: 832 }; if (r === '23') return { width: 832, height: 1216 }; return d; } function windowHistory(messages, maxCount = 40, maxChars = 8000) { const msgs = [...messages]; let start = Math.max(0, msgs.length - maxCount); const sliced = msgs.slice(start); let total = 0; for (let i = sliced.length - 1; i >= 0; i--) { const m = sliced[i]; const t = (m.parts || []).map(p => p.text || '').join(''); total += t.length; if (total > maxChars) { start = msgs.length - (sliced.length - i); break; } } return msgs.slice(start); } export async function convertTextToFile(content, format, buttonElement) { if (buttonElement) ui.showLoadingOnButton(buttonElement, true); try { const form1 = new FormData(); form1.append('content', content); form1.append('format', format); const r1 = await fetch(CONVERTER_API_URL, { method: 'POST', body: form1 }); if (!r1.ok) throw new Error(`HTTP ${r1.status}`); const blob = await r1.blob(); const fileName = 
`alpha-export-${Date.now()}.${format}`; const form2 = new FormData(); form2.append('image', blob, fileName); const r2 = await fetch(YOUR_IMAGE_UPLOAD_SERVER_URL, { method: 'POST', body: form2 }); if (!r2.ok) throw new Error(await r2.text().catch(()=>`HTTP ${r2.status}`)); const j = await r2.json(); if (j.success && j.url) window.parent.postMessage({ type: 'OPEN_EXTERNAL_URL', url: j.url }, '*'); else throw new Error(j.message || 'upload failed'); } catch (e) { alert(`خطا: ${e.message}`); } finally { if (buttonElement) ui.showLoadingOnButton(buttonElement, false); } } function summarizeHistoryForApi(history) { return windowHistory(history).map(msg => { if (msg.isTemporary) return null; const newMsg = { ...msg }; const newParts = []; let has = false; if (newMsg.parts && newMsg.parts.length > 0) { for (const part of newMsg.parts) { const p = { ...part }; delete p.base64Data; delete p.inlineData; if (p.fileUrl && p.mimeType && !p.mimeType.startsWith('image/')) { let t = "این فایل"; if (p.mimeType.startsWith('video/')) t = "این ویدیو"; else if (p.mimeType.startsWith('audio/')) t = "این فایل صوتی"; else t = `این سند (${p.name})`; let tp = newParts.find(x => x.text); const note = `\n(دستور: ${t} را تحلیل کن و محتوای آن را شرح بده.)`; if (tp) tp.text += note; else newParts.push({ text: note }); } if (p.text || p.fileUrl || p.image_url || p.edited_images) { newParts.push(p); has = true; } } } if (msg.role === 'user' && msg.english_prompt_used) { const pn = `پرامپت انگلیسی استفاده شده: "${msg.english_prompt_used}"`; const rn = msg.aspect_ratio_used ? `, نسبت تصویر: "${msg.aspect_ratio_used}"` : ""; const mem = `\n[یادآوری برای مدل: این پیام کاربر منجر به ساخت تصویر شد. ${pn}${rn}]`; let tp = newParts.find(p => p.text); if (tp) tp.text += mem; else newParts.push({ text: mem }); has = true; } newMsg.parts = newParts; return has ? 
newMsg : null; }).filter(Boolean); } function uploadFileToHuggingFace(file, onProgress) { return new Promise((resolve, reject) => { const xhr = new XMLHttpRequest(); state.setCurrentUploadXHR(xhr); xhr.open('POST', UPLOADER_API_URL, true); xhr.onload = function () { state.setCurrentUploadXHR(null); try { const res = JSON.parse(xhr.responseText); if (xhr.status >= 200 && xhr.status < 300) { if (res.hf_url) resolve(res); else reject(new Error("invalid response")); } else reject(new Error(res.error || `HTTP ${xhr.status}`)); } catch { reject(new Error("bad response")); } }; xhr.onerror = function () { state.setCurrentUploadXHR(null); if (xhr.status === 0 && !xhr.statusText) reject(new Error('آپلود لغو شد.')); else reject(new Error('خطای شبکه.')); }; xhr.upload.onprogress = function (e) { if (e.lengthComputable && onProgress) onProgress(Math.round((e.loaded / e.total) * 100)); }; const fd = new FormData(); fd.append('file', file); xhr.send(fd); }); } async function uploadUrlToHuggingFace(url) { return new Promise((resolve, reject) => { const xhr = new XMLHttpRequest(); xhr.open('POST', UPLOADER_API_URL, true); xhr.setRequestHeader('Content-Type', 'application/json'); xhr.onload = function () { try { const res = JSON.parse(xhr.responseText); if (xhr.status >= 200 && xhr.status < 300 && res.hf_url) resolve(res); else reject(new Error(res.error || `HTTP ${xhr.status}`)); } catch { reject(new Error("bad response")); } }; xhr.onerror = function () { reject(new Error('خطای شبکه.')); }; xhr.send(JSON.stringify({ url })); }); } export async function processAndUploadFile(file, onProgress) { const readBase64 = f => new Promise((resolve, reject) => { const r = new FileReader(); r.onload = () => resolve(r.result.split(',')[1]); r.onerror = e => reject(e); r.readAsDataURL(f); }); const [up, b64] = await Promise.all([uploadFileToHuggingFace(file, onProgress), readBase64(file)]); return { url: up.hf_url, name: file.name, mimeType: file.type, base64Data: b64 }; } export async function 
uploadToAISADAAndOpenAlpha(imageUrlFromSpace, button) { if (!imageUrlFromSpace) { if (button) button.textContent = "خطا"; return; } if (button) { button.disabled = true; button.innerHTML = `<div class="typing-indicator" style="height:16px; gap: 3px;"><span style="width:6px; height:6px; background-color:currentColor;"></span><span style="width:6px; height:6px; background-color:currentColor;"></span><span style="width:6px; height:6px; background-color:currentColor;"></span></div>`; } try { const r = await fetch(imageUrlFromSpace); if (!r.ok) throw new Error(`HTTP ${r.status}`); let blob = await r.blob(); const fd = new FormData(); let filename = `generated_alpha_image_${Date.now()}.webp`; try { const p = new URL(imageUrlFromSpace).pathname.split('/').pop(); if (p && p.includes('.')) filename = p.split('?')[0]; } catch {} fd.append('image', blob, filename); const up = await fetch(YOUR_IMAGE_UPLOAD_SERVER_URL, { method: 'POST', body: fd }); if (!up.ok) throw new Error(await up.text().catch(()=>`HTTP ${up.status}`)); const j = await up.json(); if (j.success && j.url) window.parent.postMessage({ type: 'OPEN_EXTERNAL_URL', url: j.url }, '*'); else throw new Error(j.message || 'upload failed'); } catch (e) { if (button) button.textContent = "خطا"; alert(`خطا: ${e.message}`); } finally { if (button) { setTimeout(() => { button.disabled = false; button.innerHTML = `<svg class="w-4 h-4" fill="currentColor" viewBox="0 0 24 24"><path d="M19 9h-4V3H9v6H5l7 7 7-7zM5 18v2h14v-2H5z"/></svg>`; }, 2000); } } } function convertImageBlobToPng(imageBlob) { return new Promise((resolve, reject) => { const u = URL.createObjectURL(imageBlob); const img = new Image(); img.onload = () => { const c = document.createElement('canvas'); c.width = img.width; c.height = img.height; const ctx = c.getContext('2d'); ctx.drawImage(img, 0, 0); c.toBlob(b => { resolve(b); URL.revokeObjectURL(u); }, 'image/png'); }; img.onerror = () => { reject(new Error('خطا در تبدیل تصویر.')); URL.revokeObjectURL(u); }; 
img.src = u; }); } export async function runExternalImageEditor(prompt, fileUrl, el, messageIndex) { const activeChat = state.getActiveChat(); const RENDER_API_URL = 'https://alfa-editor-worker.onrender.com/api/edit'; try { if (!fileUrl) throw new Error("فایل یافت نشد."); ui.showStatusUpdate("در حال دانلود فایل...", el); const r = await fetch(fileUrl); if (!r.ok) throw new Error(`HTTP ${r.status}`); let blob = await r.blob(); const mt = blob.type || r.headers.get('content-type'); if (mt && mt !== 'image/png' && mt !== 'image/jpeg') blob = await convertImageBlobToPng(blob); ui.showStatusUpdate("در حال ارسال برای ویرایش...", el); const fd = new FormData(); fd.append("image", blob, 'image.png'); fd.append("prompt", prompt); const resp = await fetch(RENDER_API_URL, { method: 'POST', body: fd, signal: state.globalAbortController.signal }); const result = await resp.json(); if (!resp.ok) throw new Error(result.error || `HTTP ${resp.status}`); if (!result.image_urls || result.image_urls.length === 0) throw new Error("تصویری برنگشت."); ui.showStatusUpdate("در حال دائمی‌سازی لینک‌ها...", el); const urls = await Promise.all(result.image_urls.map(u => uploadUrlToHuggingFace(u).then(x => x.hf_url))); const finalMessage = { role: 'model', parts: [{ edited_images: urls }] }; if (typeof messageIndex !== 'undefined') { activeChat.messages[messageIndex] = finalMessage; state.saveSessions(); await saveSession(activeChat); ui.addMessageToUI(finalMessage, messageIndex, { isLastModel: true, animate: false }, el); } } catch (e) { if (e.name !== 'AbortError') ui.displayError(el, e.message || String(e)); else el.querySelector('.message-content-area').innerHTML += '<p class="text-xs text-slate-500 mt-2 text-center">-- عملیات ویرایش متوقف شد --</p>'; } finally { ui.resetState(); } } export async function runExternalImageGenerator(englishPrompt, aspectRatio, el, followUpText = null) { const activeChat = state.getActiveChat(); const sessionHash = Math.random().toString(36).substring(2, 15); 
try { const dim = getDimensionsFromRatio(aspectRatio); ui.showStatusUpdate("ارسال درخواست تصویر...", el); const payload = [ englishPrompt, Math.floor(Math.random() * 2147483647), true, dim.width, dim.height, 4 ]; const joinPayload = { fn_index: FN_INDEX_GEN_IMAGE, data: payload, event_data: null, session_hash: sessionHash, trigger_id: TRIGGER_ID_GEN_IMAGE }; const joinResponse = await fetch(`${HF_IMAGE_GEN_URL_BASE}/queue/join`, { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify(joinPayload), signal: state.globalAbortController.signal }); if (!joinResponse.ok) { let t = `HTTP ${joinResponse.status}`; try { const j = await joinResponse.json(); t = j.error || j.detail || t; } catch {} throw new Error(t); } const jr = await joinResponse.json(); if (!jr.event_id) throw new Error("event_id دریافت نشد."); if (state.currentImageEventSource) state.currentImageEventSource.close(); const es = new EventSource(`${HF_IMAGE_GEN_URL_BASE}/queue/data?session_hash=${sessionHash}`); state.setCurrentImageEventSource(es); await new Promise((resolve, reject) => { es.onmessage = async (event) => { const data = JSON.parse(event.data); if (data.msg === "process_starts") ui.showStatusUpdate("پردازش تصویر آغاز شد...", el); if (data.msg === "progress") { if (data.progress_data && data.progress_data.length > 0) { const p = data.progress_data[0]; ui.showStatusUpdate(`در حال ساخت... 
(${p.index}/${p.length})`, el); } } if (data.msg === "process_completed") { es.close(); state.setCurrentImageEventSource(null); if (data.success && data.output?.data?.[0]?.url) { const tempUrl = data.output.data[0].url; ui.showStatusUpdate("در حال دائمی‌سازی لینک...", el); try { const up = await uploadUrlToHuggingFace(tempUrl); const permanentUrl = up.hf_url; const newParts = [{ image_url: permanentUrl }]; if (followUpText) newParts.push({ text: followUpText }); const newModelMessage = { role: 'model', parts: newParts }; activeChat.messages[activeChat.messages.length - 1] = newModelMessage; state.saveSessions(); await saveSession(activeChat); ui.addMessageToUI(newModelMessage, activeChat.messages.length - 1, { isLastModel: true, animate: false }, el); resolve(); } catch (e) { reject(new Error(`خطای آپلود: ${e.message}`)); } } else reject(new Error(data.output?.error || "URL نامعتبر.")); } if (data.msg === 'queue_full') { es.close(); reject(new Error("صف پر است.")); } }; es.onerror = () => { es.close(); state.setCurrentImageEventSource(null); reject(new Error("قطع ارتباط با سرور تصویر.")); }; }); } catch (e) { if (e.name !== 'AbortError') ui.displayError(el, e.message); else el.querySelector('.message-content-area').innerHTML += '<p class="text-xs text-slate-500 mt-2 text-center">-- تولید پاسخ متوقف شد --</p>'; } finally { ui.resetState(); } } export async function streamResponse(modelBubbleOuterDivElement, incomingHistory, chatId, actionPayload = null) { state.setGlobalAbortController(new AbortController()); let fullResponseText = ""; let lastFinishReason = null; const activeChat = state.getActiveChat(); let finalMessageObject = { role: 'model', parts: [] }; async function doStream(payload, { appendToSameMessage = true } = {}) { const response = await fetch('/chat', { method: 'POST', headers: { 'Content-Type': 'application/json' }, signal: state.globalAbortController.signal, body: JSON.stringify(payload) }); if (!response.ok) { const err = await 
response.json().catch(()=>({error:{message:`HTTP ${response.status}`}})); throw new Error(err.error?.message || `HTTP ${response.status}`); } const reader = response.body.getReader(); const decoder = new TextDecoder(); let buffer = ""; async function processEvent(rawEvent) { const lines = rawEvent.split('\n'); const dataPayload = lines.filter(l => l.startsWith('data:')).map(l => l.slice(5).trim()).filter(Boolean).join('\n'); if (!dataPayload) return; const data = JSON.parse(dataPayload); const lastUserMessage = activeChat.messages[activeChat.messages.length - 2]; if (data.intent === 'generate_image') { reader.cancel(); if (lastUserMessage && lastUserMessage.role === 'user') { lastUserMessage.english_prompt_used = data.english_prompt; lastUserMessage.aspect_ratio_used = data.aspect_ratio; state.saveSessions(); await saveSession(activeChat); } await runExternalImageGenerator(data.english_prompt, data.aspect_ratio, modelBubbleOuterDivElement); return 'break'; } if (data.intent === 'generate_image_with_text') { reader.cancel(); const { text, image_generation_payload, follow_up_text } = data; if (lastUserMessage && lastUserMessage.role === 'user') { lastUserMessage.english_prompt_used = image_generation_payload.english_prompt; lastUserMessage.aspect_ratio_used = image_generation_payload.aspect_ratio; state.saveSessions(); await saveSession(activeChat); } ui.streamFinalText(text, modelBubbleOuterDivElement); await runExternalImageGenerator(image_generation_payload.english_prompt, image_generation_payload.aspect_ratio, modelBubbleOuterDivElement, follow_up_text); return 'break'; } if (data.intent === 'clarify_action') { reader.cancel(); const commandMessage = { role: 'model', clarification: data.options, question: data.question, parts: [] }; activeChat.messages[activeChat.messages.length - 1] = commandMessage; state.saveSessions(); await saveSession(activeChat); ui.addMessageToUI(commandMessage, activeChat.messages.length - 1, { isLastModel: true }, 
modelBubbleOuterDivElement); ui.resetState(); return 'break'; } if (data.intent === 'edit_image') { reader.cancel(); let fileUrlForEditing = null; for (let i = incomingHistory.length - 2; i >= 0; i--) { const msg = incomingHistory[i]; if (!msg?.parts) continue; const p1 = msg.parts.find(p => p.image_url || (p.edited_images && p.edited_images.length > 0)); if (p1) { fileUrlForEditing = p1.image_url || p1.edited_images[0]; break; } const p2 = msg.parts.find(p => p.fileUrl); if (p2) { fileUrlForEditing = p2.fileUrl; break; } } if (!fileUrlForEditing) { ui.displayError(modelBubbleOuterDivElement, "فایلی برای ویرایش پیدا نشد."); ui.resetState(); return 'break'; } await runExternalImageEditor(data.prompt, fileUrlForEditing, modelBubbleOuterDivElement, activeChat.messages.length - 1); return 'break'; } if (data.error) throw new Error(data.error.message || JSON.stringify(data.error)); if (data.candidates) { const c = data.candidates[0] || {}; if (typeof c.finishReason !== 'undefined' && c.finishReason !== null) { lastFinishReason = c.finishReason; } const parts = (c.content && c.content.parts) || []; const textPart = parts.find(p => typeof p.text === 'string'); if (textPart && textPart.text) { fullResponseText += textPart.text; ui.streamFinalText(fullResponseText, modelBubbleOuterDivElement, false); } } } while (true) { const { done, value } = await reader.read(); if (done) break; buffer += decoder.decode(value, { stream: true }); let sepIndex; while ((sepIndex = buffer.indexOf('\n\n')) !== -1) { const rawEvent = buffer.slice(0, sepIndex).trim(); buffer = buffer.slice(sepIndex + 2); if (!rawEvent) continue; const res = await processEvent(rawEvent); if (res === 'break') return 'break'; } } if (buffer.trim().length) { const res = await processEvent(buffer.trim()); if (res === 'break') return 'break'; } return 'done'; } function makePayload(actionPayloadOverride = null) { const historyForApi = summarizeHistoryForApi(actionPayload ? 
incomingHistory.slice(0, -1) : incomingHistory); const inlineFiles = []; for (const msg of incomingHistory) { if (!msg || !msg.parts) continue; for (const p of msg.parts) { if (p.base64Data && p.mimeType) inlineFiles.push({ fileUrl: p.fileUrl || null, mimeType: p.mimeType, data: p.base64Data }); } } return { history: historyForApi, model: state.getActiveChat().model, chatId, action: actionPayloadOverride || actionPayload, inlineFiles }; } try { const firstRun = await doStream(makePayload()); if (firstRun !== 'break') { let rounds = 0; while (lastFinishReason === 'MAX_TOKENS' && rounds < 5) { rounds += 1; const payload = makePayload({ intent: 'auto_continue', partial_text: fullResponseText }); const r = await doStream(payload, { appendToSameMessage: true }); if (r === 'break') break; if (lastFinishReason !== 'MAX_TOKENS') break; } } if (fullResponseText) finalMessageObject.parts.push({ text: fullResponseText }); if (finalMessageObject.parts.length > 0) activeChat.messages[activeChat.messages.length - 1] = finalMessageObject; else activeChat.messages.pop(); state.saveSessions(); await saveSession(activeChat); ui.updateMessageActions(modelBubbleOuterDivElement, finalMessageObject, false, true); } catch (e) { if (e.name === 'AbortError') { modelBubbleOuterDivElement.querySelector('.message-content-area').innerHTML += '<p class="text-xs text-slate-500 mt-2 text-center">-- عملیات متوقف شد --</p>'; } else { ui.displayError(modelBubbleOuterDivElement, e.message || 'خطا'); } } finally { ui.resetState(); } } export async function loadAllSessionsFromHF() { const r = await fetch(STORAGE_INDEX_URL, { method: 'GET' }); if (!r.ok) return []; const idx = await r.json(); const ids = (idx.items || []).sort((a,b)=>new Date(b.updated_at)-new Date(a.updated_at)).map(x=>x.id); const sessions = []; for (const id of ids) { const sRes = await fetch(`${STORAGE_SESSION_URL}/${encodeURIComponent(id)}`, { method: 'GET' }); if (sRes.ok) sessions.push(await sRes.json()); } return sessions; } 
// Upsert one chat session on the backend storage endpoint.
export async function saveSession(session) {
    await fetch(STORAGE_SESSION_URL, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(session),
    });
}

// Remove a stored session by id (the id is URL-encoded into the path).
export async function deleteSession(id) {
    const target = `${STORAGE_SESSION_URL}/${encodeURIComponent(id)}`;
    await fetch(target, { method: 'DELETE' });
}

Question

اون قابلیت های قبلی همه باشه

import os
import re
import requests
import json
from flask import Flask, render_template, request, Response
import logging
from filelock import FileLock
from pathlib import Path
import base64

class PersianLogFormatter(logging.Formatter):
    """Logging formatter that displays the record's level name in Persian."""

    # Numeric logging level -> Persian display name.
    LEVEL_MAP = {
        logging.DEBUG: "دیباگ",
        logging.INFO: "اطلاع",
        logging.WARNING: "هشدار",
        logging.ERROR: "خطا",
        logging.CRITICAL: "بحرانی",
    }

    def format(self, record):
        """Replace the level name with its Persian equivalent, then delegate."""
        translated = self.LEVEL_MAP.get(record.levelno, record.levelname)
        record.levelname = translated
        return super().format(record)

def setup_logging():
    """Install one console handler with Persian level names on the root logger."""
    formatter = PersianLogFormatter(
        '[%(asctime)s] [%(levelname)s]: %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )
    root_logger = logging.getLogger()
    # Remove handlers a previous import/worker may already have attached.
    if root_logger.hasHandlers():
        root_logger.handlers.clear()
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)
    root_logger.setLevel(logging.INFO)

setup_logging()

try:
    # Load and validate the comma-separated Gemini API keys from the environment.
    ALL_GEMINI_API_KEYS_STR = os.getenv('ALL_GEMINI_API_KEYS')
    if not ALL_GEMINI_API_KEYS_STR:
        raise RuntimeError("متغیر ALL_GEMINI_API_KEYS تنظیم نشده است.")
    MASTER_API_KEYS = [key.strip() for key in ALL_GEMINI_API_KEYS_STR.split(',') if key.strip()]
    if not MASTER_API_KEYS:
        raise RuntimeError("هیچ کلید معتبری در ALL_GEMINI_API_KEYS یافت نشد.")

    # Shared-memory counter used by all gunicorn workers for round-robin key
    # rotation; the file lock serialises first-time initialisation.
    COUNTER_FILE_PATH = '/dev/shm/gunicorn_key_counter.txt'
    lock_path = COUNTER_FILE_PATH + ".lock"
    lock = FileLock(lock_path)
    with lock:
        if not os.path.exists(COUNTER_FILE_PATH):
            logging.info(f"✅ اولین کارگر شروع به کار کرد. با موفقیت {len(MASTER_API_KEYS)} کلید Gemini بارگذاری شد.")
            with open(COUNTER_FILE_PATH, 'w') as f:
                f.write('0')
            logging.info("شمارنده چرخش کلیدها مقداردهی اولیه شد.")

    # Per-chat file cache and metadata directories also live in shared memory.
    CACHE_DIR = Path('/dev/shm/file_cache')
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    logging.info(f"پوشه کش فایل‌ها در مسیر '{CACHE_DIR}' آماده استفاده است.")
    META_DIR = Path('/dev/shm/chat_meta')
    META_DIR.mkdir(parents=True, exist_ok=True)
    logging.info(f"پوشه متادیتای چت‌ها در مسیر '{META_DIR}' آماده استفاده است.")

except Exception as e:
    logging.critical(f"خطای بحرانی در هنگام بارگذاری کلیدهای API یا تنظیم کش/متا: {e}")
    raise

# NOTE: the paste showed "Flask(name)" — the double underscores of __name__
# were eaten by markdown formatting; restore the standard constructor call.
app = Flask(__name__)
GOOGLE_API_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/models'

# ------------------------ قوانین/مواد ثابت ------------------------

# System prompt (Persian): mandatory behavior rules injected into every Gemini
# request. Runtime data sent to the model — keep the text byte-for-byte.
BASE_RULES_FA = """
دستورالعمل‌های اجباری برای دستیار هوش مصنوعی آلفا:

هویت: تو «چت‌بات آلفا» هستی که توسط هوش مصنوعی آلفا توسعه داده شده‌ای. وظیفه‌ات کمک به کاربر در همه‌ی زمینه‌هاست.

پرهیز از امتناع بی‌جا: به جای امتناع، راه‌حل جایگزین/ایمن بده.

تحلیل جامع چندرسانه‌ای:

هر نوع فایل (تصویر، ویدیو، صدا، PDF، متن، کُد و...) را تحلیل و محتوا را شرح بده.

پاسخ را با تشخیص نوع فایل شروع کن (مثلاً: «این یک فایل صوتی است که...»).

اگر کاربر فقط فایل فرستاد و صراحتاً ساخت/ویرایش تصویر نخواست، فقط تحلیل کن (ابزار تصویر را فراخوانی نکن).

تصویر (ابزارها الزامی در ساخت/ویرایش):

اولویت ابزارها: handle_specific_edit > change_image_aspect_ratio > regenerate_with_enhancement > generate_image > perform_internet_search

خروجی ابزار باید functionCall واقعی باشد؛ شبه‌کد مثل print(handle_specific_edit(...)) یا متن‌هایی مانند functionCall: ... ممنوع.

ویرایش‌های پیاپی: اگر دستور تکمیلی آمد، با دستور قبلی ادغام کن و بعد ویرایش را انجام بده.

گفتگو: اگر پیام محاوره‌ای/تشکر بود، ابزار تصویر را نخوان؛ پاسخ کوتاه و دوستانه فارسی بده.

درخواست‌های غیرتصویری (کدنویسی، نگارش، آموزش و...): پاسخ مفید بده؛ برای کد، بلاک Markdown (```lang) بده.
"""

# ------------------------ ابزار/کش فایل ------------------------

def get_and_increment_key_index():
    """Atomically read, use, and advance the shared round-robin key counter.

    Returns the index into MASTER_API_KEYS that the current request should
    start with. A file lock serialises access across gunicorn workers so two
    concurrent requests never read the same counter value.
    """
    lock_path = COUNTER_FILE_PATH + ".lock"
    lock = FileLock(lock_path)
    with lock:
        try:
            with open(COUNTER_FILE_PATH, 'r') as f:
                current_index = int(f.read().strip())
        except (FileNotFoundError, ValueError):
            # Missing or corrupt counter file: restart rotation from zero.
            current_index = 0
        index_to_use = current_index % len(MASTER_API_KEYS)
        # Store the successor modulo the key count so the counter file's
        # value cannot grow without bound (selection order is unchanged).
        next_index = (current_index + 1) % len(MASTER_API_KEYS)
        with open(COUNTER_FILE_PATH, 'w') as f:
            f.write(str(next_index))
    return index_to_use

def get_keys_for_request():
    """Return all API keys, rotated so this request starts at a fresh key."""
    offset = get_and_increment_key_index()
    return MASTER_API_KEYS[offset:] + MASTER_API_KEYS[:offset]

# ------------------------ متای چت ------------------------

def _meta_path(chat_id: str) -> Path:
    """Map a chat id to its metadata file, keeping only filesystem-safe chars.

    NOTE: the pasted source showed ``def meta_path`` with a broken
    ``('-', '`` literal — the leading underscore (used by every caller,
    e.g. load_chat_meta/save_chat_meta) and the ``'_'`` element were eaten
    by markdown formatting; both are restored here.
    """
    safe_id = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_'))
    return META_DIR / f"{safe_id}.json"

def load_chat_meta(chat_id: str) -> dict:
    """Load the per-chat metadata dict; returns {} when absent or unreadable."""
    path = _meta_path(chat_id)
    lock = FileLock(str(path) + ".lock")
    with lock:
        if path.exists():
            try:
                # Context manager closes the handle deterministically
                # (the original json.load(open(...)) leaked it).
                with open(path, 'r', encoding='utf-8') as f:
                    return json.load(f)
            except Exception as e:
                logging.warning(f"خواندن متای چت {chat_id} ناموفق بود: {e}")
                return {}
        return {}

def save_chat_meta(chat_id: str, meta: dict):
    """Persist the per-chat metadata dict under its file lock (best effort)."""
    path = _meta_path(chat_id)
    with FileLock(str(path) + ".lock"):
        try:
            with open(path, 'w', encoding='utf-8') as f:
                json.dump(meta or {}, f, ensure_ascii=False)
        except Exception as e:
            # Best effort: a failed save is logged, never raised to the caller.
            logging.warning(f"ذخیره متای چت {chat_id} ناموفق بود: {e}")

def update_chat_meta(chat_id: str, **kwargs):
    """Merge keyword updates into the chat's stored metadata.

    A value of None removes the key: callers pass e.g. ``last_edit_prompt=None``
    (see the generate_image path) to clear a stale edit prompt. The original
    ``if v is not None`` filter silently dropped those clears, so an old edit
    prompt kept taking precedence over the new english prompt.
    """
    meta = load_chat_meta(chat_id)
    for key, value in kwargs.items():
        if value is None:
            meta.pop(key, None)
        else:
            meta[key] = value
    save_chat_meta(chat_id, meta)

# ------------------------ ابزارها ------------------------

def get_all_tools():
    """Build the Gemini function-declaration list for this app's tools.

    Returns a single-element list in the shape the Gemini API expects:
    [{"function_declarations": [...image tools..., search tool]}].
    """
    search_tool = {
        "name": "perform_internet_search",
        "description": "فقط برای جستجوی اطلاعات به‌روز، وقایع جاری، قیمت‌ها و... استفاده شود.",
        "parameters": {"type": "OBJECT", "properties": {"query": {"type": "STRING"}}, "required": ["query"]}
    }
    image_tools = [
        {
            "name": "generate_image",
            "description": "ساخت تصویر جدید (نه توصیف متنی).",
            "parameters": {"type": "OBJECT", "properties": {
                "english_prompt": {"type": "STRING"},
                "aspect_ratio": {"type": "STRING"},
                "initial_response_text": {"type": "STRING"},
                "follow_up_text": {"type": "STRING"}
            }, "required": ["english_prompt", "initial_response_text", "follow_up_text"]}
        },
        {
            "name": "handle_specific_edit",
            "description": "تغییر مشخص روی تصویر قبلی؛ دستور جدید را در صورت لزوم با قبلی ادغام کن.",
            "parameters": {"type": "OBJECT", "properties": {"edit_request": {"type": "STRING"}}, "required": ["edit_request"]}
        },
        {
            "name": "regenerate_with_enhancement",
            "description": "برای درخواست‌های کلی/مبهم؛ نیاز به رفع ابهام.",
            "parameters": {"type": "OBJECT", "properties": {
                "enhancement_request": {"type": "STRING"},
                "previous_english_prompt": {"type": "STRING"},
                "previous_aspect_ratio": {"type": "STRING"}
            }, "required": ["enhancement_request", "previous_english_prompt", "previous_aspect_ratio"]}
        },
        {
            "name": "change_image_aspect_ratio",
            "description": "تغییر نسبت/اندازه تصویر قبلی.",
            "parameters": {"type": "OBJECT", "properties": {
                "new_aspect_ratio": {"type": "STRING"},
                "previous_english_prompt": {"type": "STRING"}
            }, "required": ["new_aspect_ratio", "previous_english_prompt"]}
        }
    ]
    return [{"function_declarations": image_tools + [search_tool]}]

# ------------------------ پرامپت‌سازی/ادغام ------------------------

def enhance_prompt(base_prompt, enhancement_request, model):
    """Merge a base English image prompt with a user's modification via Gemini.

    Falls back to simple comma concatenation on any failure, so callers
    always get a usable prompt string back.
    NOTE(review): uses only the first rotated key, no 429 retry — confirm
    whether key fallback is wanted here.
    """
    api_key = get_keys_for_request()[0]
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}"
    system_prompt = f"""You are an expert prompt engineer. Merge the base English image prompt with the user's modification (Persian or English). Return only the final, ready-to-use English prompt.

Base Prompt: "{base_prompt}"
User's Request: "{enhancement_request}"
"""
    payload = {"contents": [{"role": "user", "parts": [{"text": system_prompt}]}], "generationConfig": {"temperature": 0.7}}
    try:
        response = requests.post(url, json=payload, timeout=45)
        response.raise_for_status()
        enhanced_prompt = response.json()["candidates"][0]["content"]["parts"][0]["text"].strip()
        logging.info(f"✅ پرامپت بهبود یافت: {enhanced_prompt}")
        return enhanced_prompt
    except Exception as e:
        logging.error(f"❌ خطا در enhance_prompt: {e}")
        return f"{base_prompt}, {enhancement_request}"

def create_artistic_prompt(user_prompt, model):
    """Turn a (possibly Persian) user idea into a detailed English image prompt.

    Returns the original user_prompt unchanged on any API failure.
    """
    api_key = get_keys_for_request()[0]
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}"
    system_instruction = "Convert user's (possibly Persian) idea to a highly-detailed English prompt. Output ONLY the final English prompt."
    payload = {
        "contents": [{"role": "user", "parts": [{"text": user_prompt}]}],
        "systemInstruction": {"parts": [{"text": system_instruction}]},
        "generationConfig": {"temperature": 0.7}
    }
    try:
        r = requests.post(url, json=payload, timeout=45)
        r.raise_for_status()
        return r.json()["candidates"][0]["content"]["parts"][0]["text"].strip()
    except Exception as e:
        logging.error(f"❌ خطا در create_artistic_prompt: {e}")
        return user_prompt

def merge_edit_prompt(chat_id: str, user_edit_request: str, model: str) -> str:
    """Combine a new edit request with the chat's last known image prompt.

    Prefers the last merged edit prompt over the original English prompt so
    consecutive edits accumulate. Falls back to the raw request when there is
    no stored prompt or merging fails.
    """
    meta = load_chat_meta(chat_id)
    base_prompt = (meta.get("last_edit_prompt") or meta.get("last_english_prompt") or "").strip()
    if base_prompt:
        try:
            return enhance_prompt(base_prompt, user_edit_request, model) or user_edit_request
        except Exception as e:
            logging.warning(f"ادغام پرامپت ویرایش ناموفق: {e}")
            return user_edit_request
    return user_edit_request

# ------------------------ جستجو ------------------------

def stream_search_results(query):
    """Stream Gemini google_search results for *query* as SSE lines.

    Rotates through API keys on 429/network errors; yields a single SSE error
    frame if every key fails.
    """
    logging.info(f"🚀 جستجو برای: '{query}'")
    keys_to_try = get_keys_for_request()
    search_model = 'gemini-2.5-flash'
    url = f"{GOOGLE_API_BASE_URL}/{search_model}:streamGenerateContent?alt=sse"
    payload = {"contents": [{"role": "user", "parts": [{"text": query}]}], "tools": [{"google_search": {}}], "systemInstruction": {"parts": [{"text": "Answer in Persian."}]}}
    for api_key in keys_to_try:
        try:
            with requests.post(url, params={'key': api_key}, json=payload, stream=True, timeout=120) as response:
                if response.status_code == 429:
                    logging.warning("کلید جستجو مسدود. کلید بعدی...")
                    continue
                response.raise_for_status()
                # Forward the upstream SSE lines verbatim, re-terminated.
                for line in response.iter_lines():
                    if line:
                        yield f"{line.decode('utf-8')}\n\n"
                return
        except requests.exceptions.RequestException as e:
            logging.warning(f"خطای جستجو: {e}. کلید بعدی...")
            continue
    yield f"data: {json.dumps({'error': {'code': 'SEARCH_FAILED','message': 'سرویس جستجو موقتاً در دسترس نیست.'}})}\n\n"

# ------------------------ Fallback parser ------------------------

def is_tool_like_text(txt: str) -> bool:
    """Detect tool-call pseudo-code leaked into plain model text.

    Used to suppress outputs like "functionCall: ..." or
    "print(handle_specific_edit(...))" that should have been real function
    calls. The '(' in each pattern is escaped — unescaped it is a regex
    syntax error.
    """
    if not txt:
        return False
    patterns = [
        r'\bfunctionCall\s*:',
        r'\bhandle_specific_edit\s*\(',
        r'\bchange_image_aspect_ratio\s*\(',
        r'\bregenerate_with_enhancement\s*\(',
        r'\bgenerate_image\s*\(',
        r'print\s*\(\s*handle_specific_edit',
    ]
    return any(re.search(p, txt, flags=re.I) for p in patterns)

def sse_text_event(text: str) -> str:
    """Wrap plain text in a Gemini-style SSE data frame for the client."""
    payload = {'candidates': [{'content': {'parts': [{'text': text}]}}]}
    return f"data: {json.dumps(payload)}\n\n"

# ------------------------ Intent Classifier ------------------------

def classify_user_intent(user_text: str) -> dict:
    """Classify the latest user message into a coarse intent via Gemini.

    Returns a dict containing at least {"intent": ...}; falls back to
    {"intent": "NONE"} on empty input, API failure, or unparseable output.
    The inner quotes of the JSON example are escaped via single-quoted
    segments — the original unescaped form was a syntax error.
    """
    if not user_text or not user_text.strip():
        return {"intent": "NONE"}
    keys_to_try = get_keys_for_request()
    classify_model = 'gemini-1.5-flash'
    url = f"{GOOGLE_API_BASE_URL}/{classify_model}:generateContent"
    system = (
        "You classify ONLY the latest message.\n"
        'Return strict JSON: {"intent":"...", "normalized_edit":"...", '
        '"new_aspect_ratio":"...", "code_language":"..."}\n'
        "intents=[NONE,SPECIFIC_EDIT,ASPECT_RATIO_CHANGE,QUALITY_ENHANCEMENT,NEW_IMAGE,CODE_TASK]. "
        "Thanks/ack/greetings => NONE. Code/markup requests => CODE_TASK."
    )
    payload = {
        "contents": [{"role": "user", "parts": [{"text": user_text}]}],
        "systemInstruction": {"parts": [{"text": system}]},
        "generationConfig": {"temperature": 0.0, "maxOutputTokens": 128}
    }
    for api_key in keys_to_try:
        try:
            r = requests.post(f"{url}?key={api_key}", json=payload, timeout=20)
            if r.status_code == 429:
                continue
            r.raise_for_status()
            txt = r.json()["candidates"][0]["content"]["parts"][0]["text"].strip().strip('`').strip()
            # The model may wrap JSON in prose/fences; keep the outermost {...}.
            s, e = txt.find('{'), txt.rfind('}')
            if s != -1 and e != -1:
                txt = txt[s:e + 1]
            data = json.loads(txt)
            if "intent" not in data:
                return {"intent": "NONE"}
            return data
        except Exception as e:
            logging.warning(f"Intent classify failed: {e}")
            continue
    return {"intent": "NONE"}

# ------------------------ استریم‌های متنی/کد/تحلیل فایل ------------------------

def stream_text_only(user_text: str, model_name: str):
    """Stream a short, tool-free conversational reply as SSE lines.

    Used for small talk / acknowledgements (e.g. right after an image), where
    invoking image tools would be wrong. Rotates API keys on 429/errors.
    """
    keys_to_try = get_keys_for_request()
    system_text = BASE_RULES_FA + """
[TURN MODE: TEXT ONLY]
ابزارها را فراخوانی نکن. یک پاسخ کوتاه، دوستانه و فارسی بده.
هرگز نام ابزار یا واژه‌هایی مانند functionCall را در خروجی ننویس.
از عذرخواهی/محدودیت‌گویی اجتناب کن.
"""
    contents = [{"role": "user", "parts": [{"text": user_text}]}]
    for api_key in keys_to_try:
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={api_key}"
            payload = {"contents": contents, "tools": None, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 128}}
            with requests.post(url, json=payload, stream=True, timeout=60) as resp:
                if resp.status_code == 429:
                    continue
                resp.raise_for_status()
                # Forward only the "data: " SSE frames.
                for line in resp.iter_lines():
                    if line and line.startswith(b"data: "):
                        yield f"{line.decode('utf-8')}\n\n"
                return
        except requests.exceptions.RequestException as e:
            logging.warning(f"Text-only error: {e}")
            continue
    yield f"data: {json.dumps({'error': {'code':'TEXT_STREAM_FAILED','message':'پاسخ متنی موقتاً در دسترس نیست.'}})}\n\n"

def stream_code_reply(user_text: str, model_name: str, code_language: str = None):
    """Stream a code-generation reply as SSE lines.

    Normalises *code_language* to a known fence label (defaulting to "html")
    and forbids tool calls for this turn. Rotates API keys on 429/errors.
    """
    keys_to_try = get_keys_for_request()
    lang = (code_language or "").lower()
    if lang not in {"html", "css", "javascript", "python", "sql", "bash", "json", "yaml", "xml", "markdown", "typescript", "csharp", "java", "c", "cpp", "php", "go", "rust", "kotlin", "swift"}:
        lang = "html"
    system_text = BASE_RULES_FA + f"""
[TURN MODE: CODE]
هیچ ابزاری را فراخوانی نکن. کد کامل و قابل اجرا تولید کن داخل ```{lang}. توضیح کوتاه فارسی مجاز، تمرکز روی کد.
"""
    contents = [{"role": "user", "parts": [{"text": user_text}]}]
    for api_key in keys_to_try:
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={api_key}"
            payload = {"contents": contents, "tools": None, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.2, "maxOutputTokens": 2048}}
            with requests.post(url, json=payload, stream=True, timeout=120) as response:
                if response.status_code == 429:
                    continue
                response.raise_for_status()
                for line in response.iter_lines():
                    if line:
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException as e:
            logging.warning(f"Code stream error: {e}")
            continue
    yield f"data: {json.dumps({'error': {'code':'CODE_STREAM_FAILED','message':'تولید کد موقتاً در دسترس نیست.'}})}\n\n"

def stream_file_analysis(history_for_gemini, model_name: str):
    """Stream an analysis of the file attached to the latest user message.

    Sends the full Gemini-shaped history so inline file parts are included;
    tools are disabled for this turn. Rotates API keys on 429/errors.
    """
    keys_to_try = get_keys_for_request()
    system_text = BASE_RULES_FA + """
[TURN MODE: FILE ANALYSIS]
ابزارها را فراخوانی نکن. فایل ضمیمه‌شده در آخرین پیام کاربر را تحلیل کن: نوع فایل را دقیق تشخیص بده و محتوای آن را خلاصه و منظم شرح بده.
اگر تصویر است و کاربر صرفاً تحلیل خواسته، فقط محتوای تصویر را توضیح بده (نه ساخت/ویرایش).
"""
    for api_key in keys_to_try:
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={api_key}"
            payload = {"contents": history_for_gemini, "tools": None, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 2048}}
            with requests.post(url, json=payload, stream=True, timeout=180) as response:
                if response.status_code == 429:
                    continue
                response.raise_for_status()
                for line in response.iter_lines():
                    if line:
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException as e:
            logging.warning(f"File analysis error: {e}")
            continue
    yield f"data: {json.dumps({'error': {'code':'FILE_ANALYSIS_FAILED','message':'تحلیل فایل موقتاً در دسترس نیست.'}})}\n\n"

# ------------------------ روت‌ها ------------------------

@app.route('/')
def index():
    """Serve the single-page chat UI."""
    return render_template('index.html')

@app.route('/chat', methods=['POST'])
def chat():
    """Main chat endpoint.

    Validates the request, manages the per-chat file cache, then streams an
    SSE response: either a direct "regenerate" action, or the main pipeline
    that routes between code replies, small talk, file analysis, image-intent
    shortcuts, and the general Gemini tool-calling loop.
    """
    data = request.json
    if not data:
        return Response(json.dumps({"error": "Invalid request"}), status=400, mimetype='application/json')

    model = data.get('model', 'gemini-2.5-flash')
    history = data.get('history', [])
    action_payload = data.get('action')
    chat_id = data.get('chatId')
    if not chat_id:
        return Response(json.dumps({"error": "chatId is required"}), status=400, mimetype='application/json')

    # --- File handling: cache a newly uploaded file, or re-attach the cached
    # one so follow-up questions still see the file. ---
    last_user_message = None
    for i in range(len(history) - 1, -1, -1):
        if history[i].get('role') == 'user':
            last_user_message = history[i]
            break
    if last_user_message:
        cache_file_path = CACHE_DIR / f"{chat_id}.json"
        lock_path = str(cache_file_path) + ".lock"
        cache_lock = FileLock(lock_path)
        parts = last_user_message.get('parts', [])
        # Did the client send a new file with this message?
        inline_data_part = next((p for p in parts if 'inlineData' in p), None)
        with cache_lock:
            if inline_data_part:
                # A new file was uploaded: persist it to the cache.
                try:
                    with open(cache_file_path, 'w') as f:
                        json.dump(inline_data_part['inlineData'], f)
                    logging.info(f"✅ فایل جدید برای چت {chat_id} دریافت و در کش ذخیره شد.")
                except Exception as e:
                    logging.error(f"❌ خطا در ذخیره فایل در کش برای چت {chat_id}: {e}")
            else:
                # No new file: restore the cached one for follow-up questions.
                if cache_file_path.exists():
                    try:
                        with open(cache_file_path, 'r') as f:
                            cached_data = json.load(f)
                        # Prepend the cached file so the model keeps context.
                        parts.insert(0, {'inlineData': cached_data})
                        last_user_message['parts'] = parts
                        logging.info(f"✅ فایل از کش برای چت {chat_id} بازیابی و به تاریخچه اضافه شد.")
                    except Exception as e:
                        logging.warning(f"⚠️ خواندن فایل از کش برای چت {chat_id} ناموفق بود: {e}")

    # --- Direct clarify action: the user already chose "regenerate". ---
    if action_payload and action_payload.get('intent') == 'regenerate_with_enhancement':
        def stream_action_result():
            try:
                base_prompt = action_payload.get("base_english_prompt")
                enhancement_request = action_payload.get("enhancement_request")
                aspect_ratio = action_payload.get("aspect_ratio", "9:16")
                if not base_prompt or not enhancement_request:
                    yield f"data: {json.dumps({'error': {'code':'MISSING_ARGS','message':'پارامترهای لازم موجود نیست.'}})}\n\n"
                    return
                logging.info(f"🚀 بهبود پرامپت برای بازسازی: '{enhancement_request}'")
                new_prompt = enhance_prompt(base_prompt, enhancement_request, model)
                yield f"data: {json.dumps({'intent':'generate_image','english_prompt': new_prompt,'aspect_ratio': aspect_ratio})}\n\n"
            except Exception as e:
                logging.error(f"❌ خطای اجرای اکشن: {e}")
                yield f"data: {json.dumps({'error': {'code':'ACTION_EXECUTION_FAILED','message': str(e)}})}\n\n"
        return Response(stream_action_result(), mimetype='text/event-stream')

    # --- Main streaming pipeline. ---
    def stream_events():
        def send_event(event_data):
            return f"data: {json.dumps(event_data)}\n\n"

        # Keep only part shapes the Gemini API understands.
        history_for_gemini = []
        for msg in history:
            if 'role' not in msg:
                continue
            new_msg = {'role': msg['role'], 'parts': []}
            for part in (msg.get('parts') or []):
                if isinstance(part, dict) and any(k in part for k in ['text', 'inlineData', 'functionCall', 'functionResponse']):
                    new_msg['parts'].append(part)
            if new_msg['parts']:
                history_for_gemini.append(new_msg)

        # Locate the latest user turn and summarise its content.
        last_user_idx_local = -1
        for i in range(len(history_for_gemini) - 1, -1, -1):
            if history_for_gemini[i].get('role') == 'user':
                last_user_idx_local = i
                break
        last_user_text = ""
        has_inline_file = False
        if last_user_idx_local != -1:
            lu = history_for_gemini[last_user_idx_local]
            last_user_text = ''.join(p.get('text', '') for p in (lu.get('parts') or []) if p.get('text')).strip()
            has_inline_file = any('inlineData' in p for p in (lu.get('parts') or []))
        # Was an image produced in the last few turns?
        image_in_recent_history = any(
            m.get('role') == 'model' and any('image_url' in p or 'edited_images' in p for p in (m.get('parts') or []))
            for m in history[-4:]
        )

        intent_info = classify_user_intent(last_user_text) if last_user_text else {"intent": "NONE"}
        intent = (intent_info.get("intent") or "NONE").upper()
        logging.info(f"Intent: {intent} | has_file={has_inline_file} | image_recent={image_in_recent_history} | text='{last_user_text[:50]}'")

        # 1) Coding request.
        if intent == "CODE_TASK":
            yield from stream_code_reply(last_user_text, model, intent_info.get("code_language"))
            return
        # 2) Small talk right after an image => short text reply.
        if image_in_recent_history and last_user_text and intent == "NONE":
            yield from stream_text_only(last_user_text, model)
            return
        # 3) A file is attached with no specific request => analyse the file.
        if has_inline_file and intent in {"NONE"}:
            yield from stream_file_analysis(history_for_gemini, model)
            return
        # 4) Image follow-up routes driven by the classified intent.
        if image_in_recent_history and last_user_text:
            if intent == "SPECIFIC_EDIT":
                merged = merge_edit_prompt(chat_id, intent_info.get("normalized_edit") or last_user_text, model)
                update_chat_meta(chat_id, last_edit_prompt=merged)
                yield send_event({"intent": "edit_image", "prompt": merged})
                return
            elif intent == "ASPECT_RATIO_CHANGE":
                meta = load_chat_meta(chat_id)
                ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                new_ar = intent_info.get("new_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                yield send_event({"intent": "generate_image", "english_prompt": ep, "aspect_ratio": new_ar})
                return
            elif intent == "QUALITY_ENHANCEMENT":
                meta = load_chat_meta(chat_id)
                prev_ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                prev_ar = meta.get("last_aspect_ratio") or "9:16"
                enh = intent_info.get("normalized_edit") or last_user_text
                # Ambiguous: ask the user whether to edit or regenerate.
                yield send_event({
                    "intent": "clarify_action",
                    "question": "بسیار خب! تصویر فعلی را ویرایش کنم یا یک تصویر جدید بسازم؟",
                    "options": {
                        "edit": {"label": "ویرایش همین تصویر", "intent": "edit_image", "prompt": enh},
                        "regenerate": {"label": "ساخت تصویر جدید", "intent": "regenerate_with_enhancement", "base_english_prompt": prev_ep, "enhancement_request": enh, "aspect_ratio": prev_ar}
                    }
                })
                return
            elif intent == "NEW_IMAGE":
                ep = create_artistic_prompt(last_user_text, model)
                update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio="9:16", last_edit_prompt=None)
                yield send_event({
                    "intent": "generate_image_with_text",
                    "text": "در حال ساخت تصویر جدید برای شما...",
                    "image_generation_payload": {"english_prompt": ep, "aspect_ratio": "9:16"},
                    "follow_up_text": "تصویر شما آماده شد!"
                })
                return

        # 5) General path with the image tools enabled.
        tools_for_request = get_all_tools()
        final_system_prompt = BASE_RULES_FA + """\n[TURN MODE: GENERAL]\nبرای درخواست‌های تصویر از ابزارها استفاده کن؛ در غیر این صورت پاسخ متنی/کُد بده.\nاز شبه‌کد برای ابزارها خودداری کن (فقط functionCall واقعی). هرگز متن‌هایی مانند 'functionCall: ...' در خروجی ننویس."""
        keys_to_try = get_keys_for_request()
        accumulated_function_call_args = None
        function_call_name = None
        for api_key in keys_to_try:
            try:
                url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse&key={api_key}"
                payload = {"contents": history_for_gemini, "tools": tools_for_request, "systemInstruction": {"parts": [{"text": final_system_prompt}]}}
                with requests.post(url, json=payload, stream=True, timeout=720) as response:
                    if response.status_code == 429:
                        continue
                    response.raise_for_status()
                    for line in response.iter_lines():
                        if line and line.startswith(b'data: '):
                            chunk_str = line.decode('utf-8')[6:]
                            try:
                                data_chunk = json.loads(chunk_str)
                                part = data_chunk.get("candidates", [{}])[0].get("content", {}).get("parts", [{}])[0]
                                if "functionCall" in part:
                                    if not accumulated_function_call_args:
                                        accumulated_function_call_args = {}
                                    function_call_name = part["functionCall"].get("name")
                                    args_chunk = part["functionCall"].get("args", {})
                                    # Merge streamed argument fragments; string
                                    # args may arrive split across chunks.
                                    for k, v in (args_chunk or {}).items():
                                        if k not in accumulated_function_call_args:
                                            accumulated_function_call_args[k] = v
                                        elif isinstance(accumulated_function_call_args.get(k), str):
                                            accumulated_function_call_args[k] += str(v)
                                elif "text" in part:
                                    # Drop leaked tool pseudo-code from text output.
                                    if not is_tool_like_text(part["text"]):
                                        yield sse_text_event(part["text"])
                            except (json.JSONDecodeError, IndexError):
                                continue
                # Stream finished for this key; act on any accumulated call.
                if accumulated_function_call_args:
                    args = accumulated_function_call_args
                    logging.info(f"✅ functionCall: {function_call_name} args={args}")
                    if function_call_name == "handle_specific_edit":
                        raw_req = (args.get("edit_request") or "").strip()
                        if not raw_req:
                            yield send_event({"error": {"code": "EMPTY_EDIT_REQUEST", "message": "دستور ویرایش دریافت نشد."}})
                        else:
                            merged = merge_edit_prompt(chat_id, raw_req, model)
                            update_chat_meta(chat_id, last_edit_prompt=merged)
                            yield send_event({"intent": "edit_image", "prompt": merged})
                    elif function_call_name == "regenerate_with_enhancement":
                        meta = load_chat_meta(chat_id)
                        prev_ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                        prev_ar = args.get("previous_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                        enhancement_request = args.get("enhancement_request", "")
                        yield send_event({
                            "intent": "clarify_action",
                            "question": "بسیار خب! با این تغییرات چه کنم؟",
                            "options": {
                                "edit": {"label": "ویرایش همین تصویر", "intent": "edit_image", "prompt": enhancement_request},
                                "regenerate": {"label": "ساخت تصویر جدید", "intent": "regenerate_with_enhancement", "base_english_prompt": prev_ep, "enhancement_request": enhancement_request, "aspect_ratio": prev_ar}
                            }
                        })
                    elif function_call_name == "change_image_aspect_ratio":
                        meta = load_chat_meta(chat_id)
                        ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                        new_ar = args.get("new_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                        yield send_event({"intent": "generate_image", "english_prompt": ep, "aspect_ratio": new_ar})
                    elif function_call_name == "generate_image":
                        ep = args.get("english_prompt")
                        ar = args.get("aspect_ratio", "9:16")
                        update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio=ar, last_edit_prompt=None)
                        yield send_event({
                            "intent": "generate_image_with_text",
                            "text": args.get("initial_response_text"),
                            "image_generation_payload": {"english_prompt": ep, "aspect_ratio": ar},
                            "follow_up_text": args.get("follow_up_text")
                        })
                    elif function_call_name == "perform_internet_search":
                        yield from stream_search_results(args.get('query'))
                return  # Successfully processed with this key.
            except requests.exceptions.RequestException as e:
                logging.warning(f"خطا در ارتباط با Gemini: {e}. کلید بعدی...")
                continue  # Try the next key.
        yield send_event({"error": {"code": "ALL_KEYS_FAILED", "message": "تمام کلیدهای API خطا دادند یا درخواست نامعتبر بود."}})

    return Response(stream_events(), mimetype='text/event-stream')

if name == 'main':
app.run(debug=True, host='0.0.0.0', port=os.environ.get("PORT", 7860)) همه قابلیت ها باشه مثل تحلیل فایل ها تصاویر و... جستجو ساخت و ویرایش تصویر و درک پیام ها بعد از ویرایش و... همه باید باشه . مجدداً کد کامل یکجا درست کن بفرست

Thought

Updating app.py

Okay, we need to tweak the app.py to include SSE headers to disable buffering, specifically 'Cache-Control: no-cache' and 'X-Accel-Buffering: no'. These were missing in one of our earlier versions used for chat routes. Also, the auto-continue logic was discussed before, but since the latest request only covers server-side, I’ll focus just on that.

Handling auto-continue in app.py

To support auto-continue in app.py, we could catch the "finishReason: MAX_TOKENS" in the SSE stream, even without front-end changes. The issue is that the current streaming loop doesn’t parse "finishReason." So, if the stream ends due to MAX_TOKENS, we would need to initiate a new request to continue.

Designing server-side auto-continue

To avoid front-end changes, we can chain multiple server-side calls, streaming their data consecutively under the same connection. We need to implement auto-continue at the server level. We'll create a helper function that streams data and tracks when the stream ends due to "MAX_TOKENS" — then create a new history for the continuation.

Refining auto-continue logic

A better approach would be to instruct the model to "Continue exactly from where you left off. Do not repeat previous text." We can include the previous model’s output as part of the context to avoid confusion. Then, implement a loop to handle multiple rounds of streaming calls, ensuring we don’t rewrap text unnecessarily.

Thought for 1m 37s

Answer

app.py

python
# app.py — Flask SSE backend for the "Alpha" chatbot.
# Talks to the Gemini REST API with round-robin key rotation; shares a key
# counter, a per-chat file cache, and per-chat metadata across Gunicorn
# workers via /dev/shm files guarded by filelock.
import os
import re
import json
import logging
from pathlib import Path

from flask import Flask, render_template, request, Response
from filelock import FileLock
import requests


class PersianLogFormatter(logging.Formatter):
    """Logging formatter that renders level names in Persian."""

    LEVEL_MAP = {logging.DEBUG: "دیباگ", logging.INFO: "اطلاع", logging.WARNING: "هشدار", logging.ERROR: "خطا", logging.CRITICAL: "بحرانی"}

    def format(self, record):
        # Rewrite the level name in place before the base class formats it.
        record.levelname = self.LEVEL_MAP.get(record.levelno, record.levelname)
        return super().format(record)


def setup_logging():
    """Install a single console handler with the Persian formatter on the root logger."""
    log_format = '[%(asctime)s] [%(levelname)s]: %(message)s'
    date_format = '%Y-%m-%d %H:%M:%S'
    formatter = PersianLogFormatter(log_format, datefmt=date_format)
    root_logger = logging.getLogger()
    # Drop any handlers a previous import (e.g. a reloader) may have installed.
    if root_logger.hasHandlers():
        root_logger.handlers.clear()
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    root_logger.addHandler(console_handler)
    root_logger.setLevel(logging.INFO)


setup_logging()

# Comma-separated Gemini API keys; the process refuses to start without them.
ALL_GEMINI_API_KEYS_STR = os.getenv('ALL_GEMINI_API_KEYS')
if not ALL_GEMINI_API_KEYS_STR:
    raise RuntimeError("ALL_GEMINI_API_KEYS تنظیم نشده است.")
MASTER_API_KEYS = [k.strip() for k in ALL_GEMINI_API_KEYS_STR.split(',') if k.strip()]
if not MASTER_API_KEYS:
    raise RuntimeError("هیچ کلید معتبری یافت نشد.")

# Round-robin counter shared across Gunicorn workers (file on tmpfs + lock).
COUNTER_FILE_PATH = '/dev/shm/gunicorn_key_counter.txt'
lock = FileLock(COUNTER_FILE_PATH + ".lock")
with lock:
    if not os.path.exists(COUNTER_FILE_PATH):
        with open(COUNTER_FILE_PATH, 'w') as f:
            f.write('0')

# Per-chat caches on tmpfs: last uploaded inline file, and image-prompt metadata.
CACHE_DIR = Path('/dev/shm/file_cache')
CACHE_DIR.mkdir(parents=True, exist_ok=True)
META_DIR = Path('/dev/shm/chat_meta')
META_DIR.mkdir(parents=True, exist_ok=True)

app = Flask(__name__)
GOOGLE_API_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/models'

# Persian system-prompt rules prepended to every Gemini call.
BASE_RULES_FA = """
دستورالعمل‌های اجباری برای دستیار هوش مصنوعی آلفا:
هویت: تو «چت‌بات آلفا» هستی که توسط هوش مصنوعی آلفا توسعه داده شده‌ای.
پرهیز از امتناع بی‌جا؛ راه‌حل جایگزین بده.
تحلیل جامع چندرسانه‌ای و شروع پاسخ با تشخیص نوع فایل.
اگر کاربر فقط فایل فرستاد و صراحتاً ساخت/ویرایش تصویر نخواست، فقط تحلیل کن.
اولویت ابزارها: handle_specific_edit > change_image_aspect_ratio > regenerate_with_enhancement > generate_image > perform_internet_search
"""


def get_and_increment_key_index():
    """Atomically read and bump the shared counter; return the key index to use.

    Returns:
        int: index into MASTER_API_KEYS (counter modulo key count).
    """
    counter_lock = FileLock(COUNTER_FILE_PATH + ".lock")
    with counter_lock:
        try:
            with open(COUNTER_FILE_PATH, 'r') as f:
                cur = int(f.read().strip())
        except (FileNotFoundError, ValueError):
            cur = 0
        idx = cur % len(MASTER_API_KEYS)
        with open(COUNTER_FILE_PATH, 'w') as f:
            f.write(str(cur + 1))
        return idx


def get_keys_for_request():
    """Return all API keys, rotated so each request tries a different key first."""
    start = get_and_increment_key_index()
    return MASTER_API_KEYS[start:] + MASTER_API_KEYS[:start]


def _meta_path(chat_id: str) -> Path:
    """Map a chat id to its metadata file, stripping unsafe path characters."""
    safe = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_'))
    return META_DIR / f"{safe}.json"


def load_chat_meta(chat_id: str) -> dict:
    """Load per-chat metadata (last prompts/aspect ratio); {} on any failure."""
    p = _meta_path(chat_id)
    meta_lock = FileLock(str(p) + ".lock")
    with meta_lock:
        if p.exists():
            try:
                # Fix: use a context manager so the file handle is closed.
                with open(p, 'r', encoding='utf-8') as f:
                    return json.load(f)
            except Exception:
                return {}
        return {}


def save_chat_meta(chat_id: str, meta: dict):
    """Persist per-chat metadata; best-effort (errors are swallowed)."""
    p = _meta_path(chat_id)
    meta_lock = FileLock(str(p) + ".lock")
    with meta_lock:
        try:
            with open(p, 'w', encoding='utf-8') as f:
                json.dump(meta or {}, f, ensure_ascii=False)
        except Exception:
            pass


def update_chat_meta(chat_id: str, **kwargs):
    """Merge non-None keyword values into the stored chat metadata."""
    m = load_chat_meta(chat_id)
    m.update({k: v for k, v in kwargs.items() if v is not None})
    save_chat_meta(chat_id, m)


def get_all_tools():
    """Return the Gemini function-declaration list (image tools + web search)."""
    search_tool = {"name": "perform_internet_search", "description": "جستجوی به‌روز", "parameters": {"type": "OBJECT", "properties": {"query": {"type": "STRING"}}, "required": ["query"]}}
    image_tools = [
        {"name": "generate_image", "description": "ساخت تصویر جدید", "parameters": {"type": "OBJECT", "properties": {"english_prompt": {"type": "STRING"}, "aspect_ratio": {"type": "STRING"}, "initial_response_text": {"type": "STRING"}, "follow_up_text": {"type": "STRING"}}, "required": ["english_prompt", "initial_response_text", "follow_up_text"]}},
        {"name": "handle_specific_edit", "description": "ویرایش مشخص", "parameters": {"type": "OBJECT", "properties": {"edit_request": {"type": "STRING"}}, "required": ["edit_request"]}},
        {"name": "regenerate_with_enhancement", "description": "بازسازی با بهبود", "parameters": {"type": "OBJECT", "properties": {"enhancement_request": {"type": "STRING"}, "previous_english_prompt": {"type": "STRING"}, "previous_aspect_ratio": {"type": "STRING"}}, "required": ["enhancement_request", "previous_english_prompt", "previous_aspect_ratio"]}},
        {"name": "change_image_aspect_ratio", "description": "تغییر نسبت تصویر", "parameters": {"type": "OBJECT", "properties": {"new_aspect_ratio": {"type": "STRING"}, "previous_english_prompt": {"type": "STRING"}}, "required": ["new_aspect_ratio", "previous_english_prompt"]}},
    ]
    return [{"function_declarations": image_tools + [search_tool]}]


def enhance_prompt(base_prompt, enhancement_request, model):
    """Merge a base English image prompt with a user modification via Gemini.

    Falls back to simple concatenation if the API call fails.
    """
    api_key = get_keys_for_request()[0]
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}"
    system_prompt = f"You are an expert prompt engineer. Merge the base English image prompt with the user's modification. Return only the final English prompt.\n\nBase Prompt: \"{base_prompt}\"\nUser's Request: \"{enhancement_request}\""
    payload = {"contents": [{"role": "user", "parts": [{"text": system_prompt}]}], "generationConfig": {"temperature": 0.7}}
    try:
        r = requests.post(url, json=payload, timeout=60)
        r.raise_for_status()
        return r.json()["candidates"][0]["content"]["parts"][0]["text"].strip()
    except Exception:
        return f"{base_prompt}, {enhancement_request}"


def create_artistic_prompt(user_prompt, model):
    """Turn a user idea into a detailed English image prompt; fall back to the raw idea."""
    api_key = get_keys_for_request()[0]
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}"
    system_instruction = "Convert user's idea to a highly-detailed English prompt. Output ONLY the final English prompt."
    payload = {"contents": [{"role": "user", "parts": [{"text": user_prompt}]}], "systemInstruction": {"parts": [{"text": system_instruction}]}, "generationConfig": {"temperature": 0.7}}
    try:
        r = requests.post(url, json=payload, timeout=60)
        r.raise_for_status()
        return r.json()["candidates"][0]["content"]["parts"][0]["text"].strip()
    except Exception:
        return user_prompt


def stream_search_results(query):
    """SSE generator: Gemini grounded search for *query*, trying each key in turn."""
    keys = get_keys_for_request()
    model = 'gemini-2.5-flash'
    url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse"
    payload = {"contents": [{"role": "user", "parts": [{"text": query}]}], "tools": [{"google_search": {}}], "systemInstruction": {"parts": [{"text": "Answer in Persian."}]}}
    for k in keys:
        try:
            with requests.post(url, params={'key': k}, json=payload, stream=True, timeout=180) as r:
                if r.status_code == 429:
                    continue  # quota hit: rotate to the next key
                r.raise_for_status()
                for line in r.iter_lines():
                    if line:
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code': 'SEARCH_FAILED', 'message': 'سرویس جستجو موقتاً در دسترس نیست.'}})}\n\n"


def is_tool_like_text(txt: str) -> bool:
    """True when model text looks like a leaked tool call (kept out of the stream)."""
    if not txt:
        return False
    patterns = [r'\bfunctionCall\s*:', r'\bhandle_specific_edit\s*\(', r'\bchange_image_aspect_ratio\s*\(', r'\bregenerate_with_enhancement\s*\(', r'\bgenerate_image\s*\(', r'print\s*\(\s*handle_specific_edit']
    return any(re.search(p, txt, flags=re.I) for p in patterns)


def sse_text_event(text: str) -> str:
    """Wrap plain text in a Gemini-candidate-shaped SSE data frame."""
    return f"data: {json.dumps({'candidates': [{'content': {'parts': [{'text': text}]}}]})}\n\n"


def classify_user_intent(user_text: str) -> dict:
    """Classify the user's message into an image/code intent via a small model.

    Returns:
        dict with at least "intent"; {"intent": "NONE"} on any failure.
    """
    if not user_text or not user_text.strip():
        return {"intent": "NONE"}
    keys = get_keys_for_request()
    model = 'gemini-1.5-flash'
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent"
    system = "Return strict JSON: {\"intent\":\"...\", \"normalized_edit\":\"...\", \"new_aspect_ratio\":\"...\", \"code_language\":\"...\"}. intents=[NONE,SPECIFIC_EDIT,ASPECT_RATIO_CHANGE,QUALITY_ENHANCEMENT,NEW_IMAGE,CODE_TASK]."
    payload = {"contents": [{"role": "user", "parts": [{"text": user_text}]}], "systemInstruction": {"parts": [{"text": system}]}, "generationConfig": {"temperature": 0.0, "maxOutputTokens": 128}}
    for k in keys:
        try:
            r = requests.post(f"{url}?key={k}", json=payload, timeout=25)
            if r.status_code == 429:
                continue
            r.raise_for_status()
            txt = r.json()["candidates"][0]["content"]["parts"][0]["text"].strip().strip('`').strip()
            # Trim any prose around the JSON object before parsing.
            s, e = txt.find('{'), txt.rfind('}')
            if s != -1 and e != -1:
                txt = txt[s:e + 1]
            d = json.loads(txt)
            if "intent" not in d:
                return {"intent": "NONE"}
            return d
        except Exception:
            continue
    return {"intent": "NONE"}


def stream_text_only(user_text: str, model_name: str):
    """SSE generator for a plain text-only turn (no tools offered to the model)."""
    keys = get_keys_for_request()
    system_text = BASE_RULES_FA + "\n[TURN MODE: TEXT ONLY]\n"
    contents = [{"role": "user", "parts": [{"text": user_text}]}]
    for k in keys:
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={k}"
            # Fix: omit "tools" entirely instead of sending JSON null.
            payload = {"contents": contents, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192}}
            with requests.post(url, json=payload, stream=True, timeout=720) as resp:
                if resp.status_code == 429:
                    continue
                resp.raise_for_status()
                for line in resp.iter_lines():
                    if line and line.startswith(b"data: "):
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code': 'TEXT_STREAM_FAILED', 'message': 'پاسخ متنی موقتاً در دسترس نیست.'}})}\n\n"


def stream_code_reply(user_text: str, model_name: str, code_language: str = None):
    """SSE generator for a coding turn.

    Note: code_language is currently unused but kept for interface stability.
    """
    keys = get_keys_for_request()
    system_text = BASE_RULES_FA + "\n[TURN MODE: CODE]\n"
    contents = [{"role": "user", "parts": [{"text": user_text}]}]
    for k in keys:
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={k}"
            # Fix: omit "tools" entirely instead of sending JSON null.
            payload = {"contents": contents, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.2, "maxOutputTokens": 4096}}
            with requests.post(url, json=payload, stream=True, timeout=720) as r:
                if r.status_code == 429:
                    continue
                r.raise_for_status()
                for line in r.iter_lines():
                    if line:
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code': 'CODE_STREAM_FAILED', 'message': 'تولید کد موقتاً در دسترس نیست.'}})}\n\n"


def stream_file_analysis(history_for_gemini, model_name: str):
    """SSE generator for a file-analysis turn over the prepared history."""
    keys = get_keys_for_request()
    system_text = BASE_RULES_FA + "\n[TURN MODE: FILE ANALYSIS]\n"
    for k in keys:
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={k}"
            # Fix: omit "tools" entirely instead of sending JSON null.
            payload = {"contents": history_for_gemini, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192}}
            with requests.post(url, json=payload, stream=True, timeout=720) as r:
                if r.status_code == 429:
                    continue
                r.raise_for_status()
                for line in r.iter_lines():
                    if line:
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code': 'FILE_ANALYSIS_FAILED', 'message': 'تحلیل فایل موقتاً در دسترس نیست.'}})}\n\n"


@app.route('/')
def index():
    """Serve the chat front-end."""
    return render_template('index.html')


@app.route('/chat', methods=['POST'])
def chat():
    """Main chat endpoint: routes a turn to text/code/file/image/search handling.

    Expects JSON {model, history, action, chatId}; responds with an SSE stream.
    """
    data = request.json or {}
    model = data.get('model', 'gemini-2.5-flash')
    history = data.get('history', [])
    action_payload = data.get('action')
    chat_id = data.get('chatId')
    if not chat_id:
        return Response(json.dumps({"error": "chatId is required"}), status=400, mimetype='application/json')

    # Cache the most recent uploaded inline file per chat so follow-up
    # messages without a re-upload still see the file.
    last_user_message = None
    for i in range(len(history) - 1, -1, -1):
        if history[i].get('role') == 'user':
            last_user_message = history[i]
            break
    if last_user_message:
        cache_file_path = CACHE_DIR / f"{chat_id}.json"
        cache_lock = FileLock(str(cache_file_path) + ".lock")
        parts = last_user_message.get('parts', [])
        inline_data_part = next((p for p in parts if 'inlineData' in p), None)
        with cache_lock:
            if inline_data_part:
                try:
                    with open(cache_file_path, 'w') as f:
                        json.dump(inline_data_part['inlineData'], f)
                except Exception:
                    pass
            else:
                if cache_file_path.exists():
                    try:
                        with open(cache_file_path, 'r') as f:
                            cached_data = json.load(f)
                        parts.insert(0, {'inlineData': cached_data})
                        last_user_message['parts'] = parts
                    except Exception:
                        pass

    # Explicit front-end action: regenerate an image with an enhancement.
    if action_payload and action_payload.get('intent') == 'regenerate_with_enhancement':
        def stream_action_result():
            try:
                base_prompt = action_payload.get("base_english_prompt")
                enhancement_request = action_payload.get("enhancement_request")
                aspect_ratio = action_payload.get("aspect_ratio", "9:16")
                if not base_prompt or not enhancement_request:
                    yield f"data: {json.dumps({'error': {'code': 'MISSING_ARGS', 'message': 'پارامترهای لازم موجود نیست.'}})}\n\n"
                    return
                new_prompt = enhance_prompt(base_prompt, enhancement_request, model)
                yield f"data: {json.dumps({'intent': 'generate_image', 'english_prompt': new_prompt, 'aspect_ratio': aspect_ratio})}\n\n"
            except Exception as e:
                yield f"data: {json.dumps({'error': {'code': 'ACTION_EXECUTION_FAILED', 'message': str(e)}})}\n\n"
        return Response(stream_action_result(), mimetype='text/event-stream', headers={'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no', 'Connection': 'keep-alive'})

    def stream_events():
        def send_event(event_data):
            return f"data: {json.dumps(event_data)}\n\n"

        # Rebuild history for Gemini: keep text/inlineData, replace image and
        # non-image file references with short Persian placeholder notes.
        history_for_gemini = []
        for msg in history:
            if 'role' not in msg:
                continue
            new_msg = {'role': msg['role'], 'parts': []}
            for part in (msg.get('parts') or []):
                if isinstance(part, dict) and any(k in part for k in ['text', 'inlineData', 'functionCall', 'functionResponse', 'image_url', 'edited_images', 'fileUrl', 'mimeType', 'name']):
                    if 'image_url' in part or 'edited_images' in part:
                        new_msg['parts'].append({'text': '(توضیح: تصویر در پیام‌های قبلی ساخته یا ویرایش شده است.)'})
                    elif 'fileUrl' in part and 'mimeType' in part and not part.get('mimeType', '').startswith('image/'):
                        new_msg['parts'].append({'text': f"(یادداشت: فایلی با نوع {part.get('mimeType')} قبلاً ارسال شده است.)"})
                    elif 'text' in part or 'inlineData' in part:
                        new_msg['parts'].append(part)
            if new_msg['parts']:
                history_for_gemini.append(new_msg)

        # Extract the latest user turn's text and whether it carries a file.
        last_user_idx_local = -1
        for i in range(len(history_for_gemini) - 1, -1, -1):
            if history_for_gemini[i].get('role') == 'user':
                last_user_idx_local = i
                break
        last_user_text = ""
        has_inline_file = False
        if last_user_idx_local != -1:
            lu = history_for_gemini[last_user_idx_local]
            last_user_text = ''.join(p.get('text', '') for p in (lu.get('parts') or []) if p.get('text')).strip()
            has_inline_file = any('inlineData' in p for p in (lu.get('parts') or []))

        # Was an image produced in the last few model turns?
        image_in_recent_history = any(m.get('role') == 'model' and any('image_url' in p or 'edited_images' in p for p in (m.get('parts') or [])) for m in history[-4:])

        intent_info = classify_user_intent(last_user_text) if last_user_text else {"intent": "NONE"}
        intent = (intent_info.get("intent") or "NONE").upper()

        # Fast paths: route by classified intent before the tool loop.
        if intent == "CODE_TASK":
            for chunk in stream_code_reply(last_user_text, model, intent_info.get("code_language")):
                yield chunk
            return
        if image_in_recent_history and last_user_text and intent == "NONE":
            for chunk in stream_text_only(last_user_text, model):
                yield chunk
            return
        if has_inline_file and intent in {"NONE"}:
            for chunk in stream_file_analysis(history_for_gemini, model):
                yield chunk
            return

        if image_in_recent_history and last_user_text:
            if intent == "SPECIFIC_EDIT":
                merged = (intent_info.get("normalized_edit") or last_user_text)
                update_chat_meta(chat_id, last_edit_prompt=merged)
                yield send_event({"intent": "edit_image", "prompt": merged})
                return
            elif intent == "ASPECT_RATIO_CHANGE":
                meta = load_chat_meta(chat_id)
                ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                new_ar = intent_info.get("new_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                yield send_event({"intent": "generate_image", "english_prompt": ep, "aspect_ratio": new_ar})
                return
            elif intent == "QUALITY_ENHANCEMENT":
                meta = load_chat_meta(chat_id)
                prev_ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                prev_ar = meta.get("last_aspect_ratio") or "9:16"
                enh = intent_info.get("normalized_edit") or last_user_text
                yield send_event({"intent": "clarify_action", "question": "بسیار خب! تصویر فعلی را ویرایش کنم یا یک تصویر جدید بسازم؟", "options": {"edit": {"label": "ویرایش همین تصویر", "intent": "edit_image", "prompt": enh}, "regenerate": {"label": "ساخت تصویر جدید", "intent": "regenerate_with_enhancement", "base_english_prompt": prev_ep, "enhancement_request": enh, "aspect_ratio": prev_ar}}})
                return
            elif intent == "NEW_IMAGE":
                ep = create_artistic_prompt(last_user_text, model)
                update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio="9:16", last_edit_prompt=None)
                yield send_event({"intent": "generate_image_with_text", "text": "در حال ساخت تصویر جدید برای شما...", "image_generation_payload": {"english_prompt": ep, "aspect_ratio": "9:16"}, "follow_up_text": "تصویر شما آماده شد!"})
                return

        # General turn: stream with tools enabled; auto-continue on MAX_TOKENS.
        tools_for_request = get_all_tools()
        final_system_prompt = BASE_RULES_FA + "\n[TURN MODE: GENERAL]\n"
        rounds = 0
        max_rounds = 5
        while rounds < max_rounds:
            rounds += 1
            keys = get_keys_for_request()
            accumulated_args = None
            fn_name = None
            this_round_text = ""
            last_finish_reason = None
            progressed = False
            for k in keys:
                try:
                    url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse&key={k}"
                    payload = {"contents": history_for_gemini, "tools": tools_for_request, "systemInstruction": {"parts": [{"text": final_system_prompt}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192}}
                    with requests.post(url, json=payload, stream=True, timeout=720) as r:
                        if r.status_code == 429:
                            continue
                        r.raise_for_status()
                        for line in r.iter_lines():
                            if line and line.startswith(b'data: '):
                                chunk_str = line.decode('utf-8')[6:]
                                try:
                                    data_chunk = json.loads(chunk_str)
                                    cand = (data_chunk.get("candidates") or [{}])[0]
                                    last_finish_reason = cand.get("finishReason", last_finish_reason)
                                    part = cand.get("content", {}).get("parts", [{}])[0]
                                    if "functionCall" in part:
                                        # Merge streamed function-call fragments.
                                        if not accumulated_args:
                                            accumulated_args = {}
                                            fn_name = part["functionCall"].get("name")
                                        args_chunk = part["functionCall"].get("args", {})
                                        for kk, vv in (args_chunk or {}).items():
                                            if kk not in accumulated_args:
                                                accumulated_args[kk] = vv
                                            elif isinstance(accumulated_args.get(kk), str):
                                                accumulated_args[kk] += str(vv)
                                    elif "text" in part:
                                        txt = part["text"]
                                        this_round_text += txt
                                        if not is_tool_like_text(txt):
                                            yield sse_text_event(txt)
                                            progressed = True
                                except Exception:
                                    continue
                    break  # this key produced a stream; stop rotating
                except requests.exceptions.RequestException:
                    continue

            if accumulated_args:
                args = accumulated_args
                if fn_name == "handle_specific_edit":
                    raw_req = (args.get("edit_request") or "").strip()
                    if not raw_req:
                        yield f"data: {json.dumps({'error': {'code': 'EMPTY_EDIT_REQUEST', 'message': 'دستور ویرایش دریافت نشد.'}})}\n\n"
                    else:
                        merged = raw_req
                        update_chat_meta(chat_id, last_edit_prompt=merged)
                        yield f"data: {json.dumps({'intent': 'edit_image', 'prompt': merged})}\n\n"
                elif fn_name == "regenerate_with_enhancement":
                    meta = load_chat_meta(chat_id)
                    prev_ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                    prev_ar = args.get("previous_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                    enhancement_request = args.get("enhancement_request", "")
                    yield f"data: {json.dumps({'intent': 'clarify_action', 'question': 'بسیار خب! با این تغییرات چه کنم؟', 'options': {'edit': {'label': 'ویرایش همین تصویر', 'intent': 'edit_image', 'prompt': enhancement_request}, 'regenerate': {'label': 'ساخت تصویر جدید', 'intent': 'regenerate_with_enhancement', 'base_english_prompt': prev_ep, 'enhancement_request': enhancement_request, 'aspect_ratio': prev_ar}}})}\n\n"
                elif fn_name == "change_image_aspect_ratio":
                    meta = load_chat_meta(chat_id)
                    ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                    new_ar = args.get("new_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                    yield f"data: {json.dumps({'intent': 'generate_image', 'english_prompt': ep, 'aspect_ratio': new_ar})}\n\n"
                elif fn_name == "generate_image":
                    ep = args.get("english_prompt")
                    ar = args.get("aspect_ratio", "9:16")
                    update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio=ar, last_edit_prompt=None)
                    yield f"data: {json.dumps({'intent': 'generate_image_with_text', 'text': args.get('initial_response_text'), 'image_generation_payload': {'english_prompt': ep, 'aspect_ratio': ar}, 'follow_up_text': args.get('follow_up_text')})}\n\n"
                elif fn_name == "perform_internet_search":
                    for c in stream_search_results(args.get('query')):
                        yield c
                return

            # Auto-continue: model hit the token cap mid-answer; append the
            # partial text and ask it to continue from where it stopped.
            if last_finish_reason == "MAX_TOKENS" and this_round_text.strip():
                history_for_gemini.append({"role": "model", "parts": [{"text": this_round_text}]})
                history_for_gemini.append({"role": "user", "parts": [{"text": "ادامه پاسخ را دقیقاً از همان‌جایی که قطع شد ادامه بده. متن قبلی را تکرار نکن."}]})
                continue
            if progressed:
                return
            yield f"data: {json.dumps({'error': {'code': 'ALL_KEYS_FAILED', 'message': 'تمام کلیدهای API خطا دادند یا درخواست نامعتبر بود.'}})}\n\n"
            return
        return

    # SSE headers disable proxy buffering so chunks reach the client promptly.
    return Response(stream_events(), mimetype='text/event-stream', headers={'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no', 'Connection': 'keep-alive'})


if __name__ == '__main__':
    # Fix: environment variables are strings; Werkzeug needs an int port.
    app.run(debug=True, host='0.0.0.0', port=int(os.environ.get("PORT", 7860)))

Question

Screenshot_۲۰۲۵-۰۸-۱۱-۱۷-۰۲-۰۰-۱۲۳_com.android.chrome.jpg

تصویر تحلیل نمی‌کنه. کدهایی که باید تغییر بدم رو کامل و یکجا درست کن بفرست. باید هر فایلی رو قشنگ تحلیل کنه، بدون مشکل، با بهترین روش و حالت

Thought

Addressing code mismatch

It seems there's a mismatch between the newer code and the older code. The newer one handles inlineFiles as part of the payload, while the older version does not. If the front-end architecture shifted to use 'history' and 'inlineFiles' but the backend is still expecting 'newUserMessage', that could explain missing inlineData and why Gemini can't see the images. The fix is clear: the /chat route needs to accept 'inlineFiles'.

Finalizing code implementation

To implement updates, I'll focus on ensuring the update_file_cache_from_inline_files is properly working and that files are injected into the history. If the history has a fileUrl but no inlineData, I'll fetch the file, base64 encode it, and handle requests appropriately. I'll also use gemini-2.5-flash by default, with no comments in the code, just the full "app.py" and maybe "Dockerfile."

Analyzing files efficiently

Based on the user's request, they want the server code to analyze files smoothly and efficiently using the best method. This includes handling any file type without issues. I should focus on ensuring the code can process all file inputs correctly, without errors. The task is clear: make it work seamlessly.

Providing app.py update

The updated app.py will accept the 'inlineFiles' parameter, enabling file handling with metadata (file URL, mimeType, base64 data). It will store files per chat, merge them into history, and fetch missing inlineData from the URL when needed, limiting file size to 12 MB. It will support various file types, like images, audio, video, and PDFs using Gemini 1.5. Additionally, we'll use a caching system using per-chat maps and a memory-based cache for efficient file management.

Refining file handling and analysis

I need to update the flow starting with stream_file_analysis to ensure inlineData is injected earlier; I'll wire that injection in before any streaming begins.

Thought for 33s

Answer

app.py

python
# app.py — Gemini-backed chat backend for a Hugging Face Space.
#
# Serves a single SSE endpoint (/chat) that routes a turn to one of several
# flows (code reply, plain text, file analysis, image-tool dispatch) based on
# a lightweight intent classifier, rotating across a pool of API keys shared
# between gunicorn workers via a counter file in /dev/shm.
import os
import re
import json
import base64
import hashlib
import logging
from pathlib import Path

from flask import Flask, render_template, request, Response
from filelock import FileLock
import requests


class PersianLogFormatter(logging.Formatter):
    """Log formatter that renders Python level names in Persian."""

    LEVEL_MAP = {
        logging.DEBUG: "دیباگ",
        logging.INFO: "اطلاع",
        logging.WARNING: "هشدار",
        logging.ERROR: "خطا",
        logging.CRITICAL: "بحرانی",
    }

    def format(self, record):
        record.levelname = self.LEVEL_MAP.get(record.levelno, record.levelname)
        return super().format(record)


def setup_logging():
    """Install a single stream handler with the Persian formatter on the root logger."""
    formatter = PersianLogFormatter(
        '[%(asctime)s] [%(levelname)s]: %(message)s', datefmt='%Y-%m-%d %H:%M:%S'
    )
    root = logging.getLogger()
    if root.hasHandlers():
        root.handlers.clear()
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    root.addHandler(handler)
    root.setLevel(logging.INFO)


setup_logging()

ALL_GEMINI_API_KEYS_STR = os.getenv('ALL_GEMINI_API_KEYS')
if not ALL_GEMINI_API_KEYS_STR:
    raise RuntimeError("ALL_GEMINI_API_KEYS تنظیم نشده است.")
MASTER_API_KEYS = [k.strip() for k in ALL_GEMINI_API_KEYS_STR.split(',') if k.strip()]
if not MASTER_API_KEYS:
    raise RuntimeError("هیچ کلید معتبری یافت نشد.")

# /dev/shm is shared by all gunicorn workers, so the round-robin counter and
# the per-chat caches survive across workers (but not container restarts).
COUNTER_FILE_PATH = '/dev/shm/gunicorn_key_counter.txt'
lock = FileLock(COUNTER_FILE_PATH + ".lock")
with lock:
    if not os.path.exists(COUNTER_FILE_PATH):
        with open(COUNTER_FILE_PATH, 'w') as f:
            f.write('0')

CACHE_DIR = Path('/dev/shm/file_cache')
CACHE_DIR.mkdir(parents=True, exist_ok=True)
META_DIR = Path('/dev/shm/chat_meta')
META_DIR.mkdir(parents=True, exist_ok=True)

app = Flask(__name__)

GOOGLE_API_BASE_URL = 'https://generativelanguage.googleapis.com/v1beta/models'

BASE_RULES_FA = """
دستورالعمل‌های اجباری برای دستیار هوش مصنوعی آلفا: هویت: تو «چت‌بات آلفا» هستی که توسط هوش مصنوعی آلفا توسعه داده شده‌ای. پرهیز از امتناع بی‌جا؛ راه‌حل جایگزین بده. تحلیل جامع چندرسانه‌ای و شروع پاسخ با تشخیص نوع فایل. اگر کاربر فقط فایل فرستاد و صراحتاً ساخت/ویرایش تصویر نخواست، فقط تحلیل کن. 
اولویت ابزارها: handle_specific_edit > change_image_aspect_ratio > regenerate_with_enhancement > generate_image > perform_internet_search """


def get_and_increment_key_index():
    """Atomically read and advance the shared round-robin counter.

    Returns the index into MASTER_API_KEYS this request should start from.
    """
    counter_lock = FileLock(COUNTER_FILE_PATH + ".lock")
    with counter_lock:
        try:
            with open(COUNTER_FILE_PATH, 'r') as f:
                cur = int(f.read().strip())
        except (FileNotFoundError, ValueError):
            cur = 0
        idx = cur % len(MASTER_API_KEYS)
        with open(COUNTER_FILE_PATH, 'w') as f:
            f.write(str(cur + 1))
    return idx


def get_keys_for_request():
    """Return all API keys rotated so each request starts at a different key."""
    start = get_and_increment_key_index()
    return MASTER_API_KEYS[start:] + MASTER_API_KEYS[:start]


def _meta_path(chat_id: str) -> Path:
    # Sanitize the chat id so it cannot escape META_DIR via path characters.
    safe = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_'))
    return META_DIR / f"{safe}.json"


def load_chat_meta(chat_id: str) -> dict:
    """Load per-chat metadata (last prompts / aspect ratio); {} on any failure."""
    path = _meta_path(chat_id)
    meta_lock = FileLock(str(path) + ".lock")
    with meta_lock:
        if path.exists():
            try:
                # Fix: open via context manager instead of a leaked json.load(open(...)).
                with open(path, 'r', encoding='utf-8') as f:
                    return json.load(f)
            except Exception:
                return {}
    return {}


def save_chat_meta(chat_id: str, meta: dict):
    """Persist per-chat metadata, best-effort (errors are swallowed by design)."""
    path = _meta_path(chat_id)
    meta_lock = FileLock(str(path) + ".lock")
    with meta_lock:
        try:
            with open(path, 'w', encoding='utf-8') as f:
                json.dump(meta or {}, f, ensure_ascii=False)
        except Exception:
            pass


def update_chat_meta(chat_id: str, **kwargs):
    """Merge non-None keyword values into the stored chat metadata."""
    meta = load_chat_meta(chat_id)
    meta.update({k: v for k, v in kwargs.items() if v is not None})
    save_chat_meta(chat_id, meta)


def _file_cache_path(chat_id: str) -> Path:
    safe = ''.join(c for c in str(chat_id) if c.isalnum() or c in ('-', '_'))
    return CACHE_DIR / f"{safe}.json"


def load_file_cache(chat_id: str) -> dict:
    """Load the per-chat file cache ({"files": [...]}); empty cache on failure."""
    path = _file_cache_path(chat_id)
    if path.exists():
        try:
            # Fix: close the file handle (was json.load(open(...))).
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception:
            pass
    return {"files": []}


def save_file_cache(chat_id: str, cache: dict):
    """Persist the per-chat file cache under its file lock, best-effort."""
    path = _file_cache_path(chat_id)
    cache_lock = FileLock(str(path) + ".lock")
    with cache_lock:
        try:
            with open(path, 'w', encoding='utf-8') as f:
                json.dump(cache or {"files": []}, f, ensure_ascii=False)
        except Exception:
            pass


def update_file_cache_from_inline_files(chat_id: str, inline_files: list):
    """Merge freshly-uploaded inline files (fileUrl/mimeType/base64 data) into the cache.

    The whole read-modify-write happens under one lock so concurrent workers
    cannot drop each other's entries (the original loaded without the lock).
    The file I/O is done inline here rather than via load/save helpers to
    avoid acquiring the same FileLock twice, which would deadlock.
    """
    if not inline_files:
        return
    path = _file_cache_path(chat_id)
    with FileLock(str(path) + ".lock"):
        cache = {"files": []}
        if path.exists():
            try:
                with open(path, 'r', encoding='utf-8') as f:
                    cache = json.load(f)
            except Exception:
                cache = {"files": []}
        existing = {rec.get("fileUrl"): rec for rec in cache.get("files", []) if rec.get("fileUrl")}
        changed = False
        for item in inline_files:
            item = item or {}
            url = item.get("fileUrl")
            mime = item.get("mimeType")
            data = item.get("data")
            if not mime or not data:
                continue
            if not url:
                # Fix: builtin hash() differs per worker (PYTHONHASHSEED), so the
                # same payload got different keys in different processes. Use a
                # stable content digest instead.
                url = f"inline://{hashlib.sha256(data.encode('utf-8')).hexdigest()}"
            rec = {"fileUrl": url, "inlineData": {"mimeType": mime, "data": data}}
            if url not in existing or existing[url].get("inlineData", {}).get("data") != data:
                existing[url] = rec
                changed = True
        if changed:
            cache["files"] = list(existing.values())
            try:
                with open(path, 'w', encoding='utf-8') as f:
                    json.dump(cache, f, ensure_ascii=False)
            except Exception:
                pass


def try_fetch_and_inline(url: str, expect_mime: str = None, max_bytes: int = 16_000_000):
    """Download *url* and return a Gemini inlineData part, or None on failure.

    Files larger than *max_bytes* are rejected (returns None), matching the
    original behavior. Uses iter_content so requests handles transfer decoding
    (the original poked at r.raw directly, which bypasses those guarantees).
    """
    try:
        r = requests.get(url, timeout=25, stream=True)
        r.raise_for_status()
        buf = bytearray()
        for chunk in r.iter_content(65536):
            buf.extend(chunk)
            if len(buf) > max_bytes:
                return None
        mime = expect_mime or r.headers.get('content-type') or 'application/octet-stream'
        # Drop "; charset=..." parameters — Gemini expects a bare media type.
        mime = mime.split(';')[0].strip()
        b64 = base64.b64encode(bytes(buf)).decode('utf-8')
        return {"inlineData": {"mimeType": mime, "data": b64}}
    except Exception:
        return None


def inject_inline_data_into_history(chat_id: str, history: list) -> list:
    """For user parts that reference a fileUrl/image_url without inlineData,
    prepend an inlineData part (from cache, or fetched and cached on demand).

    The original part is kept as well so the frontend payload shape survives;
    frontend-only keys are stripped later by sanitize_history_for_gemini().
    """
    cache = load_file_cache(chat_id)
    mapping = {rec.get("fileUrl"): rec.get("inlineData")
               for rec in cache.get("files", []) if rec.get("fileUrl")}
    out = []
    for msg in history:
        if msg.get("role") != "user":
            out.append(msg)
            continue
        new_parts = []
        for part in msg.get("parts", []):
            if isinstance(part, dict) and part.get("fileUrl") and not part.get("inlineData"):
                url = part["fileUrl"]
                if url in mapping:
                    new_parts.append({"inlineData": mapping[url]})
                else:
                    fetched = try_fetch_and_inline(url, part.get("mimeType"))
                    if fetched:
                        new_parts.append(fetched)
                        update_file_cache_from_inline_files(chat_id, [{
                            "fileUrl": url,
                            "mimeType": fetched["inlineData"]["mimeType"],
                            "data": fetched["inlineData"]["data"],
                        }])
            if isinstance(part, dict) and part.get("image_url") and not part.get("inlineData"):
                fetched = try_fetch_and_inline(part["image_url"], "image/png")
                if fetched:
                    new_parts.append(fetched)
            new_parts.append(part)
        out.append({"role": msg["role"], "parts": new_parts})
    return out


# Part fields the Gemini generateContent API actually accepts.
GEMINI_PART_KEYS = ('text', 'inlineData', 'functionCall', 'functionResponse')


def sanitize_history_for_gemini(contents: list) -> list:
    """Strip frontend-only keys (fileUrl, image_url, edited_images, mimeType,
    name, ...) from every part before sending history to the Gemini API.

    Fix: the original forwarded those keys verbatim; the API rejects unknown
    part fields with HTTP 400, so any turn containing an attachment failed —
    this is why uploaded images were never analyzed.
    """
    out = []
    for msg in contents:
        clean_parts = []
        for part in (msg.get('parts') or []):
            if not isinstance(part, dict):
                continue
            clean = {k: part[k] for k in GEMINI_PART_KEYS if k in part}
            if clean:
                clean_parts.append(clean)
        if clean_parts:
            out.append({'role': msg.get('role'), 'parts': clean_parts})
    return out


def get_all_tools():
    """Return the function-declaration toolset offered to the model."""
    search_tool = {"name": "perform_internet_search", "description": "جستجوی به‌روز", "parameters": {"type": "OBJECT", "properties": {"query": {"type": "STRING"}}, "required": ["query"]}}
    image_tools = [
        {"name": "generate_image", "description": "ساخت تصویر جدید", "parameters": {"type": "OBJECT", "properties": {"english_prompt": {"type": "STRING"}, "aspect_ratio": {"type": "STRING"}, "initial_response_text": {"type": "STRING"}, "follow_up_text": {"type": "STRING"}}, "required": ["english_prompt", "initial_response_text", "follow_up_text"]}},
        {"name": "handle_specific_edit", "description": "ویرایش مشخص", "parameters": {"type": "OBJECT", "properties": {"edit_request": {"type": "STRING"}}, "required": ["edit_request"]}},
        {"name": "regenerate_with_enhancement", "description": "بازسازی با بهبود", "parameters": {"type": "OBJECT", "properties": {"enhancement_request": {"type": "STRING"}, "previous_english_prompt": {"type": "STRING"}, "previous_aspect_ratio": {"type": "STRING"}}, "required": ["enhancement_request", "previous_english_prompt", "previous_aspect_ratio"]}},
        {"name": "change_image_aspect_ratio", "description": "تغییر نسبت تصویر", "parameters": {"type": "OBJECT", "properties": {"new_aspect_ratio": {"type": "STRING"}, "previous_english_prompt": {"type": "STRING"}}, "required": ["new_aspect_ratio", "previous_english_prompt"]}},
    ]
    return [{"function_declarations": image_tools + [search_tool]}]


def enhance_prompt(base_prompt, enhancement_request, model):
    """Merge a base English image prompt with a user's modification via the model.

    Falls back to naive concatenation if the API call fails.
    """
    api_key = get_keys_for_request()[0]
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}"
    system_prompt = f"You are an expert prompt engineer. Merge the base English image prompt with the user's modification. Return only the final English prompt.\n\nBase Prompt: \"{base_prompt}\"\nUser's Request: \"{enhancement_request}\""
    payload = {"contents": [{"role": "user", "parts": [{"text": system_prompt}]}], "generationConfig": {"temperature": 0.7}}
    try:
        r = requests.post(url, json=payload, timeout=60)
        r.raise_for_status()
        return r.json()["candidates"][0]["content"]["parts"][0]["text"].strip()
    except Exception:
        return f"{base_prompt}, {enhancement_request}"


def create_artistic_prompt(user_prompt, model):
    """Expand a user's idea into a detailed English image prompt; fall back to the raw idea."""
    api_key = get_keys_for_request()[0]
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent?key={api_key}"
    system_instruction = "Convert user's idea to a highly-detailed English prompt. Output ONLY the final English prompt."
    payload = {"contents": [{"role": "user", "parts": [{"text": user_prompt}]}], "systemInstruction": {"parts": [{"text": system_instruction}]}, "generationConfig": {"temperature": 0.7}}
    try:
        r = requests.post(url, json=payload, timeout=60)
        r.raise_for_status()
        return r.json()["candidates"][0]["content"]["parts"][0]["text"].strip()
    except Exception:
        return user_prompt


def stream_search_results(query):
    """Stream an SSE answer for *query* using Gemini grounded search, rotating keys on 429."""
    keys = get_keys_for_request()
    model = 'gemini-2.5-flash'
    url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse"
    payload = {"contents": [{"role": "user", "parts": [{"text": query}]}], "tools": [{"google_search": {}}], "systemInstruction": {"parts": [{"text": "Answer in Persian."}]}}
    for key in keys:
        try:
            with requests.post(url, params={'key': key}, json=payload, stream=True, timeout=180) as r:
                if r.status_code == 429:
                    continue
                r.raise_for_status()
                for line in r.iter_lines():
                    if line:
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code': 'SEARCH_FAILED', 'message': 'سرویس جستجو موقتاً در دسترس نیست.'}})}\n\n"


def is_tool_like_text(txt: str) -> bool:
    """Heuristic: does *txt* look like a leaked tool call rather than user-facing text?"""
    if not txt:
        return False
    patterns = [
        r'\bfunctionCall\s*:',
        r'\bhandle_specific_edit\s*\(',
        r'\bchange_image_aspect_ratio\s*\(',
        r'\bregenerate_with_enhancement\s*\(',
        r'\bgenerate_image\s*\(',
        r'print\s*\(\s*handle_specific_edit',
    ]
    return any(re.search(p, txt, flags=re.I) for p in patterns)


def sse_text_event(text: str) -> str:
    """Wrap plain text in a Gemini-shaped SSE data event."""
    return f"data: {json.dumps({'candidates': [{'content': {'parts': [{'text': text}]}}]})}\n\n"


def classify_user_intent(user_text: str) -> dict:
    """Classify the user's turn into one of the routing intents via a small JSON prompt.

    Returns {"intent": "NONE"} on empty input or any failure.
    """
    if not user_text or not user_text.strip():
        return {"intent": "NONE"}
    keys = get_keys_for_request()
    # Fix: was hard-coded to gemini-1.5-flash while the rest of the app
    # standardizes on gemini-2.5-flash.
    model = 'gemini-2.5-flash'
    url = f"{GOOGLE_API_BASE_URL}/{model}:generateContent"
    system = "Return strict JSON: {\"intent\":\"...\", \"normalized_edit\":\"...\", \"new_aspect_ratio\":\"...\", \"code_language\":\"...\"}. intents=[NONE,SPECIFIC_EDIT,ASPECT_RATIO_CHANGE,QUALITY_ENHANCEMENT,NEW_IMAGE,CODE_TASK]."
    payload = {"contents": [{"role": "user", "parts": [{"text": user_text}]}], "systemInstruction": {"parts": [{"text": system}]}, "generationConfig": {"temperature": 0.0, "maxOutputTokens": 128}}
    for key in keys:
        try:
            r = requests.post(f"{url}?key={key}", json=payload, timeout=25)
            if r.status_code == 429:
                continue
            r.raise_for_status()
            txt = r.json()["candidates"][0]["content"]["parts"][0]["text"].strip().strip('`').strip()
            # Tolerate markdown fences / chatter around the JSON object.
            s, e = txt.find('{'), txt.rfind('}')
            if s != -1 and e != -1:
                txt = txt[s:e + 1]
            d = json.loads(txt)
            if "intent" not in d:
                return {"intent": "NONE"}
            return d
        except Exception:
            continue
    return {"intent": "NONE"}


def stream_text_only(user_text: str, model_name: str):
    """Stream a plain text reply (no tools) for *user_text* as SSE."""
    keys = get_keys_for_request()
    system_text = BASE_RULES_FA + "\n[TURN MODE: TEXT ONLY]\n"
    contents = [{"role": "user", "parts": [{"text": user_text}]}]
    for key in keys:
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={key}"
            # Fix: the original sent "tools": None, which serializes to JSON
            # null; omit the key entirely instead.
            payload = {"contents": contents, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192}}
            with requests.post(url, json=payload, stream=True, timeout=720) as resp:
                if resp.status_code == 429:
                    continue
                resp.raise_for_status()
                for line in resp.iter_lines():
                    if line and line.startswith(b"data: "):
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code': 'TEXT_STREAM_FAILED', 'message': 'پاسخ متنی موقتاً در دسترس نیست.'}})}\n\n"


def stream_code_reply(user_text: str, model_name: str, code_language: str = None):
    """Stream a code-focused reply (no tools) for *user_text* as SSE.

    *code_language* is accepted for interface compatibility; routing happens
    purely through the system prompt.
    """
    keys = get_keys_for_request()
    system_text = BASE_RULES_FA + "\n[TURN MODE: CODE]\n"
    contents = [{"role": "user", "parts": [{"text": user_text}]}]
    for key in keys:
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={key}"
            payload = {"contents": contents, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.2, "maxOutputTokens": 4096}}
            with requests.post(url, json=payload, stream=True, timeout=720) as r:
                if r.status_code == 429:
                    continue
                r.raise_for_status()
                for line in r.iter_lines():
                    if line:
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code': 'CODE_STREAM_FAILED', 'message': 'تولید کد موقتاً در دسترس نیست.'}})}\n\n"


def stream_file_analysis(history_for_gemini, model_name: str):
    """Stream an analysis of the attached file(s) in the conversation history."""
    keys = get_keys_for_request()
    system_text = BASE_RULES_FA + """ [TURN MODE: FILE ANALYSIS] فایل ضمیمه را دقیق تحلیل کن: نوع فایل را مشخص کن، محتوای کلیدی را خلاصه کن، ساختار را شرح بده، نکات مهم و استخراج‌پذیر را فهرست کن. 
"""
    # Fix: strip frontend-only part keys before calling the API — this is the
    # bug that made every attachment turn return HTTP 400.
    contents = sanitize_history_for_gemini(history_for_gemini)
    for key in keys:
        try:
            url = f"{GOOGLE_API_BASE_URL}/{model_name}:streamGenerateContent?alt=sse&key={key}"
            payload = {"contents": contents, "systemInstruction": {"parts": [{"text": system_text}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192}}
            with requests.post(url, json=payload, stream=True, timeout=720) as r:
                if r.status_code == 429:
                    continue
                r.raise_for_status()
                for line in r.iter_lines():
                    if line:
                        yield line.decode('utf-8') + "\n\n"
                return
        except requests.exceptions.RequestException:
            continue
    yield f"data: {json.dumps({'error': {'code': 'FILE_ANALYSIS_FAILED', 'message': 'تحلیل فایل موقتاً در دسترس نیست.'}})}\n\n"


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/chat', methods=['POST'])
def chat():
    """Main SSE endpoint: route the turn to the right flow and stream events.

    Expects JSON with: model, history, chatId, optional inlineFiles and action.
    """
    # Fix: request.json raises on a non-JSON body; silent mode keeps the
    # existing `or {}` fallback meaningful.
    data = request.get_json(silent=True) or {}
    model = data.get('model', 'gemini-2.5-flash')
    history = data.get('history', [])
    action_payload = data.get('action')
    chat_id = data.get('chatId')
    inline_files = data.get('inlineFiles', [])
    if not chat_id:
        return Response(json.dumps({"error": "chatId is required"}), status=400, mimetype='application/json')
    try:
        update_file_cache_from_inline_files(chat_id, inline_files)
    except Exception:
        pass

    def stream_action_result():
        """Handle the explicit regenerate_with_enhancement action from the frontend."""
        try:
            base_prompt = action_payload.get("base_english_prompt")
            enhancement_request = action_payload.get("enhancement_request")
            aspect_ratio = action_payload.get("aspect_ratio", "9:16")
            if not base_prompt or not enhancement_request:
                yield f"data: {json.dumps({'error': {'code': 'MISSING_ARGS', 'message': 'پارامترهای لازم موجود نیست.'}})}\n\n"
                return
            new_prompt = enhance_prompt(base_prompt, enhancement_request, model)
            yield f"data: {json.dumps({'intent': 'generate_image', 'english_prompt': new_prompt, 'aspect_ratio': aspect_ratio})}\n\n"
        except Exception as e:
            yield f"data: {json.dumps({'error': {'code': 'ACTION_EXECUTION_FAILED', 'message': str(e)}})}\n\n"

    if action_payload and action_payload.get('intent') == 'regenerate_with_enhancement':
        return Response(stream_action_result(), mimetype='text/event-stream',
                        headers={'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no', 'Connection': 'keep-alive'})

    def stream_events():
        def send_event(event_data):
            return f"data: {json.dumps(event_data)}\n\n"

        # Keep only parts with recognized keys (frontend-only keys included here
        # on purpose: they drive routing below and are stripped again before any
        # Gemini call by sanitize_history_for_gemini()).
        history_for_gemini = []
        for msg in history:
            if 'role' not in msg:
                continue
            new_msg = {'role': msg['role'], 'parts': []}
            for part in (msg.get('parts') or []):
                if isinstance(part, dict) and any(k in part for k in ['text', 'inlineData', 'functionCall', 'functionResponse', 'image_url', 'edited_images', 'fileUrl', 'mimeType', 'name']):
                    new_msg['parts'].append(part)
            if new_msg['parts']:
                history_for_gemini.append(new_msg)
        history_for_gemini = inject_inline_data_into_history(chat_id, history_for_gemini)

        # Locate the last user message; derive its text and attachment flag.
        last_user_idx_local = -1
        for i in range(len(history_for_gemini) - 1, -1, -1):
            if history_for_gemini[i].get('role') == 'user':
                last_user_idx_local = i
                break
        last_user_text = ""
        has_inline_file = False
        if last_user_idx_local != -1:
            lu = history_for_gemini[last_user_idx_local]
            last_user_text = ''.join(p.get('text', '') for p in (lu.get('parts') or []) if p.get('text')).strip()
            has_inline_file = any('inlineData' in p for p in (lu.get('parts') or []))

        image_in_recent_history = any(
            m.get('role') == 'model' and any('image_url' in p or 'edited_images' in p for p in (m.get('parts') or []))
            for m in history[-4:]
        )

        intent_info = classify_user_intent(last_user_text) if last_user_text else {"intent": "NONE"}
        intent = (intent_info.get("intent") or "NONE").upper()

        if intent == "CODE_TASK":
            for chunk in stream_code_reply(last_user_text, model, intent_info.get("code_language")):
                yield chunk
            return
        if image_in_recent_history and last_user_text and intent == "NONE":
            for chunk in stream_text_only(last_user_text, model):
                yield chunk
            return
        if has_inline_file and intent in {"NONE"}:
            for chunk in stream_file_analysis(history_for_gemini, model):
                yield chunk
            return

        if image_in_recent_history and last_user_text:
            if intent == "SPECIFIC_EDIT":
                merged = (intent_info.get("normalized_edit") or last_user_text)
                update_chat_meta(chat_id, last_edit_prompt=merged)
                yield send_event({"intent": "edit_image", "prompt": merged})
                return
            elif intent == "ASPECT_RATIO_CHANGE":
                meta = load_chat_meta(chat_id)
                ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                new_ar = intent_info.get("new_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                yield send_event({"intent": "generate_image", "english_prompt": ep, "aspect_ratio": new_ar})
                return
            elif intent == "QUALITY_ENHANCEMENT":
                meta = load_chat_meta(chat_id)
                prev_ep = meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                prev_ar = meta.get("last_aspect_ratio") or "9:16"
                enh = intent_info.get("normalized_edit") or last_user_text
                yield send_event({"intent": "clarify_action", "question": "بسیار خب! تصویر فعلی را ویرایش کنم یا یک تصویر جدید بسازم؟", "options": {"edit": {"label": "ویرایش همین تصویر", "intent": "edit_image", "prompt": enh}, "regenerate": {"label": "ساخت تصویر جدید", "intent": "regenerate_with_enhancement", "base_english_prompt": prev_ep, "enhancement_request": enh, "aspect_ratio": prev_ar}}})
                return
            elif intent == "NEW_IMAGE":
                ep = create_artistic_prompt(last_user_text, model)
                update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio="9:16", last_edit_prompt=None)
                yield send_event({"intent": "generate_image_with_text", "text": "در حال ساخت تصویر جدید برای شما...", "image_generation_payload": {"english_prompt": ep, "aspect_ratio": "9:16"}, "follow_up_text": "تصویر شما آماده شد!"})
                return

        # General tool-enabled turn: stream model output, accumulate any
        # function call, and dispatch it; continue on MAX_TOKENS truncation.
        tools_for_request = get_all_tools()
        final_system_prompt = BASE_RULES_FA + "\n[TURN MODE: GENERAL]\n"
        rounds = 0
        max_rounds = 5
        while rounds < max_rounds:
            rounds += 1
            keys = get_keys_for_request()
            accumulated_args = None
            fn_name = None
            this_round_text = ""
            last_finish_reason = None
            progressed = False
            for key in keys:
                try:
                    url = f"{GOOGLE_API_BASE_URL}/{model}:streamGenerateContent?alt=sse&key={key}"
                    payload = {"contents": sanitize_history_for_gemini(history_for_gemini), "tools": tools_for_request, "systemInstruction": {"parts": [{"text": final_system_prompt}]}, "generationConfig": {"temperature": 0.3, "maxOutputTokens": 8192}}
                    with requests.post(url, json=payload, stream=True, timeout=720) as r:
                        if r.status_code == 429:
                            continue
                        r.raise_for_status()
                        for line in r.iter_lines():
                            if line and line.startswith(b'data: '):
                                chunk_str = line.decode('utf-8')[6:]
                                try:
                                    data_chunk = json.loads(chunk_str)
                                    cand = (data_chunk.get("candidates") or [{}])[0]
                                    last_finish_reason = cand.get("finishReason", last_finish_reason)
                                    part = cand.get("content", {}).get("parts", [{}])[0]
                                    if "functionCall" in part:
                                        if not accumulated_args:
                                            accumulated_args = {}
                                            fn_name = part["functionCall"].get("name")
                                        args_chunk = part["functionCall"].get("args", {})
                                        # Concatenate streamed string args; keep the
                                        # first value for non-string args.
                                        for kk, vv in (args_chunk or {}).items():
                                            if kk not in accumulated_args:
                                                accumulated_args[kk] = vv
                                            elif isinstance(accumulated_args.get(kk), str):
                                                accumulated_args[kk] += str(vv)
                                    elif "text" in part:
                                        txt = part["text"]
                                        this_round_text += txt
                                        if not is_tool_like_text(txt):
                                            yield sse_text_event(txt)
                                            progressed = True
                                except Exception:
                                    continue
                    break  # streamed successfully with this key
                except requests.exceptions.RequestException:
                    continue

            if accumulated_args:
                args = accumulated_args
                if fn_name == "handle_specific_edit":
                    raw_req = (args.get("edit_request") or "").strip()
                    if not raw_req:
                        yield f"data: {json.dumps({'error': {'code': 'EMPTY_EDIT_REQUEST', 'message': 'دستور ویرایش دریافت نشد.'}})}\n\n"
                    else:
                        merged = raw_req
                        update_chat_meta(chat_id, last_edit_prompt=merged)
                        yield f"data: {json.dumps({'intent': 'edit_image', 'prompt': merged})}\n\n"
                elif fn_name == "regenerate_with_enhancement":
                    meta = load_chat_meta(chat_id)
                    prev_ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                    prev_ar = args.get("previous_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                    enhancement_request = args.get("enhancement_request", "")
                    yield f"data: {json.dumps({'intent': 'clarify_action', 'question': 'بسیار خب! با این تغییرات چه کنم؟', 'options': {'edit': {'label': 'ویرایش همین تصویر', 'intent': 'edit_image', 'prompt': enhancement_request}, 'regenerate': {'label': 'ساخت تصویر جدید', 'intent': 'regenerate_with_enhancement', 'base_english_prompt': prev_ep, 'enhancement_request': enhancement_request, 'aspect_ratio': prev_ar}}})}\n\n"
                elif fn_name == "change_image_aspect_ratio":
                    meta = load_chat_meta(chat_id)
                    ep = args.get("previous_english_prompt") or meta.get("last_edit_prompt") or meta.get("last_english_prompt") or ""
                    new_ar = args.get("new_aspect_ratio") or meta.get("last_aspect_ratio") or "9:16"
                    yield f"data: {json.dumps({'intent': 'generate_image', 'english_prompt': ep, 'aspect_ratio': new_ar})}\n\n"
                elif fn_name == "generate_image":
                    ep = args.get("english_prompt")
                    ar = args.get("aspect_ratio", "9:16")
                    update_chat_meta(chat_id, last_english_prompt=ep, last_aspect_ratio=ar, last_edit_prompt=None)
                    yield f"data: {json.dumps({'intent': 'generate_image_with_text', 'text': args.get('initial_response_text'), 'image_generation_payload': {'english_prompt': ep, 'aspect_ratio': ar}, 'follow_up_text': args.get('follow_up_text')})}\n\n"
                elif fn_name == "perform_internet_search":
                    for chunk in stream_search_results(args.get('query')):
                        yield chunk
                return

            if last_finish_reason == "MAX_TOKENS" and this_round_text.strip():
                # Ask the model to continue from where it was truncated.
                history_for_gemini.append({"role": "model", "parts": [{"text": this_round_text}]})
                history_for_gemini.append({"role": "user", "parts": [{"text": "ادامه پاسخ را دقیقاً از همان‌جایی که قطع شد ادامه بده. تکرار نکن."}]})
                continue
            if progressed:
                return
            yield f"data: {json.dumps({'error': {'code': 'ALL_KEYS_FAILED', 'message': 'تمام کلیدهای API خطا دادند یا درخواست نامعتبر بود.'}})}\n\n"
            return

    return Response(stream_events(), mimetype='text/event-stream',
                    headers={'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no', 'Connection': 'keep-alive'})


if __name__ == '__main__':
    # Fix: PORT from the environment is a string; Flask expects an int.
    app.run(debug=True, host='0.0.0.0', port=int(os.environ.get("PORT", 7860)))

Share this Q&A