""" MaxServSuperAGI Server v2.5-popen Based on working Apr30 bkup22 that PRODUCED GLBs + Popen async polling (no threads) """ from flask import Flask, request, jsonify, send_from_directory, send_file import subprocess, json, os, time, base64, re, urllib.request, shutil from pathlib import Path from flask_cors import CORS app = Flask(__name__) CORS(app) DEFAULT_MODEL = "mlx-community/gemma-3-12b-it-4bit" conversations = {} # ═══ ASYNC JOB TRACKING via Popen (no threads) ═══ _3d_procs = {} def _register_proc(job_id, proc, job_dir, output_name, decimation, tex_size, engine='trellis2', attempt=1, cmd=None, env=None): _3d_procs[job_id] = { 'proc': proc, 'job_dir': str(job_dir), 'output_name': output_name, 'decimation': decimation, 'tex_size': tex_size, 'engine': engine, 'started': time.time(), 'attempt': attempt, 'cmd': cmd, 'env': env, } @app.route("/", methods=["GET"]) @app.route("/health", methods=["GET"]) def health(): return "MaxServSuperAGI Server is running" @app.route("/api/tags", methods=["GET"]) def list_models(): return jsonify({"models": [{"name": "gemma3:12b", "model": DEFAULT_MODEL, "size": 8070000000, "details": {"family": "gemma3", "parameter_size": "12B"}}]}) @app.route("/api/chat", methods=["POST"]) def chat(): data = request.json; messages = data.get("messages", []); model = data.get("model", DEFAULT_MODEL) model_map = {"gemma3:4b": "mlx-community/gemma-3-4b-it-4bit", "gemma3:12b": "mlx-community/gemma-3-12b-it-4bit", "llama3.1:8b": "mlx-community/Meta-Llama-3.1-8B-Instruct-4bit", "qwen3:14b": "mlx-community/Qwen2.5-14B-Instruct-4bit"} mlx_model = model_map.get(model, DEFAULT_MODEL) prompt_parts = [] for msg in messages: role = msg.get("role", "user"); content = msg.get("content", "") if role == "system": prompt_parts.append(f"System: {content}") elif role == "user": prompt_parts.append(f"User: {content}") elif role == "assistant": prompt_parts.append(f"Assistant: {content}") prompt = "\n".join(prompt_parts) + "\nAssistant:" try: start_time = 
time.time() result = subprocess.run(["python", "-m", "mlx_lm.generate", "--model", mlx_model, "--max-tokens", str(data.get("max_tokens", 500)), "--prompt", prompt], capture_output=True, text=True, timeout=120, env={**os.environ, "VIRTUAL_ENV": os.environ.get("VIRTUAL_ENV", "")}) output = result.stdout; response_text = "" if "==========" in output: parts = output.split("==========") if len(parts) >= 3: response_text = parts[1].strip() elif len(parts) >= 2: response_text = parts[-1].strip() if not response_text: response_text = output.strip() elapsed = time.time() - start_time return jsonify({"model": model, "message": {"role": "assistant", "content": response_text}, "done": True, "total_duration": int(elapsed * 1e9), "eval_count": len(response_text.split())}) except subprocess.TimeoutExpired: return jsonify({"error": "Generation timed out (120s)"}), 504 except Exception as e: return jsonify({"error": str(e)}), 500 @app.route("/api/generate", methods=["POST"]) def generate(): data = request.json; data["messages"] = [{"role": "user", "content": data.get("prompt", "")}]; return chat() # ═══ LTX VIDEO ═══ OUTPUT_DIR = os.path.expanduser('~/AI/output'); os.makedirs(OUTPUT_DIR, exist_ok=True) LTX_REPO = os.path.expanduser('~/AI/ltx-video/repo'); LTX_MODEL = 'prince-canuma/LTX-2-distilled' @app.route('/api/serve-video/', methods=['GET']) def serve_video(filename): if not filename.endswith(".mp4") or "/" in filename or ".." 
in filename: return jsonify({"error": "Invalid filename"}), 400 if not os.path.exists(os.path.join(OUTPUT_DIR, filename)): return jsonify({"error": "File not found"}), 404 return send_from_directory(OUTPUT_DIR, filename, mimetype="video/mp4") @app.route('/api/gen-video', methods=['POST']) def generate_video(): data = request.json; prompt = data.get("prompt", "") if not prompt: return jsonify({"error": "Prompt is required"}), 400 width = data.get("width", 768); height = data.get("height", 512); num_frames = data.get("num_frames", 33) output_name = data.get("output_name", f"video_{int(time.time())}"); start_image = data.get("start_image_url", None) output_path = os.path.join(OUTPUT_DIR, f"{output_name}.mp4") try: cmd = ["uv", "run", "mlx_video.ltx_2.generate", "--prompt", prompt, "--model-repo", LTX_MODEL, "--width", str(width), "--height", str(height), "-n", str(num_frames), "--output-path", output_path] if start_image: img_path = os.path.join(OUTPUT_DIR, f"{output_name}_input.jpg") subprocess.run(["curl", "-sL", "-o", img_path, start_image], timeout=30) if os.path.exists(img_path) and os.path.getsize(img_path) > 1000: cmd.extend(["--image", img_path]) start_time = time.time(); result = subprocess.run(cmd, capture_output=True, text=True, timeout=3600, cwd=LTX_REPO); elapsed = time.time() - start_time if not os.path.exists(output_path) or os.path.getsize(output_path) < 10000: return jsonify({"error": "Video not generated", "stderr": (result.stderr or "")[-500:]}), 500 download_url = f"{request.scheme}://{request.host}/api/serve-video/{output_name}.mp4" return jsonify({"status": "complete", "download_url": download_url, "duration_seconds": round(elapsed, 1), "prompt": prompt, "resolution": f"{width}x{height}", "frames": num_frames, "file_size_mb": round(os.path.getsize(output_path) / (1024*1024), 2)}) except subprocess.TimeoutExpired: return jsonify({"error": "Generation timed out"}), 504 except Exception as e: return jsonify({"error": str(e)}), 500 
@app.route('/api/list-videos', methods=['GET'])
def list_videos():
    """List generated .mp4 files in OUTPUT_DIR, newest-name first."""
    videos = []
    for fn in sorted(os.listdir(OUTPUT_DIR), reverse=True):
        if fn.endswith(".mp4"):
            fp = os.path.join(OUTPUT_DIR, fn)
            videos.append({"filename": fn,
                           "size_mb": round(os.path.getsize(fp) / (1024*1024), 2),
                           "created": time.ctime(os.path.getmtime(fp))})
    return jsonify({"videos": videos, "count": len(videos)})


# ═══ FLUX IMAGE ═══
FLUX_MODEL_REPO = "filipstrand/FLUX.1-Krea-dev-mflux-4bit"
FLUX_BASE_MODEL = "krea-dev"
IMAGE_OUTPUT_DIR = os.path.expanduser("~/AI/images")
os.makedirs(IMAGE_OUTPUT_DIR, exist_ok=True)


@app.route('/api/gen-image', methods=['POST'])
def generate_image():
    """Generate one image with mflux (FLUX Krea dev, 4-bit) and return it base64-encoded.

    Width/height are clamped to >=256 and snapped down to multiples of 16;
    steps are clamped to [10, 50].
    """
    data = request.json or {}
    prompt = (data.get("prompt") or "").strip()
    if not prompt:
        return jsonify({"success": False, "error": "Prompt is required"}), 400
    width = max(256, (int(data.get("width", 1024)) // 16) * 16)
    height = max(256, (int(data.get("height", 1024)) // 16) * 16)
    steps = max(10, min(50, int(data.get("steps", 25))))
    guidance = float(data.get("guidance_scale", 4.5))
    seed_in = data.get("seed")
    seed = int(seed_in) if seed_in not in (None, "") else int(time.time()) % 1_000_000
    output_name = f"flux_{int(time.time())}_{seed}"
    output_path = os.path.join(IMAGE_OUTPUT_DIR, f"{output_name}.png")
    try:
        start_time = time.time()
        cmd = ["mflux-generate", "--model", FLUX_MODEL_REPO, "--base-model", FLUX_BASE_MODEL,
               "--prompt", prompt, "--steps", str(steps), "--seed", str(seed),
               "--width", str(width), "--height", str(height),
               "--guidance", str(guidance), "--output", output_path]
        subprocess.run(cmd, capture_output=True, text=True, timeout=600,
                       env={**os.environ, "VIRTUAL_ENV": os.environ.get("VIRTUAL_ENV", "")})
        elapsed = time.time() - start_time
        if not os.path.exists(output_path) or os.path.getsize(output_path) < 1000:
            return jsonify({"success": False, "error": "Image generation failed"}), 500
        with open(output_path, "rb") as f:
            img_b64 = base64.b64encode(f.read()).decode("ascii")
        return jsonify({"success": True,
                        "image_base64": img_b64, "mime_type": "image/png",
                        "seed_used": seed, "render_time_sec": round(elapsed, 1),
                        "width": width, "height": height, "steps": steps,
                        "model": "flux-krea-dev"})
    except subprocess.TimeoutExpired:
        return jsonify({"success": False, "error": "Generation timed out (10 min)"}), 504
    except Exception as e:
        return jsonify({"success": False, "error": str(e)}), 500


@app.route('/api/list-images', methods=['GET'])
def list_images():
    """List up to 50 generated .png images, newest-name first."""
    images = []
    if os.path.isdir(IMAGE_OUTPUT_DIR):
        for fn in sorted(os.listdir(IMAGE_OUTPUT_DIR), reverse=True):
            if fn.endswith(".png"):
                fp = os.path.join(IMAGE_OUTPUT_DIR, fn)
                images.append({"filename": fn,
                               "size_kb": round(os.path.getsize(fp) / 1024, 1),
                               "created": time.ctime(os.path.getmtime(fp))})
    return jsonify({"images": images[:50], "count": len(images)})


# ═══ ROUGH CUT VIDEO ═══
COMPILE_DIR = Path.home() / "cam_compile"
COMPILE_DIR.mkdir(exist_ok=True)


@app.route('/api/compile-rough-video', methods=['POST'])
def compile_rough_video():
    """Download clips, optionally mux per-clip audio, normalise, and concat with ffmpeg.

    Each clip dict may carry 'video_url' (required) and 'audio_url' (optional).
    Clips that fail to download or normalise are skipped; the endpoint fails only
    when *no* clip survives. Intermediates are deleted, leaving one final mp4.
    """
    data = request.get_json() or {}
    clips = data.get('clips', [])
    resolution = int(data.get('resolution', 1080))
    output_name = data.get('output_name', f'rough_{int(time.time())}')
    fps = int(data.get('fps', 24))
    if not clips:
        return jsonify({'success': False, 'error': 'No clips provided'}), 400
    output_name = ''.join(c for c in output_name if c.isalnum() or c in '_-')[:80]
    res_map = {720: (1280, 720), 1080: (1920, 1080), 2160: (3840, 2160)}
    target_w, target_h = res_map.get(resolution, (1920, 1080))
    job_dir = COMPILE_DIR / output_name
    if job_dir.exists():
        shutil.rmtree(job_dir, ignore_errors=True)
    job_dir.mkdir(exist_ok=True)
    # 1) Download every clip (and its optional audio track).
    downloaded = []
    for ci, clip in enumerate(clips):
        vid_url = clip.get('video_url')
        if not vid_url:
            continue
        vid_file = job_dir / f"clip_{ci:04d}.mp4"
        try:
            urllib.request.urlretrieve(vid_url, str(vid_file))
        except Exception:
            continue
        if vid_file.stat().st_size < 5000:
            continue
        aud_file = None
        if clip.get('audio_url'):
            aud_path = job_dir / f"clip_{ci:04d}_audio.mp3"
            try:
                urllib.request.urlretrieve(clip['audio_url'], str(aud_path))
                aud_file = str(aud_path) if aud_path.stat().st_size > 500 else None
            except Exception:
                pass
        downloaded.append({'video': str(vid_file), 'audio': aud_file, 'index': ci})
    if not downloaded:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'success': False, 'error': 'No clips downloaded'}), 400
    # 2) Normalise every clip to one resolution/fps/codec so concat -c copy works.
    scale_filter = (f"scale={target_w}:{target_h}:force_original_aspect_ratio=decrease,"
                    f"pad={target_w}:{target_h}:(ow-iw)/2:(oh-ih)/2:black,setsar=1")
    normalised = []
    for di, d in enumerate(downloaded):
        source = d['video']
        if d['audio']:
            # Mux external audio onto the clip before normalisation.
            merge_file = job_dir / f"merge_{di:04d}.mp4"
            subprocess.run(['ffmpeg', '-y', '-i', d['video'], '-i', d['audio'],
                            '-c:v', 'copy', '-c:a', 'aac', '-b:a', '128k',
                            '-map', '0:v:0', '-map', '1:a:0', '-shortest',
                            str(merge_file)], capture_output=True, timeout=120)
            if merge_file.exists() and merge_file.stat().st_size > 5000:
                source = str(merge_file)
        norm_file = job_dir / f"norm_{di:04d}.mp4"
        subprocess.run(['ffmpeg', '-y', '-i', source, '-vf', scale_filter,
                        '-c:v', 'libx264', '-preset', 'fast', '-crf', '23',
                        '-c:a', 'aac', '-b:a', '128k', '-ar', '44100', '-ac', '2',
                        '-r', str(fps), '-pix_fmt', 'yuv420p',
                        '-movflags', '+faststart', str(norm_file)],
                       capture_output=True, timeout=180)
        if norm_file.exists() and norm_file.stat().st_size > 5000:
            normalised.append(str(norm_file))
    if not normalised:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'success': False, 'error': 'All clips failed normalisation'}), 500
    # 3) Stream-copy concat of the normalised clips.
    concat_list = job_dir / "concat.txt"
    with open(concat_list, 'w') as f:
        for nf in normalised:
            f.write(f"file '{nf}'\n")
    output_file = job_dir / f"{output_name}.mp4"
    subprocess.run(['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', str(concat_list),
                    '-c', 'copy', '-movflags', '+faststart', str(output_file)],
                   capture_output=True, timeout=300)
    if not output_file.exists():
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'success': False, 'error': 'Concatenation failed'}), 500
    duration = 0
    try:
        probe = subprocess.run(['ffprobe', '-v', 'error', '-show_entries', 'format=duration',
                                '-of', 'csv=p=0', str(output_file)],
                               capture_output=True, text=True, timeout=10)
        duration = round(float(probe.stdout.strip()), 1)
    except Exception:
        pass
    # Keep only the final file in the job directory.
    for f in job_dir.iterdir():
        if f.name != output_file.name:
            f.unlink(missing_ok=True)
    return jsonify({'success': True,
                    'download_url': f'/api/download-compiled/{output_name}',
                    'duration': duration,
                    'file_size': output_file.stat().st_size,
                    'clip_count': len(normalised)})


# FIX: route needs a <name> converter so Flask binds the view argument.
@app.route('/api/download-compiled/<name>', methods=['GET'])
def download_compiled(name):
    """Download a compiled rough cut (mp4, or mp3 via ?format=mp3)."""
    safe_name = ''.join(c for c in name if c.isalnum() or c in '_-')
    fmt = request.args.get('format', 'mp4')
    ext = 'mp3' if fmt == 'mp3' else 'mp4'
    mime = 'audio/mpeg' if fmt == 'mp3' else 'video/mp4'
    filepath = COMPILE_DIR / safe_name / f"{safe_name}.{ext}"
    if not filepath.exists():
        return jsonify({'error': 'File not found'}), 404
    return send_file(str(filepath), mimetype=mime, as_attachment=True,
                     download_name=f"{safe_name}.{ext}")


@app.route('/api/compile-status', methods=['GET'])
def compile_status():
    """List the 20 most recent finished compile jobs."""
    jobs = []
    if COMPILE_DIR.exists():
        for d in sorted(COMPILE_DIR.iterdir(), reverse=True):
            if d.is_dir():
                mp4 = d / f"{d.name}.mp4"
                if mp4.exists():
                    jobs.append({'name': d.name,
                                 'size_mb': round(mp4.stat().st_size / (1024*1024), 2),
                                 'created': time.ctime(mp4.stat().st_mtime)})
    return jsonify({'jobs': jobs[:20]})


# ═══ LIVEPORTRAIT ═══
LP_OUTPUT_DIR = os.path.expanduser('~/AI/liveportrait_output')
os.makedirs(LP_OUTPUT_DIR, exist_ok=True)
LP_REPO = os.path.expanduser('~/AI/LivePortrait')
LP_PYTHON = os.path.join(LP_REPO, 'lp-env', 'bin', 'python')


@app.route('/api/liveportrait', methods=['POST'])
def liveportrait():
    """Animate a reference image with a driving video via LivePortrait (blocking).

    Optionally muxes an external audio track onto the result. The job dir is
    cleaned to contain only the final <job_id>.mp4.
    """
    data = request.get_json() or {}
    ref_url = data.get('ref_image_url', '')
    drive_url = data.get('driving_video_url', '')
    audio_url = data.get('audio_url', '')
    if not ref_url:
        return jsonify({'error': 'ref_image_url is required'}), 400
    if not drive_url:
        return jsonify({'error': 'driving_video_url is required'}), 400
    job_id = f"lp_{int(time.time())}_{os.urandom(3).hex()}"
    job_dir = Path(LP_OUTPUT_DIR) / job_id
    job_dir.mkdir(exist_ok=True)
    ref_file = job_dir / "ref.jpg"
    drive_file = job_dir / "driving.mp4"
    audio_file = job_dir / "audio.mp3" if audio_url else None
    try:
        urllib.request.urlretrieve(ref_url, str(ref_file))
        urllib.request.urlretrieve(drive_url, str(drive_file))
        if audio_url:
            urllib.request.urlretrieve(audio_url, str(audio_file))
    except Exception as e:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': f'Download failed: {e}'}), 400
    if ref_file.stat().st_size < 1000 or drive_file.stat().st_size < 5000:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': 'Invalid input files'}), 400
    lp_output_dir = job_dir / "lp_out"
    lp_output_dir.mkdir(exist_ok=True)
    cmd = [LP_PYTHON, os.path.join(LP_REPO, "inference.py"),
           "-s", str(ref_file), "-d", str(drive_file), "-o", str(lp_output_dir)]
    start_time = time.time()
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=600,
                                cwd=LP_REPO,
                                env={**os.environ, 'PYTORCH_ENABLE_MPS_FALLBACK': '1'})
    except subprocess.TimeoutExpired:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': 'LivePortrait timed out'}), 504
    except Exception as e:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': f'LivePortrait failed: {e}'}), 500
    # Pick the newest sufficiently-large mp4 the tool produced.
    lp_video = None
    for f in sorted(lp_output_dir.rglob("*.mp4"), key=lambda p: p.stat().st_mtime, reverse=True):
        if f.stat().st_size > 5000:
            lp_video = f
            break
    if not lp_video:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': 'LivePortrait produced no video'}), 500
    elapsed = time.time() - start_time
    final_video = job_dir / f"{job_id}.mp4"
    if audio_file and audio_file.exists() and audio_file.stat().st_size > 500:
        # Re-encode with the supplied audio muxed in; fall back to a plain copy.
        subprocess.run(['ffmpeg', '-y', '-i', str(lp_video), '-i', str(audio_file),
                        '-c:v', 'libx264', '-preset', 'fast', '-crf', '23',
                        '-c:a', 'aac', '-b:a', '128k', '-map', '0:v:0', '-map', '1:a:0',
                        '-shortest', '-pix_fmt', 'yuv420p', '-movflags', '+faststart',
                        str(final_video)], capture_output=True, timeout=120)
        if not final_video.exists():
            shutil.copy2(str(lp_video), str(final_video))
    else:
        shutil.copy2(str(lp_video), str(final_video))
    duration = 0
    try:
        probe = subprocess.run(['ffprobe', '-v', 'error', '-show_entries', 'format=duration',
                                '-of', 'csv=p=0', str(final_video)],
                               capture_output=True, text=True, timeout=10)
        duration = round(float(probe.stdout.strip()), 1)
    except Exception:
        pass
    # Keep only the final file.
    for f in job_dir.iterdir():
        if f.name != final_video.name:
            if f.is_dir():
                shutil.rmtree(f, ignore_errors=True)
            else:
                f.unlink(missing_ok=True)
    return jsonify({'status': 'complete', 'video_url': f'/api/serve-lp-video/{job_id}',
                    'duration': duration, 'render_time': round(elapsed, 1),
                    'file_size': final_video.stat().st_size, 'engine': 'liveportrait'})


# FIX: route needs a <job_id> converter so Flask binds the view argument.
@app.route('/api/serve-lp-video/<job_id>', methods=['GET'])
def serve_lp_video(job_id):
    """Serve a finished LivePortrait or SadTalker result video by job id.

    ST_OUTPUT_DIR is defined later at module level; it is resolved at request
    time, after the full module has been imported.
    """
    safe_id = ''.join(c for c in job_id if c.isalnum() or c in '_-')
    for base_dir in [LP_OUTPUT_DIR, ST_OUTPUT_DIR]:
        p = Path(base_dir) / safe_id / f"{safe_id}.mp4"
        if p.exists():
            return send_file(str(p), mimetype='video/mp4')
    return jsonify({'error': 'Video not found'}), 404


# ═══ SADTALKER ═══
ST_OUTPUT_DIR = os.path.expanduser('~/AI/sadtalker_output')
os.makedirs(ST_OUTPUT_DIR, exist_ok=True)
ST_REPO = os.path.expanduser('~/AI/SadTalker')
ST_PYTHON = os.path.join(ST_REPO, 'st-env', 'bin', 'python')


@app.route('/api/sadtalker', methods=['POST'])
def sadtalker():
    """Audio-driven talking-head video from a single image via SadTalker (blocking)."""
    data = request.get_json() or {}
    ref_url = data.get('ref_image_url', '')
    audio_url = data.get('audio_url', '')
    if not ref_url or not audio_url:
        return jsonify({'error': 'ref_image_url and audio_url required'}), 400
    job_id = f"st_{int(time.time())}_{os.urandom(3).hex()}"
    job_dir = Path(ST_OUTPUT_DIR) / job_id
    job_dir.mkdir(exist_ok=True)
    ref_file = job_dir / "ref.jpg"
    audio_file = job_dir / "audio.mp3"
    try:
        urllib.request.urlretrieve(ref_url, str(ref_file))
        urllib.request.urlretrieve(audio_url, str(audio_file))
    except Exception as e:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': f'Download failed: {e}'}), 400
    if ref_file.stat().st_size < 1000 or audio_file.stat().st_size < 500:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': 'Invalid input files'}), 400
    st_result_dir = job_dir / "st_out"
    st_result_dir.mkdir(exist_ok=True)
    cmd = [ST_PYTHON, os.path.join(ST_REPO, "inference.py"),
           "--driven_audio", str(audio_file), "--source_image", str(ref_file),
           "--result_dir", str(st_result_dir), "--still", "--preprocess", "crop"]
    start_time = time.time()
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=600, cwd=ST_REPO)
    except subprocess.TimeoutExpired:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': 'SadTalker timed out'}), 504
    except Exception as e:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': f'SadTalker failed: {e}'}), 500
    # Pick the newest sufficiently-large mp4 the tool produced.
    st_video = None
    for f in sorted(st_result_dir.rglob("*.mp4"), key=lambda p: p.stat().st_mtime, reverse=True):
        if f.stat().st_size > 5000:
            st_video = f
            break
    if not st_video:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'error': 'SadTalker produced no video'}), 500
    elapsed = time.time() - start_time
    final_video = job_dir / f"{job_id}.mp4"
    # Re-encode to web-friendly H.264/AAC; fall back to a plain copy on failure.
    subprocess.run(['ffmpeg', '-y', '-i', str(st_video),
                    '-c:v', 'libx264', '-preset', 'fast', '-crf', '23',
                    '-c:a', 'aac', '-b:a', '128k', '-pix_fmt', 'yuv420p',
                    '-movflags', '+faststart', str(final_video)],
                   capture_output=True, timeout=120)
    if not final_video.exists():
        shutil.copy2(str(st_video), str(final_video))
    duration = 0
    try:
        probe = subprocess.run(['ffprobe', '-v', 'error', '-show_entries', 'format=duration',
                                '-of', 'csv=p=0', str(final_video)],
                               capture_output=True, text=True, timeout=10)
        duration = round(float(probe.stdout.strip()), 1)
    except Exception:
        pass
    # Keep only the final file.
    for f in job_dir.iterdir():
        if f.name != final_video.name:
            if f.is_dir():
                shutil.rmtree(f, ignore_errors=True)
            else:
                f.unlink(missing_ok=True)
    return jsonify({'status': 'complete', 'video_url': f'/api/serve-lp-video/{job_id}',
                    'duration': duration, 'render_time': round(elapsed, 1),
                    'file_size': final_video.stat().st_size, 'engine': 'sadtalker'})


# ═══════════════════════════════════════════════════════
# 3D ASSET GENERATION — TRELLIS.2 + HUNYUAN3D-2.1
# EXACT generate.py flags from working bkup22 + Popen async
# ═══════════════════════════════════════════════════════
try:
    import trimesh
    TRIMESH_OK = True
except ImportError:
    TRIMESH_OK = False
    print("⚠ trimesh not installed")

THREED_OUTPUT_DIR = os.path.expanduser('~/AI/3d_output')
os.makedirs(THREED_OUTPUT_DIR, exist_ok=True)
TRELLIS_REPO = os.path.expanduser('~/AI/trellis-mac')
TRELLIS_PYTHON = os.path.join(TRELLIS_REPO, '.venv', 'bin', 'python')
HUNYUAN_REPO = os.path.expanduser('~/AI/Hunyuan3D-2.1')
HUNYUAN_VENV = os.path.join(HUNYUAN_REPO, 'hy3d-env', 'bin', 'python')


def _download_image_for_3d(url, dest_path):
    """Fetch an input image; True only if the file landed and is >1 KB."""
    try:
        urllib.request.urlretrieve(url, str(dest_path))
        return Path(dest_path).exists() and Path(dest_path).stat().st_size > 1000
    except Exception as e:
        print(f" ❌ Image download failed: {e}")
        return False


def _decimate_glb(input_path, output_path, target_faces=5000):
    """Copy the GLB through unchanged and report its face count.

    trimesh decimation STRIPS PBR textures from GLB files. Skip decimation —
    serve full textured GLB instead. TRELLIS generate.py already does internal
    mesh simplification. Raw output (~100-200K faces) is fine for web viewing
    via model-viewer. Proper decimation preserving textures requires Xcode +
    mtlgemm (Phase 2).

    Returns the face count (-1 if trimesh is unavailable or the load failed).
    """
    input_size = Path(input_path).stat().st_size if Path(input_path).exists() else 0
    face_count = -1
    if TRIMESH_OK:
        try:
            mesh = trimesh.load(str(input_path), force='mesh')
            if mesh is not None:
                face_count = len(mesh.faces)
        except Exception:
            pass
    print(f" 📐 Raw mesh: {face_count} faces, {input_size/1024:.0f}KB — "
          f"serving full textured GLB (decimation strips textures)")
    shutil.copy2(str(input_path), str(output_path))
    return face_count


# ═══ TRELLIS — Popen async ═══
@app.route('/api/gen-3d-trellis', methods=['POST'])
def gen_3d_trellis():
    """Launch a TRELLIS.2 image-to-3D job via Popen and return its job id.

    The client polls /api/3d-job/<job_id> for completion.
    """
    data = request.get_json() or {}
    image_url = data.get('image_url', '')
    image_b64 = data.get('image_base64', '')
    output_name = data.get('output_name', f'trellis_{int(time.time())}')
    decimation = int(data.get('decimation_target', 5000))
    tex_size = int(data.get('texture_size', 1024))
    if not image_url and not image_b64:
        return jsonify({'success': False, 'error': 'image_url or image_base64 required'}), 400
    output_name = ''.join(c for c in output_name if c.isalnum() or c in '_-')[:60]
    job_dir = Path(THREED_OUTPUT_DIR) / output_name
    if job_dir.exists():
        shutil.rmtree(job_dir, ignore_errors=True)
    job_dir.mkdir(exist_ok=True)
    input_img = job_dir / "input.png"
    if image_b64:
        with open(input_img, 'wb') as f:
            f.write(base64.b64decode(image_b64))
    elif image_url:
        if not _download_image_for_3d(image_url, input_img):
            shutil.rmtree(job_dir, ignore_errors=True)
            return jsonify({'success': False, 'error': 'Image download failed'}), 400
    if not input_img.exists() or input_img.stat().st_size < 1000:
        shutil.rmtree(job_dir, ignore_errors=True)
        return jsonify({'success': False, 'error': 'Invalid input image'}), 400
    # Resize large images to 1024x1024 — reduces conv_none crash risk on MPS
    try:
        from PIL import Image as PILImage
        img = PILImage.open(str(input_img))
        if img.width > 1024 or img.height > 1024:
            img.thumbnail((1024, 1024), PILImage.LANCZOS)
            img.save(str(input_img))
            print(f" 📐 Resized input to {img.width}x{img.height} (was larger, reduces crash risk)")
    except Exception as e:
        print(f" ⚠ Could not resize input: {e}")
    raw_glb_base = job_dir / "raw_output"
    # Random seed each run — avoids always hitting seed=42 which crashes on some images
    import random
    initial_seed = str(random.randint(1, 99999))
    cmd = [TRELLIS_PYTHON, os.path.join(TRELLIS_REPO, 'generate.py'), str(input_img),
           '--output', str(raw_glb_base), '--texture-size', str(tex_size),
           '--seed', initial_seed]
    env = {**os.environ, 'PYTORCH_ENABLE_MPS_FALLBACK': '1', 'SPARSE_CONV_BACKEND': 'flex_gemm'}
    print(f"🧊 TRELLIS.2 job: {output_name} (seed={initial_seed}) — launching via Popen...")
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            cwd=TRELLIS_REPO, env=env)
    _register_proc(output_name, proc, job_dir, output_name, decimation, tex_size,
                   'trellis2', attempt=1, cmd=cmd, env=env)
    return jsonify({'success': True, 'job_id': output_name, 'status': 'processing'})


# ═══ TEXT-TO-3D: FLUX (sync) → TRELLIS (Popen) ═══
@app.route('/api/gen-3d-text', methods=['POST'])
def gen_3d_text():
    """Text-to-3D: render a concept image with FLUX (synchronously, via the
    in-process /api/gen-image endpoint), then launch TRELLIS on it via Popen."""
    data = request.get_json() or {}
    prompt = (data.get('prompt') or '').strip()
    if not prompt:
        return jsonify({'success': False, 'error': 'Prompt is required'}), 400
    output_name = data.get('output_name', f't2_3d_{int(time.time())}')
    output_name = ''.join(c for c in output_name if c.isalnum() or c in '_-')[:60]
    # Prompt engineering: isolated object on white → cleaner 3D reconstruction.
    enhanced_prompt = (f"{prompt}, isolated object on pure white background, single centered object, "
                       "product photography style, clean edges, no shadows, 3D asset reference, front view")
    flux_data = {'prompt': enhanced_prompt,
                 'width': int(data.get('flux_width', 1024)),
                 'height': int(data.get('flux_height', 1024)),
                 'steps': int(data.get('flux_steps', 20)),
                 'guidance_scale': 5.0}
    with app.test_client() as client:
        flux_resp = client.post('/api/gen-image', json=flux_data)
        flux_result = flux_resp.get_json()
    if not flux_result.get('success') or not flux_result.get('image_base64'):
        return jsonify({'success': False, 'error': 'FLUX image generation failed'}), 500
    job_dir = Path(THREED_OUTPUT_DIR) / output_name
    job_dir.mkdir(exist_ok=True)
    input_img = job_dir / "input.png"
    with open(input_img, 'wb') as f:
        f.write(base64.b64decode(flux_result['image_base64']))
    # Keep a copy of the concept image alongside the 3D output (best-effort).
    try:
        with open(job_dir / f"{output_name}_concept.png", 'wb') as f:
            f.write(base64.b64decode(flux_result['image_base64']))
    except Exception:
        pass
    decimation = int(data.get('decimation_target', 5000))
    tex_size = int(data.get('texture_size', 1024))
    raw_glb_base = job_dir / "raw_output"
    import random
    initial_seed = str(random.randint(1, 99999))
    cmd = [TRELLIS_PYTHON, os.path.join(TRELLIS_REPO, 'generate.py'), str(input_img),
           '--output', str(raw_glb_base), '--texture-size', str(tex_size),
           '--seed', initial_seed]
    env = {**os.environ, 'PYTORCH_ENABLE_MPS_FALLBACK': '1', 'SPARSE_CONV_BACKEND': 'flex_gemm'}
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            cwd=TRELLIS_REPO, env=env)
    _register_proc(output_name, proc, job_dir, output_name, decimation, tex_size,
                   'trellis2_flux_chain', attempt=1, cmd=cmd, env=env)
    return jsonify({'success': True, 'job_id': output_name, 'status': 'processing'})


# ═══ HUNYUAN3D — Popen async ═══
@app.route('/api/gen-3d-hunyuan', methods=['POST'])
def gen_3d_hunyuan():
    """Launch a Hunyuan3D-2 job via Popen (image- or text-conditioned).

    A small driver script is written into the job dir and run with the
    Hunyuan venv's python; the poll endpoint picks up the resulting GLB.
    """
    data = request.get_json() or {}
    image_url = data.get('image_url', '')
    image_b64 = data.get('image_base64', '')
    prompt = data.get('prompt', '')
    output_name = data.get('output_name', f'hy3d_{int(time.time())}')
    decimation = int(data.get('decimation_target', 5000))
    steps = int(data.get('num_inference_steps', 30))
    if not image_url and not image_b64 and not prompt:
        return jsonify({'success': False, 'error': 'image_url, image_base64, or prompt required'}), 400
    output_name = ''.join(c for c in output_name if c.isalnum() or c in '_-')[:60]
    job_dir = Path(THREED_OUTPUT_DIR) / output_name
    if job_dir.exists():
        shutil.rmtree(job_dir, ignore_errors=True)
    job_dir.mkdir(exist_ok=True)
    mode = 'text' if (not image_url and not image_b64) else 'image'
    input_img = job_dir / "input.png"
    if image_b64:
        with open(input_img, 'wb') as f:
            f.write(base64.b64decode(image_b64))
    elif image_url:
        if not _download_image_for_3d(image_url, input_img):
            shutil.rmtree(job_dir, ignore_errors=True)
            return jsonify({'success': False, 'error': 'Image download failed'}), 400
    raw_glb = job_dir / "raw_output.glb"
    raw_obj = job_dir / "raw_output.obj"
    safe_prompt = prompt.replace('"', '\\"') if prompt else ''
    # Build the driver script executed inside the Hunyuan virtualenv.
    gen_script = job_dir / "run_gen.py"
    sc = (f"import sys\nsys.path.insert(0, '{HUNYUAN_REPO}')\nimport torch\n"
          "from hy3dgen.shapegen import Hunyuan3DDiTFlowMatchingPipeline\n"
          "device = 'mps' if torch.backends.mps.is_available() else 'cpu'\n"
          "pipeline = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained('tencent/Hunyuan3D-2', device=device)\n")
    if mode == 'image':
        sc += (f"from PIL import Image\nimage = Image.open('{input_img}').convert('RGBA')\n"
               "try:\n from hy3dgen.rembg import BackgroundRemover\n image = BackgroundRemover()(image)\nexcept Exception: pass\n"
               f"mesh = pipeline(image=image, num_inference_steps={steps})[0]\n")
    else:
        sc += f'mesh = pipeline(prompt="{safe_prompt}", num_inference_steps={steps})[0]\n'
    sc += (f"mesh.export('{raw_glb}')\n"
           f"try:\n mesh.export('{raw_obj}')\nexcept Exception: pass\n"
           'print("HUNYUAN_DONE")\n')
    with open(gen_script, 'w') as f:
        f.write(sc)
    env = {**os.environ, 'PYTORCH_ENABLE_MPS_FALLBACK': '1'}
    print(f"🐉 Hunyuan3D job: {output_name} — launching via Popen...")
    proc = subprocess.Popen([HUNYUAN_VENV, str(gen_script)],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            cwd=HUNYUAN_REPO, env=env)
    _register_proc(output_name, proc, job_dir, output_name, decimation, 1024, 'hunyuan3d')
    return jsonify({'success': True, 'job_id': output_name, 'status': 'processing'})


# ═══════════════════════════════════════════
# 3D JOB POLL — checks Popen process + filesystem
# No threads. proc.poll() + look for GLB files.
# ═══════════════════════════════════════════ @app.route('/api/3d-job/', methods=['GET']) def check_3d_job(job_id): safe_id = ''.join(c for c in job_id if c.isalnum() or c in '_-') job = _3d_procs.get(safe_id) if not job: return jsonify({'status': 'not_found', 'error': 'Job not found'}), 404 proc = job['proc'] job_dir = Path(job['job_dir']) output_name = job['output_name'] elapsed = round(time.time() - job['started'], 1) # Still running? ret = proc.poll() if ret is None: return jsonify({'status': 'processing', 'elapsed': elapsed}) # Process finished — hunt for GLB raw_glb = job_dir / "raw_output.glb" raw_glb_dbl = job_dir / "raw_output.glb.glb" if not raw_glb.exists() and raw_glb_dbl.exists(): shutil.move(str(raw_glb_dbl), str(raw_glb)) if not raw_glb.exists(): for cn in [job_dir / "raw_output.glb", job_dir / "raw_output.glb.glb"]: if cn.exists() and cn.stat().st_size > 5000: shutil.move(str(cn), str(raw_glb)); break if not raw_glb.exists(): for c in sorted(Path(TRELLIS_REPO).glob("*.glb"), key=lambda p: p.stat().st_mtime, reverse=True): shutil.move(str(c), str(raw_glb)); break if not raw_glb.exists(): for c in sorted(job_dir.glob("*.glb"), key=lambda p: p.stat().st_mtime, reverse=True): if c.name != f"{output_name}.glb": raw_glb = c; break if not raw_glb.exists(): for c in sorted(job_dir.glob("*.obj"), key=lambda p: p.stat().st_mtime, reverse=True): raw_glb = c; break if not raw_glb.exists() or raw_glb.stat().st_size < 5000: stderr = proc.stderr.read().decode('utf-8', errors='replace')[-500:] if proc.stderr else '' stdout = proc.stdout.read().decode('utf-8', errors='replace')[-500:] if proc.stdout else '' # AUTO-RETRY: conv_none "index out of bounds" crash — retry with different seed (up to 3 attempts) if 'out of bounds' in stderr and job.get('attempt', 1) < 3 and job.get('cmd'): attempt = job['attempt'] + 1 new_seed = str(int(time.time()) % 99999 + attempt * 7) retry_cmd = job['cmd'] + ['--seed', new_seed] print(f" ⚠ conv_none crash on attempt {attempt-1} — 
retrying with seed {new_seed} (attempt {attempt}/3)...") new_proc = subprocess.Popen(retry_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=TRELLIS_REPO, env=job['env']) job['proc'] = new_proc job['attempt'] = attempt return jsonify({'status': 'processing', 'elapsed': elapsed, 'retry': attempt}) print(f" ❌ No output for {safe_id} (exit code {ret})") print(f" dir: {[f.name for f in job_dir.iterdir()] if job_dir.exists() else 'GONE'}") print(f" stderr: {stderr}") print(f" stdout: {stdout}") del _3d_procs[safe_id] return jsonify({'status': 'failed', 'error': f'No 3D output (exit {ret})', 'stderr': stderr, 'elapsed': elapsed}) # SUCCESS — decimate and finalize print(f" ✅ GLB found: {raw_glb.stat().st_size/(1024*1024):.1f}MB in {elapsed}s") final_glb = job_dir / f"{output_name}.glb" final_faces = _decimate_glb(raw_glb, final_glb, target_faces=job['decimation']) # Thumbnail thumb_path = job_dir / f"{output_name}_thumb.png" if TRIMESH_OK: try: mesh = trimesh.load(str(final_glb), force='mesh') if mesh is not None: png_data = trimesh.Scene(mesh).save_image(resolution=[512, 512]) if png_data: with open(thumb_path, 'wb') as f: f.write(png_data) except: pass if raw_glb.exists() and raw_glb != final_glb: raw_glb.unlink(missing_ok=True) inp = job_dir / "input.png" if inp.exists(): inp.unlink(missing_ok=True) file_size = final_glb.stat().st_size print(f" ✅ Final: {final_glb.name} — {final_faces} faces, {file_size/1024:.0f}KB") result = { 'success': True, 'engine': job['engine'], 'download_url': f'/api/serve-3d/{output_name}/{output_name}.glb', 'thumb_url': f'/api/serve-3d/{output_name}/{output_name}_thumb.png' if thumb_path.exists() else None, 'poly_count': final_faces, 'texture_size': job['tex_size'], 'file_size': file_size, 'render_time': elapsed, 'format': 'glb', 'has_pbr': True, } del _3d_procs[safe_id] return jsonify({'status': 'complete', 'result': result, 'elapsed': elapsed}) # ═══ 3D SERVE / LIST / CLEANUP / HEALTH / CANCEL ═══ @app.route('/api/3d-job//cancel', 
methods=['POST']) def cancel_3d_job(job_id): safe_id = ''.join(c for c in job_id if c.isalnum() or c in '_-') job = _3d_procs.get(safe_id) if not job: return jsonify({'success': False, 'error': 'Job not found'}), 404 proc = job['proc'] if proc.poll() is None: proc.kill() print(f" 🛑 Killed 3D job {safe_id}") del _3d_procs[safe_id] return jsonify({'success': True, 'message': f'Job {safe_id} cancelled'}) @app.route('/api/serve-3d//', methods=['GET']) def serve_3d(folder, filename): safe_folder = ''.join(c for c in folder if c.isalnum() or c in '_-') safe_file = ''.join(c for c in filename if c.isalnum() or c in '_-.') if '..' in safe_file or '/' in safe_file: return jsonify({'error': 'Invalid filename'}), 400 file_path = Path(THREED_OUTPUT_DIR) / safe_folder / safe_file if not file_path.exists(): return jsonify({'error': 'File not found'}), 404 mime_map = {'.glb': 'model/gltf-binary', '.obj': 'text/plain', '.fbx': 'application/octet-stream', '.png': 'image/png', '.jpg': 'image/jpeg'} return send_file(str(file_path), mimetype=mime_map.get(file_path.suffix.lower(), 'application/octet-stream')) @app.route('/api/list-3d', methods=['GET']) def list_3d(): assets = [] if Path(THREED_OUTPUT_DIR).exists(): for d in sorted(Path(THREED_OUTPUT_DIR).iterdir(), reverse=True): if d.is_dir(): for f in d.iterdir(): if f.suffix in ('.glb', '.obj', '.fbx'): assets.append({'name': d.name, 'file': f.name, 'format': f.suffix[1:], 'size_kb': round(f.stat().st_size/1024, 1), 'created': time.ctime(f.stat().st_mtime), 'download_url': f'/api/serve-3d/{d.name}/{f.name}', 'thumb_url': f'/api/serve-3d/{d.name}/{d.name}_thumb.png' if (d/f"{d.name}_thumb.png").exists() else None}) return jsonify({'assets': assets[:50], 'count': len(assets)}) @app.route('/api/cleanup-3d', methods=['POST']) def cleanup_3d(): days = (request.get_json() or {}).get('days', 14); cutoff = time.time()-(days*86400); deleted = 0 if Path(THREED_OUTPUT_DIR).exists(): for d in Path(THREED_OUTPUT_DIR).iterdir(): if d.is_dir() and 
d.stat().st_mtime < cutoff: shutil.rmtree(d, ignore_errors=True); deleted += 1 return jsonify({'deleted': deleted, 'older_than_days': days}) @app.route('/api/health-3d', methods=['GET']) def health_3d(): engines = { 'trellis2': {'installed': os.path.exists(TRELLIS_PYTHON) and os.path.isdir(TRELLIS_REPO), 'path': TRELLIS_REPO, 'label': 'VEX·0 Forge (TRELLIS.2)'}, 'hunyuan3d': {'installed': os.path.exists(HUNYUAN_VENV) and os.path.isdir(HUNYUAN_REPO), 'path': HUNYUAN_REPO, 'label': 'VEX·0H Forge (Hunyuan3D)'}, } return jsonify({'engines': engines, 'output_dir': str(THREED_OUTPUT_DIR), 'any_local': any(e['installed'] for e in engines.values())}) # ═══ MAIN ═══ if __name__ == "__main__": print("🤖 MaxServSuperAGI Server v2.5-popen starting on port 8787...") print(f"📦 Default LLM: {DEFAULT_MODEL}") print(f"🎥 LTX video model: {LTX_MODEL}") print(f"🎨 FLUX Krea model: {FLUX_MODEL_REPO}") print(f"🎭 LivePortrait: {LP_REPO}") print(f"🗣️ SadTalker: {ST_REPO}") print(f"🧊 TRELLIS.2: {TRELLIS_REPO}") print(f"🐉 Hunyuan3D v2.1: {HUNYUAN_REPO}") print(f"─────────────────────────────────────") print(f"🔗 Chat: http://localhost:8787/api/chat") print(f"🎬 Video: http://localhost:8787/api/gen-video") print(f"🖼️ Image: http://localhost:8787/api/gen-image") print(f"🎭 LivePortrait: http://localhost:8787/api/liveportrait") print(f"🗣️ SadTalker: http://localhost:8787/api/sadtalker") print(f"🧊 3D (TRELLIS): http://localhost:8787/api/gen-3d-trellis") print(f"🧊 3D (Text→3D): http://localhost:8787/api/gen-3d-text") print(f"🐉 3D (Hunyuan): http://localhost:8787/api/gen-3d-hunyuan") print(f"📊 3D Job Poll: http://localhost:8787/api/3d-job/") print(f"🛑 3D Cancel: http://localhost:8787/api/3d-job//cancel") print(f"❤️ 3D Health: http://localhost:8787/api/health-3d") app.run(host="0.0.0.0", port=8787, debug=False, threaded=True)