
import os
import random
import cv2
import numpy as np
from moviepy import VideoFileClip, concatenate_videoclips
from moviepy.video.VideoClip import ColorClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip

# === Configuration ===
SOURCE_FOLDERS = [
    "D:/recup/current/films",
    "G:/"
]
OUTPUT_FILE = "C:/Users/Morusque/Desktop/montage_humains_1920x1080.mp4"
CLIP_DURATION = 0.2  # seconds
CLIP_CROP_START = 100.0  # seconds to skip at the start of each source video
CLIP_CROP_STOP = 100.0  # seconds to skip at the end of each source video
NUM_CLIPS = 100
TARGET_RES = (1920, 1080)
FPS = 30

# === Human detection function (OpenCV HOG) ===
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())

def has_human_frame(frame):
    """Return True if the HOG people detector finds at least one person in the frame."""
    # frame is RGB (MoviePy), convert to BGR for OpenCV
    bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(bgr, cv2.COLOR_BGR2GRAY)
    boxes, _ = hog.detectMultiScale(gray, winStride=(8, 8), padding=(8, 8), scale=1.05)
    return len(boxes) > 0

# === Load and shuffle videos recursively ===
video_extensions = ('.mp4', '.mov', '.avi', '.mkv', '.wmv', '.ram', '.rm', '.flv', '.webm', '.mpg', '.mpeg', '.3gp', '.m4v')
video_files = []
for folder in SOURCE_FOLDERS:
    for root, _, files in os.walk(folder):
        for file in files:
            if file.lower().endswith(video_extensions):
                video_files.append(os.path.join(root, file))

random.shuffle(video_files)

extracted_clips = []
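source_clips = []  # added helper (not in the original): keep opened VideoFileClip readers so they can be closed after export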

# === Extract valid clips ===
for video_path in video_files:
    if len(extracted_clips) >= NUM_CLIPS:
        break

    try:
        clip = VideoFileClip(video_path)
        print(f"🎬 {video_path} ({clip.duration:.2f}s)")

        if clip.duration - CLIP_CROP_START - CLIP_CROP_STOP < CLIP_DURATION:
            print("⏭️ Too short, skipping.")
            clip.close()
            continue

        start = random.uniform(CLIP_CROP_START, clip.duration - CLIP_CROP_STOP - CLIP_DURATION)
        sub = clip.subclipped(start, start + CLIP_DURATION)
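        # has_human_frame() is otherwise never called; sample one frame from the
        # middle of the subclip and skip the video if no person is detected
        # (checking a single frame is an assumption, chosen to keep detection cheap).
        if not has_human_frame(sub.get_frame(CLIP_DURATION / 2)):
            print("⏭️ No person detected, skipping.")
            clip.close()
            continue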

        # Fit within 1920x1080 while preserving the aspect ratio, then center on a black background
        sub = sub.resized(height=TARGET_RES[1])
        if sub.w > TARGET_RES[0]:
            sub = sub.resized(width=TARGET_RES[0])

        background = ColorClip(size=TARGET_RES, color=(0, 0, 0), duration=sub.duration)
        composed = CompositeVideoClip([background, sub.with_position(("center", "center"))])
        composed = composed.with_fps(FPS).with_audio(sub.audio)

        extracted_clips.append(composed)
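        source_clips.append(clip)  # keep the source reader open until after the final render; closed in the cleanup step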

    except Exception as e:
        print(f"❌ Erreur avec {video_path} : {e}")

# === Final export ===
if extracted_clips:
    print("\n🔗 Concaténation...")
    final = concatenate_videoclips(extracted_clips, method="compose")
    final.write_videofile(
        OUTPUT_FILE,
        codec="libx264",
        audio_codec="aac",
        fps=FPS
    )
    final.close()
    print(f"\n✅ Montage final avec humains : {OUTPUT_FILE}")
else:
    print("\n❌ Aucun clip avec humain détecté.")

# === Cleanup ===
for c in extracted_clips + source_clips:
    try:
        c.close()
    except Exception:
        pass
