"""Scan every .mp4 under the current directory, score sampled frames for
darkness, grayness, and edge density, extract a short clip at each video's
best-scoring moment, and concatenate the clips into final_output.mp4 with
ffmpeg."""
import os
import random
import subprocess
from pathlib import Path

import cv2
import numpy as np

# All lookups are rooted at the directory the script is launched from.
SOURCE_DIR = Path.cwd()

# Extracted clips are written here; created up front so extract_clip can
# write without checking.
CLIP_DIR = Path("clips")
CLIP_DIR.mkdir(exist_ok=True)

CLIP_DURATION = 0.5  # seconds per extracted clip
FRAME_SAMPLE_INTERVAL = 1  # seconds between sampled frames

# Scoring weights — adjust to taste
WEIGHT_DARKNESS = -1.0  # lower brightness = better
WEIGHT_DOMINANT_COLOR = 1.0  # how "gray" a frame is
WEIGHT_EDGE_DENSITY = 0.5  # silhouette/contrast
|
def get_video_duration(video_path):
    """Return the duration of *video_path* in seconds, as reported by ffprobe.

    Raises:
        subprocess.CalledProcessError: if ffprobe exits non-zero
            (``check=True`` — without it a failed probe produced an empty
            stdout and a cryptic ``float('')`` ValueError instead).
        ValueError: if ffprobe's output is not parseable as a float.
    """
    result = subprocess.run(
        [
            'ffprobe', '-v', 'error',
            '-show_entries', 'format=duration',
            '-of', 'default=noprint_wrappers=1:nokey=1',
            str(video_path),
        ],
        stdout=subprocess.PIPE,
        check=True,  # fail loudly at the probe, not at float() below
    )
    return float(result.stdout.strip())
|
|
|
def get_candidate_scores(video_path, interval):
    """Score frames of *video_path* sampled every *interval* seconds.

    Each sampled frame is scored as a weighted sum of average brightness
    (negative weight: darker is better), "grayness" (255 minus mean HSV
    saturation), and Canny edge density (silhouette/contrast).

    Note: the original implementation ignored *interval* and always
    stepped 1 second; it is now honored (coerced to an int step of at
    least 1), which is backward-compatible with the caller's value of 1.

    Returns:
        list[tuple[int, float]]: (timestamp_seconds, score) pairs sorted
        best-first. Empty if the file yields no readable frames or has an
        unreadable FPS (previously a ZeroDivisionError).
    """
    cap = cv2.VideoCapture(str(video_path))
    try:
        fps = cap.get(cv2.CAP_PROP_FPS)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if fps <= 0:
            # Corrupt/unreadable container: nothing to score.
            return []
        duration = total_frames / fps

        frame_scores = []
        step = max(1, int(interval))
        # Stop early enough that a CLIP_DURATION-long clip still fits.
        for t in range(0, int(duration - CLIP_DURATION), step):
            cap.set(cv2.CAP_PROP_POS_MSEC, t * 1000)
            success, frame = cap.read()
            if not success:
                continue

            # Downsample: scoring only needs coarse statistics.
            frame_small = cv2.resize(frame, (64, 64))
            gray = cv2.cvtColor(frame_small, cv2.COLOR_BGR2GRAY)
            avg_brightness = np.mean(gray)

            # Color dominance: near-grayscale frames have low mean saturation.
            hsv = cv2.cvtColor(frame_small, cv2.COLOR_BGR2HSV)
            grayish_score = 255 - np.mean(hsv[:, :, 1])

            # Silhouette: fraction of pixels the Canny detector marks as edges.
            edges = cv2.Canny(gray, 100, 200)
            edge_density = np.count_nonzero(edges) / edges.size

            score = (
                WEIGHT_DARKNESS * avg_brightness
                + WEIGHT_DOMINANT_COLOR * grayish_score
                + WEIGHT_EDGE_DENSITY * edge_density
            )
            frame_scores.append((t, score))
    finally:
        # Release the capture even if decoding raises mid-loop.
        cap.release()

    return sorted(frame_scores, key=lambda x: x[1], reverse=True)
|
|
|
|
def extract_clip(video_path, start_time, output_path):
    # Cut a CLIP_DURATION-second clip from video_path starting at start_time.
    # '-ss' before '-i' is the fast input-side seek; '-c copy' skips
    # re-encoding. NOTE(review): stream copy can only cut on keyframes, so
    # with a 0.5 s duration the clip may start earlier than requested or
    # come out empty — confirm, or re-encode if frame-accurate cuts matter.
    # ffmpeg output is discarded; errors are intentionally not checked.
    subprocess.run([
        'ffmpeg', '-y', '-ss', str(start_time), '-i', str(video_path),
        '-t', str(CLIP_DURATION), '-c', 'copy', str(output_path)
    ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
|
|
|
|
# ---- Driver: pick the best moment from every video, then stitch clips ----
concat_entries = []

# Resolve once: rglob() below yields absolute paths (SOURCE_DIR is cwd),
# while CLIP_DIR is relative — the original `CLIP_DIR in video.parents`
# therefore never matched and our own output clips got re-analyzed.
clip_root = CLIP_DIR.resolve()

for video in SOURCE_DIR.rglob("*.mp4"):
    if clip_root in video.resolve().parents:
        continue  # skip clips this script produced

    print(f"Analyzing {video.name}...")
    duration = get_video_duration(video)
    if duration <= CLIP_DURATION:
        print(f"Skipping {video.name}, too short.")
        continue

    scored = get_candidate_scores(video, FRAME_SAMPLE_INTERVAL)
    if not scored:
        print(f"No good frames found in {video.name}")
        continue

    # scored is sorted best-first: take the top-scoring timestamp.
    best_time = scored[0][0]
    clip_path = CLIP_DIR / f"clip_{video.stem}.mp4"
    extract_clip(video, best_time, clip_path)
    concat_entries.append(f"file '{clip_path.as_posix()}'")

# Only merge when at least one clip was produced — ffmpeg errors out on an
# empty concat list.
if concat_entries:
    concat_file = CLIP_DIR / "concat_list.txt"
    concat_file.write_text("\n".join(concat_entries))

    # '-safe 0' permits the relative/absolute paths in the list file.
    subprocess.run([
        'ffmpeg', '-f', 'concat', '-safe', '0',
        '-i', str(concat_file), '-c', 'copy', 'final_output.mp4'
    ])
|