Development of an AI system for replacing faces in videos (Face Swap)
Face swap is a technology for replacing faces in videos. It is used in entertainment applications, visualization of clothing/accessories, educational materials with anonymization, and production (stuntman replacement). It requires strict compliance with the law and obtaining consent.
Option: Roop / ReActor — open-source face-swap solutions
import subprocess
import os
class FaceSwapService:
    """Thin wrapper around the roop CLI for swapping a face into videos.

    Detection is done by InsightFace inside roop; the swap itself uses a
    SimSwap-style model. Each call shells out to ``roop/run.py``.
    """

    def __init__(self, model_dir: str = "./models"):
        # Directory where the swap/detection models are stored.
        self.model_dir = model_dir

    def swap_face_in_video(
        self,
        source_face: str,    # image containing the source face
        target_video: str,   # video in which the face is replaced
        output_path: str,
        face_index: int = 0  # which face in the target to replace
    ) -> str:
        """Run roop as a subprocess and return ``output_path``.

        Raises:
            subprocess.CalledProcessError: if roop exits non-zero (check=True).
        """
        subprocess.run([
            "python", "roop/run.py",
            "--source", source_face,
            "--target", target_video,
            "--output", output_path,
            "--face-restore",
            "--frame-processor", "face_swapper", "face_enhancer",
            "-e", "colab-run"
        ], check=True)
        return output_path

    def batch_swap(self, face_path: str, video_paths: list[str], output_dir: str) -> list[str]:
        """Swap the same source face into several videos.

        Returns the list of output file paths, one per input video.
        """
        # Robustness: make sure the destination directory exists up front.
        os.makedirs(output_dir, exist_ok=True)
        results = []
        for video in video_paths:
            filename = os.path.basename(video)
            # BUG FIX: the original f-string had lost its placeholder
            # (f"swapped_(unknown)") — every output collided on one name.
            output = os.path.join(output_dir, f"swapped_{filename}")
            results.append(self.swap_face_in_video(face_path, video, output))
        return results
InsightFace Direct (Advanced Control)
import insightface
import cv2
import numpy as np
from gfpgan import GFPGANer
class AdvancedFaceSwapper:
    """Direct InsightFace-based face swapper with GFPGAN enhancement.

    Gives frame-level control (which face to replace) instead of delegating
    to the roop CLI. Requires a CUDA-capable GPU (CUDAExecutionProvider).
    """

    def __init__(self):
        # buffalo_l: InsightFace detection + recognition model pack.
        self.face_analysis = insightface.app.FaceAnalysis(
            name="buffalo_l",
            providers=["CUDAExecutionProvider"]
        )
        self.face_analysis.prepare(ctx_id=0, det_size=(640, 640))
        # inswapper_128: the actual face-swap ONNX model.
        self.swapper = insightface.model_zoo.get_model(
            "inswapper_128.onnx",
            download=True
        )
        # GFPGAN restores/sharpens the swapped face region.
        self.enhancer = GFPGANer(model_path="GFPGANv1.4.pth", upscale=1)

    def swap_faces_in_frame(
        self,
        source_face_embedding,
        frame: np.ndarray,
        target_face_idx: int = 0
    ) -> np.ndarray:
        """Swap one detected face in ``frame``; return the frame unchanged
        when no face (or no face at the requested index) is found."""
        faces = self.face_analysis.get(frame)
        if not faces or target_face_idx >= len(faces):
            return frame
        target_face = faces[target_face_idx]
        result = self.swapper.get(frame, target_face, source_face_embedding, paste_back=True)
        # Enhance the face after the swap to hide blending artifacts.
        _, _, result = self.enhancer.enhance(result, has_aligned=False, paste_back=True)
        return result

    def process_video(
        self,
        source_image: str,
        target_video: str,
        output_path: str
    ) -> str:
        """Swap the face from ``source_image`` into every frame of
        ``target_video`` and write the result to ``output_path``.

        Raises:
            ValueError: if the source image is unreadable, contains no face,
                or the target video cannot be opened.
        """
        # Detect the source face once; its Face object is reused per frame.
        source_img = cv2.imread(source_image)
        if source_img is None:
            # cv2.imread returns None silently on a bad path — fail loudly.
            raise ValueError(f"Cannot read source image: {source_image}")
        source_faces = self.face_analysis.get(source_img)
        if not source_faces:
            raise ValueError("No face found in source image")
        source_face = source_faces[0]

        cap = cv2.VideoCapture(target_video)
        if not cap.isOpened():
            raise ValueError(f"Cannot open target video: {target_video}")
        fps = cap.get(cv2.CAP_PROP_FPS)
        if not fps or fps != fps:  # 0 or NaN on some containers
            fps = 25.0  # sane fallback so the output stays playable
        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
        try:
            while True:
                ret, frame = cap.read()
                if not ret:
                    break
                processed = self.swap_faces_in_frame(source_face, frame)
                out.write(processed)
        finally:
            # Always release handles, even if a frame fails mid-stream.
            cap.release()
            out.release()
        return output_path
Legal Restrictions and Ethics
Forbidden:
- Deepfake pornography (criminal in many jurisdictions)
- Creating fake content featuring real people without their consent
- Use in disinformation, fraud
Required:
- Written consent from the person whose face is being used
- Explicitly marking content as AI-generated
- Policy for the storage and deletion of biometric data (e.g., Russian Federal Law No. 152-FZ "On Personal Data"; check the equivalent regulation in your jurisdiction)
Adding a watermark:
def add_ai_watermark(video_path: str, output_path: str) -> str:
    """Burn an "AI GENERATED" text watermark into the top-left of a video.

    Args:
        video_path: input video file.
        output_path: destination file (overwritten if it exists).

    Returns:
        The output path, for chaining.

    Raises:
        subprocess.CalledProcessError: if ffmpeg exits non-zero.
    """
    subprocess.run([
        # BUG FIX: "-y" forces overwrite; without it ffmpeg blocks on an
        # interactive prompt when output_path already exists (hangs workers).
        "ffmpeg", "-y", "-i", video_path,
        "-vf", "drawtext=text='AI GENERATED':fontsize=24:fontcolor=white:alpha=0.7:x=10:y=10",
        output_path
    ], check=True)
    return output_path
Timeframe: Face Swap service for images – 1 week. For videos with queue and face enhancement – 2–3 weeks.







