AI Image Colorization
AI colorization restores color in black-and-white photographs and video. Neural network models understand context: sky is blue, grass is green, skin is flesh-toned — without manual markup.
DeOldify — Classic Colorization
# --- DeOldify setup (module level, runs once at import) ---
# NOTE(review): device.set must be called BEFORE importing/creating the
# colorizer models per DeOldify's docs — keep this ordering.
from deoldify import device
from deoldify.device_id import DeviceId
from deoldify.visualize import get_image_colorizer
import PIL.Image as Image
import io
# Pin inference to the first GPU; fails over per DeOldify's own handling
# if unavailable — TODO confirm desired device for this deployment.
device.set(device=DeviceId.GPU0)
# artistic=True selects the "artistic" weights: more vibrant/saturated
# output; the "stable" model (artistic=False) is more conservative.
colorizer = get_image_colorizer(artistic=True) # artistic=True — more saturated colors
def colorize_image(image_bytes: bytes, render_factor: int = 35) -> bytes:
    """Colorize a black-and-white image with the module-level DeOldify colorizer.

    Args:
        image_bytes: Source image, encoded in any format PIL can open.
        render_factor: 7-45. Controls the resolution at which the model
            renders color — higher values generally give better results on
            large images but are slower (it does not directly control
            saturation).

    Returns:
        The colorized image encoded as JPEG bytes (quality 95).
    """
    import os
    import tempfile

    # Round-trip through grayscale so the model always sees a true B&W
    # RGB image, even if the source carried a color cast.
    input_image = Image.open(io.BytesIO(image_bytes)).convert("L").convert("RGB")

    # DeOldify's get_transformed_image takes a file path, so stage the
    # image in a named temp file.
    with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f:
        input_image.save(f.name)
        temp_path = f.name
    try:
        result = colorizer.get_transformed_image(temp_path, render_factor=render_factor)
    finally:
        # Always remove the staged file — the original leaked it when
        # colorization raised an exception.
        os.unlink(temp_path)

    buf = io.BytesIO()
    result.save(buf, format="JPEG", quality=95)
    return buf.getvalue()
Stable Diffusion img2img Approach
# --- Stable Diffusion img2img setup (module level, runs once at import) ---
from diffusers import StableDiffusionImg2ImgPipeline
import torch
# Load SD 1.5 in half precision and move it to the GPU. float16 roughly
# halves VRAM use; requires a CUDA device — this line raises if none is
# available. Model weights are downloaded on first run.
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
torch_dtype=torch.float16
).to("cuda")
def colorize_with_sd(
    bw_image: bytes,
    prompt_hint: str = "",
    *,
    strength: float = 0.5,
    guidance_scale: float = 8.0,
    num_inference_steps: int = 30,
) -> bytes:
    """Colorize a black-and-white image via Stable Diffusion img2img.

    Args:
        bw_image: Source image, encoded in any format PIL can open.
        prompt_hint: Optional extra prompt text (e.g. era or region) appended
            to the base colorization prompt.
        strength: img2img denoising strength in [0, 1]. Low values preserve
            the original structure; high values let the model repaint more.
        guidance_scale: Classifier-free guidance scale — how strongly the
            output follows the prompt.
        num_inference_steps: Number of diffusion denoising steps.

    Returns:
        The colorized image encoded as JPEG bytes (quality 95).
    """
    init_image = Image.open(io.BytesIO(bw_image)).convert("RGB")
    # Base prompt steers toward realistic colorization; the optional hint
    # is appended after a comma only when non-empty.
    prompt = f"colorized photograph, natural colors, realistic{', ' + prompt_hint if prompt_hint else ''}"
    result = pipe(
        prompt=prompt,
        image=init_image,
        strength=strength,  # low strength preserves structure
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    ).images[0]
    buf = io.BytesIO()
    result.save(buf, format="JPEG", quality=95)
    return buf.getvalue()
DeOldify is better for historical photographs (trained on real B&W images). SD img2img provides more control via prompt (can specify era, region). For video: DeOldify supports frame-by-frame colorization with temporal consistency. Integration timeline — 1–2 days.







