Development of AI Outpainting for Image Extension
Outpainting extends an image beyond its original boundaries, seamlessly filling the new area with content matching the original. It is used for creating widescreen banners from square photos, extending the space around objects, and creating panoramas.
diffusers Outpainting
from diffusers import StableDiffusionXLInpaintPipeline
from PIL import Image, ImageOps
import torch
import numpy as np
import io
class OutpaintingService:
    """Extend images beyond their borders with SDXL inpainting.

    The new border area is masked white and synthesized by the model so it
    blends with the original content; the original pixels are kept (mask = 0).
    """

    def __init__(self):
        # Heavyweight: downloads/loads SDXL inpainting weights; requires CUDA.
        self.pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
            "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
            torch_dtype=torch.float16
        ).to("cuda")

    @staticmethod
    def _aligned_dims(
        orig_w: int,
        orig_h: int,
        extend_left: int,
        extend_right: int,
        extend_top: int,
        extend_bottom: int,
    ) -> tuple[int, int]:
        """Return (new_w, new_h): extended canvas size rounded UP to a multiple of 8.

        Bug fix: the previous code rounded DOWN (``(w // 8) * 8``), which
        silently cropped up to 7 px off the right/bottom — eating into the
        requested extension and, when the extension on that axis was 0, into
        the original image itself. SDXL only needs dimensions divisible by 8,
        so rounding up is safe and loses nothing.
        """
        new_w = orig_w + extend_left + extend_right
        new_h = orig_h + extend_top + extend_bottom
        return ((new_w + 7) // 8) * 8, ((new_h + 7) // 8) * 8

    def extend_image(
        self,
        image_bytes: bytes,
        extend_left: int = 0,
        extend_right: int = 0,
        extend_top: int = 0,
        extend_bottom: int = 0,
        prompt: str = "seamless continuation of the scene",
        steps: int = 40
    ) -> bytes:
        """Outpaint the image by the given per-side pixel amounts.

        Args:
            image_bytes: encoded source image (anything PIL can decode).
            extend_left/right/top/bottom: pixels of new content per side.
            prompt: text guidance for the synthesized border content.
            steps: diffusion inference steps (quality vs. speed trade-off).

        Returns:
            PNG-encoded bytes of the extended image.
        """
        original = Image.open(io.BytesIO(image_bytes)).convert("RGB")
        orig_w, orig_h = original.size
        new_w, new_h = self._aligned_dims(
            orig_w, orig_h, extend_left, extend_right, extend_top, extend_bottom
        )
        # Neutral gray under the masked region; the model repaints it anyway.
        canvas = Image.new("RGB", (new_w, new_h), (128, 128, 128))
        canvas.paste(original, (extend_left, extend_top))
        # Mask: white (255) = area to synthesize, black (0) = keep original.
        mask = Image.new("L", (new_w, new_h), 255)
        mask.paste(Image.new("L", (orig_w, orig_h), 0), (extend_left, extend_top))
        result = self.pipe(
            prompt=prompt,
            image=canvas,
            mask_image=mask,
            height=new_h,
            width=new_w,
            num_inference_steps=steps,
            guidance_scale=8.0,
            # strength near 1.0 lets the model fully repaint the masked border.
            strength=0.99
        ).images[0]
        buf = io.BytesIO()
        result.save(buf, format="PNG")
        return buf.getvalue()
Format Conversion
class AspectRatioConverter:
"""Convert square to 16:9 or 9:16 via outpainting"""
def __init__(self, outpainting_service: OutpaintingService):
self.service = outpainting_service
def square_to_landscape(self, image_bytes: bytes, prompt: str = "") -> bytes:
"""1:1 → 16:9 (add on sides)"""
img = Image.open(io.BytesIO(image_bytes))
target_w = int(img.height * 16 / 9)
extend_each = (target_w - img.width) // 2
return self.service.extend_image(
image_bytes,
extend_left=extend_each,
extend_right=extend_each,
prompt=prompt or "seamless background extension, same scene"
)
def square_to_portrait(self, image_bytes: bytes, prompt: str = "") -> bytes:
"""1:1 → 9:16 (add top and bottom)"""
img = Image.open(io.BytesIO(image_bytes))
target_h = int(img.width * 16 / 9)
extend_each = (target_h - img.height) // 2
return self.service.extend_image(
image_bytes,
extend_top=extend_each,
extend_bottom=extend_each,
prompt=prompt or "seamless extension, matching environment"
)
Tiled Outpainting for Large Extensions
def extend_large(
    image_bytes: bytes,
    total_extension: int,
    direction: str = "right",
    tile_size: int = 512,
    overlap: int = 128,
    service=None,
) -> bytes:
    """Extend an image by a large amount using several outpainting passes.

    Fixes over the previous version: the pass-count formula
    ``(total - overlap) // (tile_size - overlap)`` could leave part of the
    requested extension unrendered; only ``direction="right"`` was handled
    (others silently did nothing); and the service came only from an
    undefined module-level global.

    Args:
        image_bytes: encoded source image.
        total_extension: total pixels to add in ``direction``; exactly this
            many are added (0 or negative returns the input unchanged).
        direction: one of "left", "right", "top", "bottom".
        tile_size: per-pass window size; each pass adds at most
            ``tile_size - overlap`` fresh pixels.
        overlap: margin kept within each pass so generation stays close to
            recently created content. NOTE(review): ``extend_image`` only adds
            fresh pixels, so ``overlap`` here merely limits per-pass growth —
            confirm this matches the intended tiling scheme.
        service: OutpaintingService to use; defaults to the module-level
            ``outpainting_service`` (preserving the original behavior).

    Returns:
        PNG-encoded bytes of the fully extended image.

    Raises:
        ValueError: on an unknown direction or ``overlap >= tile_size``.
    """
    if service is None:
        service = outpainting_service
    side_kwarg = {
        "left": "extend_left",
        "right": "extend_right",
        "top": "extend_top",
        "bottom": "extend_bottom",
    }.get(direction)
    if side_kwarg is None:
        raise ValueError(f"unknown direction: {direction!r}")
    if overlap >= tile_size:
        raise ValueError("overlap must be smaller than tile_size")

    step = tile_size - overlap
    current_image = image_bytes
    remaining = total_extension
    while remaining > 0:
        extension = min(step, remaining)
        current_image = service.extend_image(
            current_image,
            prompt="seamless continuation",
            **{side_kwarg: extension}
        )
        remaining -= extension
    return current_image
Applications
| Task | Original Format | Target Format |
|---|---|---|
| Instagram → YouTube banner | 1:1 | 16:9 |
| Product photo → landing | 4:5 | 21:9 |
| Portrait → book cover | 3:4 | 2:3 with text space |
| Landscape → panorama | 16:9 | 32:9 |
Timeline: basic outpainting API — 2–3 days. Format conversion tool with preview — 1–2 weeks.







