AI Game Experience Personalization System Development

We design and deploy artificial intelligence systems: from prototype to production-ready solutions. Our team combines expertise in machine learning, data engineering and MLOps to make AI work not in the lab, but in real business.
Showing 1 of 1 services. All 1566 services
AI Game Experience Personalization System Development
Medium
~1-2 weeks
FAQ
AI Development Areas
AI Solution Development Stages
Latest works
  • image_website-b2b-advance_0.png
    B2B ADVANCE company website development
    1212
  • image_web-applications_feedme_466_0.webp
    Development of a web application for FEEDME
    1161
  • image_websites_belfingroup_462_0.webp
    Website development for BELFINGROUP
    852
  • image_ecommerce_furnoro_435_0.webp
    Development of an online store for the company FURNORO
    1041
  • image_logo-advance_0.png
    B2B Advance company logo design
    561
  • image_crm_enviok_479_0.webp
    Development of a web application for Enviok
    822

AI Game Experience Personalization System

Game personalization is not just item recommendations. AI adapts difficulty in real-time, generates narrative based on player style, matches opponents of equal skill, and creates unique events for each player. The goal: maximize time-in-game and lifetime value.

Dynamic Difficulty Adjustment (DDA)

import numpy as np
from collections import deque
from dataclasses import dataclass
from typing import Optional

@dataclass
class GameSession:
    """Mutable per-player session state; updated in place by
    DynamicDifficultyAdjuster.update_difficulty."""
    player_id: str
    skill_level: float       # 0-1
    current_difficulty: float  # 0-1; the adjuster clips it to [0.1, 1.0]
    recent_outcomes: deque    # True=win, False=lose; rolling outcome history
    frustration_score: float  # 0-1; rises on near-death losses, decays otherwise
    boredom_score: float      # 0-1; rises on fast, healthy wins, decays otherwise

class DynamicDifficultyAdjuster:
    """
    Dynamic Difficulty Adjustment (DDA) based on flow theory: keep the
    player in the flow channel between boredom and frustration.
    Target win rate: 65-75% for optimal engagement.
    """

    TARGET_WIN_RATE = 0.70    # Center of the desired rolling win-rate band
    ADJUSTMENT_SPEED = 0.05   # Difficulty change per step
    WINDOW_SIZE = 10          # Recent N outcomes for assessment

    def update_difficulty(self, session: "GameSession",
                           last_outcome: bool,
                           time_to_complete_seconds: float,
                           health_remaining_pct: float = 1.0) -> float:
        """Update difficulty after each encounter.

        Args:
            session: per-player state, mutated in place (outcomes, emotion
                scores, current_difficulty).
            last_outcome: True if the player won the encounter.
            time_to_complete_seconds: duration of the encounter.
            health_remaining_pct: fraction of health left, 0-1.

        Returns:
            The new difficulty, clipped to [0.1, 1.0].
        """
        session.recent_outcomes.append(last_outcome)
        # Bug fix: WINDOW_SIZE was declared but never applied, so the win
        # rate was computed over the whole session history instead of the
        # recent window. Trim explicitly (works whether or not the deque
        # was constructed with a maxlen).
        while len(session.recent_outcomes) > self.WINDOW_SIZE:
            session.recent_outcomes.popleft()

        # Too little data to judge yet — keep difficulty unchanged.
        if len(session.recent_outcomes) < 3:
            return session.current_difficulty

        recent_win_rate = sum(session.recent_outcomes) / len(session.recent_outcomes)

        # Frustration: losing while nearly dead ramps the score up fast;
        # anything else lets it decay slowly.
        if not last_outcome and health_remaining_pct < 0.1:
            session.frustration_score = min(1.0, session.frustration_score + 0.2)
        else:
            session.frustration_score = max(0.0, session.frustration_score - 0.05)

        # Boredom: winning quickly with high health means content is too easy.
        if last_outcome and time_to_complete_seconds < 30 and health_remaining_pct > 0.8:
            session.boredom_score = min(1.0, session.boredom_score + 0.15)
        else:
            session.boredom_score = max(0.0, session.boredom_score - 0.05)

        # Adjust difficulty: emotional signals take priority; otherwise
        # nudge toward the target win-rate band (±0.1 dead zone).
        new_difficulty = session.current_difficulty

        if session.frustration_score > 0.6:
            new_difficulty -= self.ADJUSTMENT_SPEED * 1.5  # Decrease faster
        elif session.boredom_score > 0.6:
            new_difficulty += self.ADJUSTMENT_SPEED * 1.5  # Increase faster
        elif recent_win_rate > self.TARGET_WIN_RATE + 0.1:
            new_difficulty += self.ADJUSTMENT_SPEED
        elif recent_win_rate < self.TARGET_WIN_RATE - 0.1:
            new_difficulty -= self.ADJUSTMENT_SPEED

        session.current_difficulty = float(np.clip(new_difficulty, 0.1, 1.0))
        return session.current_difficulty

    def scale_enemy_parameters(self, base_enemy: dict,
                                difficulty: float) -> dict:
        """Scale enemy parameters by difficulty.

        Args:
            base_enemy: dict with 'hp', 'damage', 'speed', 'accuracy',
                'ai_reaction_ms' baseline values.
            difficulty: 0-1 difficulty level.

        Returns:
            New dict of scaled parameters plus 'loot_bonus_pct'.
        """
        scale_factor = 0.5 + difficulty * 1.0  # difficulty 0.1 → 0.6x, 1.0 → 1.5x

        return {
            'hp': int(base_enemy['hp'] * scale_factor),
            'damage': round(base_enemy['damage'] * scale_factor, 2),
            'speed': round(base_enemy['speed'] * (0.8 + difficulty * 0.4), 2),
            'accuracy': min(0.95, base_enemy['accuracy'] * scale_factor),
            # Harder enemies react faster (smaller delay in ms).
            'ai_reaction_ms': int(base_enemy['ai_reaction_ms'] / scale_factor),
            'loot_bonus_pct': int(difficulty * 50)  # Higher rewards for higher difficulty
        }


class MatchmakingSystem:
    """Opponent and team matching system (Elo-based)."""

    def __init__(self):
        # Elo K-factor: maximum rating change from a single game.
        self.rating_k_factor = 32

    def compute_elo_rating(self, player_rating: float,
                            opponent_rating: float,
                            outcome: float) -> float:
        """Return the player's updated Elo rating.

        Args:
            player_rating: current rating.
            opponent_rating: opponent's current rating.
            outcome: 1.0 = win, 0.5 = draw, 0.0 = loss.
        """
        # Standard Elo expected score on a 400-point logistic scale.
        expected = 1 / (1 + 10 ** ((opponent_rating - player_rating) / 400))
        return player_rating + self.rating_k_factor * (outcome - expected)

    def find_match(self, player_id: str,
                    player_rating: float,
                    player_latency_ms: int,
                    available_players: list[dict],
                    max_wait_seconds: int = 60,
                    seconds_waited: Optional[float] = None) -> Optional[list[dict]]:
        """
        Balance: rating + ping + wait time.
        Expand rating window over time.

        Args:
            player_id: requesting player's id (excluded from candidates).
            player_rating: requesting player's Elo rating.
            player_latency_ms: requesting player's ping.
            available_players: dicts with 'player_id', 'rating' and
                optionally 'latency_ms' (defaults to 50 if missing).
            max_wait_seconds: queue-time cap.
            seconds_waited: actual time this player has been queued.
                Bug fix: the window was previously expanded from
                max_wait_seconds (the cap), so it was fully open from the
                first call instead of widening over time. Pass the elapsed
                wait for gradual expansion; None keeps the old behavior.

        Returns:
            One-element list with the best candidate (augmented with
            'match_score'), or None if nobody fits the rating window.
        """
        elapsed = max_wait_seconds if seconds_waited is None else seconds_waited
        wait_factor = min(elapsed, 30) / 30  # 0-1 over the first 30s of waiting
        rating_window = 100 + wait_factor * 200  # 100-300 Elo

        candidates = []
        for p in available_players:
            if p['player_id'] == player_id:
                continue

            rating_diff = abs(p['rating'] - player_rating)
            if rating_diff > rating_window:
                continue

            # Ping score degrades linearly, reaching 0 at a 150ms gap.
            latency_diff = abs(p.get('latency_ms', 50) - player_latency_ms)
            latency_score = max(0, 1.0 - latency_diff / 150)

            rating_score = 1.0 - rating_diff / rating_window
            combined_score = rating_score * 0.7 + latency_score * 0.3

            candidates.append({**p, 'match_score': combined_score})

        if not candidates:
            return None

        # Single best candidate: max() is O(n) vs sorting the whole list,
        # and resolves ties the same way (first maximum wins).
        return [max(candidates, key=lambda c: c['match_score'])]


class PersonalizedEventGenerator:
    """Generation of personalized in-game events"""

    PLAYER_MOTIVATIONS = {
        'explorer': ['discovery_event', 'hidden_area_unlock', 'lore_reveal'],
        'achiever': ['challenge_milestone', 'rare_achievement', 'collection_complete'],
        'socializer': ['guild_event', 'co-op_mission', 'friend_challenge'],
        'competitor': ['ranked_event', 'leaderboard_challenge', 'pvp_tournament'],
    }

    def generate_personal_event(self, player: dict,
                                  days_since_last_event: int,
                                  player_motivation: str) -> dict:
        """Build a personal event matching the player's motivational profile.

        Returns a {'event': None, 'reason': ...} dict while the 3-day
        cooldown has not elapsed; otherwise a fully configured event payload.
        """
        if days_since_last_event < 3:
            # Frequency cap: at most one event every 3 days.
            return {'event': None, 'reason': 'Too soon since last event'}

        # Unknown motivation types fall back to the 'achiever' pool.
        pool = self.PLAYER_MOTIVATIONS.get(
            player_motivation, self.PLAYER_MOTIVATIONS['achiever']
        )
        chosen_type = np.random.choice(pool)

        payload = {
            'event_type': chosen_type,
            'config': self._configure_event(chosen_type, player),
            'expires_in_hours': 48,
            'notification_channel': 'push_and_in_game',
        }
        # Long-time players get bigger rewards, capped at 2x after 100 days.
        payload['reward_scale'] = min(2.0, 1.0 + player.get('days_active', 0) / 100)
        return payload

    def _configure_event(self, event_type: str, player: dict) -> dict:
        """Per-type event parameters derived from the player profile."""
        level = player.get('current_level', 1)
        known_configs = {
            'challenge_milestone': {
                'target': level * 10,
                'reward': 'unique_badge',
                'difficulty': 'hard'
            },
            'pvp_tournament': {
                'bracket': 'skill_based',
                'rounds': 3,
                'prize_pool': '1000 gems'
            },
            'hidden_area_unlock': {
                'area_tier': min(5, level // 10 + 1),
                'hint_provided': True
            }
        }
        return known_configs.get(event_type, {'type': event_type})
DDA in major mobile games (data from Supercell, King) shows: optimal win rate of 65-75% maximizes D7 retention. Below 55% win rate causes frustration churn, above 85% causes boredom churn. Personalized events increase DAU/MAU ratio by 8-15% with correct motivation segmentation.