AI Learning Management System (LMS) Development

We design and deploy artificial intelligence systems: from prototype to production-ready solutions. Our team combines expertise in machine learning, data engineering and MLOps to make AI work not in the lab, but in real business.
Showing 1 of 1 services. All 1566 services.
AI Learning Management System (LMS) Development
Medium
~1-2 weeks
FAQ
AI Development Areas
AI Solution Development Stages
Latest works
  • image_website-b2b-advance_0.png
    B2B ADVANCE company website development
    1212
  • image_web-applications_feedme_466_0.webp
    Development of a web application for FEEDME
    1161
  • image_websites_belfingroup_462_0.webp
    Website development for BELFINGROUP
    852
  • image_ecommerce_furnoro_435_0.webp
    Development of an online store for the company FURNORO
    1041
  • image_logo-advance_0.png
    B2B Advance company logo design
    561
  • image_crm_enviok_479_0.webp
    Development of a web application for Enviok
    822

AI-система управления LMS

AI-интеграция в LMS (Learning Management System) — это автоматизация рутины преподавателя и администратора: оценка заданий, генерация тестов, раннее выявление отстающих, персонализированные напоминания. Платформы типа Moodle, Canvas, Teachable получают ML-слой поверх существующей инфраструктуры.

Автоматическая оценка заданий

import json

from anthropic import Anthropic
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

class AssignmentGrader:
    """LLM-based grading of open-ended assignments (essays and code)."""

    def __init__(self, rubric: dict):
        # rubric maps criterion name -> (max_points, description).
        self.rubric = rubric
        self.llm = Anthropic()

    def grade_essay(self, submission: str, model_answer: str) -> dict:
        """Grade an essay against the rubric with the LLM.

        Returns the model's JSON verdict (scores, total, feedback, ...);
        on a malformed reply returns a zero-score dict with 'error': True
        instead of raising.
        """
        criteria_text = '\n'.join(
            f"- {criterion}: {max_points} баллов. {description}"
            for criterion, (max_points, description) in self.rubric.items()
        )

        response = self.llm.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=500,
            messages=[{
                "role": "user",
                "content": f"""Grade this student essay according to the rubric.

RUBRIC:
{criteria_text}

MODEL ANSWER (for reference):
{model_answer[:500]}

STUDENT SUBMISSION:
{submission[:800]}

Return JSON:
{{
  "scores": {{"criterion_name": score, ...}},
  "total": total_score,
  "max_total": max_possible,
  "feedback": "specific feedback in Russian",
  "strengths": ["..."],
  "improvements": ["..."]
}}"""
            }]
        )

        # Narrow handling: bad JSON or an unexpected reply shape only.
        try:
            return json.loads(response.content[0].text)
        except (json.JSONDecodeError, IndexError, AttributeError):
            return {'total': 0, 'feedback': 'Ошибка автоматической проверки', 'error': True}

    def grade_code_assignment(self, code: str, test_cases: list[dict]) -> dict:
        """Grade code: run test cases, then ask the LLM for a quality score.

        Score split: functional tests are worth up to 70 points
        (pass ratio * 100 * 0.7) and code quality up to 30 points
        (LLM score on a 1-10 scale, weighted by 3).
        """
        if not test_cases:
            # Guard: the unguarded version divided by len(test_cases)
            # and raised ZeroDivisionError on an empty test suite.
            return {
                'functional_score': 0.0,
                'quality_score': 0,
                'total_score': 0.0,
                'tests_passed': '0/0',
                'feedback': '',
                'test_details': []
            }

        # Run test cases (must be sandboxed in production).
        test_results = []
        passed = 0
        for tc in test_cases:
            try:
                # In production: Docker sandbox + timeout.
                result = self._run_safely(code, tc['input'])
                correct = str(result).strip() == str(tc['expected']).strip()
                test_results.append({'input': tc['input'], 'passed': correct})
                if correct:
                    passed += 1
            except Exception as e:
                test_results.append({'input': tc['input'], 'passed': False, 'error': str(e)})

        functional_score = passed / len(test_cases) * 100

        # Code-quality review via LLM.
        quality_response = self.llm.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=200,
            messages=[{
                "role": "user",
                "content": f"""Evaluate code quality (1-10) and give brief feedback in Russian.
Consider: readability, efficiency, edge cases, style.

Code:

{code[:600]}


Return JSON: {{"quality_score": 7, "feedback": "..."}}"""
            }]
        )

        try:
            quality = json.loads(quality_response.content[0].text)
        except (json.JSONDecodeError, IndexError, AttributeError):
            quality = {'quality_score': 5, 'feedback': ''}

        return {
            'functional_score': functional_score,
            'quality_score': quality.get('quality_score', 5),
            # 70% functional + quality (1-10) scaled to 30 points.
            'total_score': functional_score * 0.7 + quality.get('quality_score', 5) * 3,
            'tests_passed': f"{passed}/{len(test_cases)}",
            'feedback': quality.get('feedback', ''),
            'test_details': test_results
        }

    def _run_safely(self, code: str, input_data) -> str:
        """Stub — in production: subprocess + Docker + timeout."""
        return "placeholder"


class QuizGenerator:
    """Generates quiz questions from course material with an LLM."""

    def __init__(self):
        self.llm = Anthropic()

    def generate_quiz(self, content: str, n_questions: int = 5,
                       difficulty: str = 'medium',
                       question_types: list = None) -> list[dict]:
        """Build a list of question dicts for the given material.

        Returns an empty list when the model reply is not valid JSON.
        """
        if question_types is None:
            question_types = ['multiple_choice', 'true_false', 'fill_blank']

        prompt = f"""Generate {n_questions} quiz questions in Russian.

Content:
{content[:1500]}

Requirements:
- Difficulty: {difficulty}
- Mix of types: {', '.join(question_types)}
- Test understanding, not memorization
- Include distractors for multiple choice

Return JSON array:
[{{
  "type": "multiple_choice",
  "question": "...",
  "options": ["A) ...", "B) ...", "C) ...", "D) ..."],
  "correct_answer": "A",
  "explanation": "Why this answer is correct"
}}]"""

        reply = self.llm.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=1000,
            messages=[{"role": "user", "content": prompt}]
        )

        import json
        try:
            questions = json.loads(reply.content[0].text)
        except Exception:
            questions = []
        return questions


class EarlyWarningSystem:
    """Early detection of students at risk of dropping the course."""

    def compute_risk_scores(self, engagement_data: pd.DataFrame) -> pd.DataFrame:
        """Score each student's dropout risk from engagement signals.

        Risk indicators:
        - declining activity over the last two weeks
        - low grades combined with slow responses
        - missed deadlines

        Expects columns: logins_last_week, logins_week_before,
        avg_score_last_3, missed_deadlines_count, days_since_last_login,
        forum_posts_total.

        Returns a copy of the input with added activity_trend, risk_score
        (0..1) and risk_level ('low'/'medium'/'high') columns, sorted by
        risk_score descending.
        """
        risk_df = engagement_data.copy()

        # Week-over-week login trend; the +1 in the denominator avoids
        # division by zero for students with no logins the week before.
        risk_df['activity_trend'] = (
            risk_df['logins_last_week'] - risk_df['logins_week_before']
        ) / (risk_df['logins_week_before'] + 1)

        # Binary (0/1) risk factors.
        risk_factors = pd.DataFrame({
            'low_grades': (risk_df['avg_score_last_3'] < 0.6).astype(float),
            'declining_activity': (risk_df['activity_trend'] < -0.3).astype(float),
            'missed_deadlines': (risk_df['missed_deadlines_count'] > 1).astype(float),
            'no_login_7d': (risk_df['days_since_last_login'] > 7).astype(float),
            'low_forum_activity': (risk_df['forum_posts_total'] == 0).astype(float),
        })

        # Weights sum to 1.0, so risk_score stays in [0, 1].
        weights = {
            'low_grades': 0.25,
            'declining_activity': 0.25,
            'missed_deadlines': 0.30,
            'no_login_7d': 0.15,
            'low_forum_activity': 0.05
        }

        risk_df['risk_score'] = sum(
            risk_factors[factor] * weight
            for factor, weight in weights.items()
        )

        # include_lowest=True: without it a risk_score of exactly 0 falls
        # outside the first (left-open) bin and becomes NaN, not 'low'.
        risk_df['risk_level'] = pd.cut(
            risk_df['risk_score'],
            bins=[0, 0.3, 0.6, 1.0],
            labels=['low', 'medium', 'high'],
            include_lowest=True
        )

        return risk_df.sort_values('risk_score', ascending=False)

    def generate_intervention(self, student: dict) -> dict:
        """Pick the recommended intervention for a student's risk level.

        Unknown or missing risk levels fall back to the 'low' intervention.
        """
        risk_level = student.get('risk_level', 'low')

        interventions = {
            'low': {
                'action': 'automated_reminder',
                'message': 'Автоматическое напоминание об активных заданиях',
                'urgency': 'low'
            },
            'medium': {
                'action': 'personalized_email',
                'message': 'Персонализированное письмо с поддержкой от LLM',
                'urgency': 'medium',
                'assigned_to': 'system'
            },
            'high': {
                'action': 'mentor_outreach',
                'message': 'Личный контакт от куратора/ментора',
                'urgency': 'high',
                'assigned_to': 'human_mentor'
            }
        }

        return interventions.get(risk_level, interventions['low'])

AI-LMS типично снижает нагрузку преподавателя на проверку заданий на 60-70%. Система раннего предупреждения при своевременном вмешательстве снижает dropout rate на 15-25%. Автогенерация тестов сокращает время подготовки учебных материалов вдвое.