AI-based system for matching participants and networking at events
Conferences are valuable for networking, but attendees waste time in unproductive casual conversations instead of meeting the most relevant people. AI matching finds optimal pairs by looking for complementarity (what one person is seeking matches what the other is offering), not just similarity.
Networking Matching Algorithm
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
from anthropic import Anthropic
import json
class NetworkingMatcher:
    """Match event participants for networking.

    Scores every pair of participants by combining semantic bio similarity,
    complementary seeking/offering fit (in both directions), and industry
    overlap, then produces top-K matches per participant with an
    LLM-generated icebreaker for each unique pair.
    """

    # Weights of the score components (must sum to 1.0).
    _BIO_WEIGHT = 0.20
    _COMPLEMENT_WEIGHT = 0.60
    _INDUSTRY_WEIGHT = 0.20
    # Neutral similarity used when seeking/offering text is missing.
    _NEUTRAL_COMPLEMENT = 0.3

    def __init__(self):
        # Multilingual model so bios in different languages share one
        # embedding space.
        self.encoder = SentenceTransformer('paraphrase-multilingual-mpnet-base-v2')
        self.llm = Anthropic()

    def build_participant_profile(self, participant: dict) -> dict:
        """Build a structured profile for one participant.

        All embeddings (bio, seeking, offering) are computed ONCE here, so
        pairwise scoring does not re-encode the same text O(n^2) times.

        Args:
            participant: raw registration record; must contain 'id', other
                fields are optional.

        Returns:
            Profile dict consumed by compute_match_score / generate_matches.
        """
        seeking = participant.get('looking_for', '')
        offering = participant.get('can_offer', '')
        return {
            'id': participant['id'],
            'name': participant.get('name', ''),
            'role': participant.get('job_title', ''),
            'company': participant.get('company', ''),
            'seeking': seeking,    # what this person is looking for
            'offering': offering,  # what this person can provide
            'interests': participant.get('topics_of_interest', []),
            'industries': participant.get('industries', []),
            'bio_embedding': self._encode_bio(participant),
            # Pre-computed so scoring is a plain dot product per pair.
            'seeking_embedding': self._encode_text(seeking),
            'offering_embedding': self._encode_text(offering),
        }

    def _encode_text(self, text: str):
        """Encode free text to a normalized embedding; None for empty text."""
        if not text:
            return None
        return self.encoder.encode(text, normalize_embeddings=True)

    def _encode_bio(self, participant: dict) -> np.ndarray:
        """Encode a short synthetic bio (role, company, interests, goals)."""
        bio_text = (
            f"{participant.get('job_title', '')} at {participant.get('company', '')}. "
            f"Interests: {', '.join(participant.get('topics_of_interest', []))}. "
            f"Looking for: {participant.get('looking_for', '')}."
        )
        return self.encoder.encode(bio_text, normalize_embeddings=True)

    def _complement_sim(self, seeker: dict, provider: dict) -> float:
        """Similarity between seeker's 'seeking' and provider's 'offering'.

        Returns the neutral fallback when either text is missing. Uses the
        pre-computed embeddings when present; encodes on the fly otherwise
        (keeps old profile dicts working).
        """
        if not (seeker.get('seeking') and provider.get('offering')):
            return self._NEUTRAL_COMPLEMENT
        s_emb = seeker.get('seeking_embedding')
        if s_emb is None:
            s_emb = self.encoder.encode(seeker['seeking'], normalize_embeddings=True)
        o_emb = provider.get('offering_embedding')
        if o_emb is None:
            o_emb = self.encoder.encode(provider['offering'], normalize_embeddings=True)
        # Embeddings are L2-normalized, so cosine similarity == dot product.
        return float(np.dot(s_emb, o_emb))

    def compute_match_score(self, p1: dict, p2: dict) -> float:
        """Complementary match score in [0, 1].

        p1.seeking is matched against p2.offering and vice versa; shared
        bio context and industry overlap provide conversation starters.
        """
        # Semantic bio similarity (shared context); normalized vectors,
        # so the dot product is the cosine similarity.
        bio_similarity = float(np.dot(p1['bio_embedding'], p2['bio_embedding']))
        # Complementarity in both directions.
        seeking_offering_sim = self._complement_sim(p1, p2)
        reverse_sim = self._complement_sim(p2, p1)
        # Industry overlap (Jaccard index; 0 when both sets are empty).
        p1_industries = set(p1.get('industries', []))
        p2_industries = set(p2.get('industries', []))
        industry_overlap = len(p1_industries & p2_industries) / max(len(p1_industries | p2_industries), 1)
        # Weighted combination.
        score = (
            bio_similarity * self._BIO_WEIGHT +
            (seeking_offering_sim + reverse_sim) / 2 * self._COMPLEMENT_WEIGHT +
            industry_overlap * self._INDUSTRY_WEIGHT
        )
        return float(np.clip(score, 0, 1))

    def generate_matches(self, participants: list[dict],
                         matches_per_person: int = 5) -> list[dict]:
        """Generate personal networking matches.

        Returns one record per UNIQUE pair, sorted by descending score.
        Icebreakers are generated only after pair deduplication — each
        icebreaker is a paid LLM call, and the old order (generate, then
        dedup) wasted roughly one call per duplicated pair.
        """
        profiles = [self.build_participant_profile(p) for p in participants]
        n = len(profiles)
        # Symmetric score matrix; diagonal stays 0 (no self-matching).
        scores = np.zeros((n, n))
        for i in range(n):
            for j in range(i + 1, n):
                score = self.compute_match_score(profiles[i], profiles[j])
                scores[i, j] = score
                scores[j, i] = score
        seen_pairs = set()
        unique_matches = []
        for i, profile in enumerate(profiles):
            # Take K+1 candidates so that dropping self (if it sneaks in on
            # an all-zero row) still leaves K matches.
            top_indices = np.argsort(-scores[i])[:matches_per_person + 1]
            top_indices = [j for j in top_indices if j != i][:matches_per_person]
            for j in top_indices:
                pair = tuple(sorted([profile['id'], profiles[j]['id']]))
                if pair in seen_pairs:
                    continue  # each pair reported (and icebroken) once
                seen_pairs.add(pair)
                unique_matches.append({
                    'participant_a': profile['id'],
                    'participant_b': profiles[j]['id'],
                    'match_score': round(float(scores[i, j]), 3),
                    'icebreaker': self._generate_icebreaker(profile, profiles[j])
                })
        return sorted(unique_matches, key=lambda x: -x['match_score'])

    def _generate_icebreaker(self, p1: dict, p2: dict) -> str:
        """Conversation starter for the meeting (one LLM call per pair)."""
        response = self.llm.messages.create(
            model="claude-3-5-sonnet-20241022",
            max_tokens=80,
            messages=[{
                "role": "user",
                "content": f"""Write a 1-sentence icebreaker for a networking meeting in Russian.
Person 1: {p1.get('role')} at {p1.get('company')}, looking for: {p1.get('seeking', '')}
Person 2: {p2.get('role')} at {p2.get('company')}, offering: {p2.get('offering', '')}
Highlight the specific synergy. Be concrete and natural."""
            }]
        )
        return response.content[0].text.strip()
class MeetingScheduler:
    """Greedy optimizer for the meeting timetable."""

    def schedule_meetings(self, matches: list[dict],
                          participants: dict,
                          time_slots: list[str],
                          meeting_duration_min: int = 15,
                          tables_per_slot: int = 20) -> list[dict]:
        """Greedy schedule: best matches first, earliest free slot for both.

        Args:
            matches: match dicts containing at least 'participant_a',
                'participant_b' and 'match_score'.
            participants: mapping participant id -> data; only the keys are
                used, to initialize busy-slot tracking.
            time_slots: ordered candidate slot labels (e.g. "10:00").
            meeting_duration_min: meeting length copied into each result.
            tables_per_slot: physical table capacity per slot; a full slot
                is skipped (new parameter, defaults to the previous cap of 20).

        Returns:
            Scheduled meetings — each input match extended with 'time_slot',
            'duration_min' and a 'location' that is unique WITHIN its slot.
        """
        scheduled = []
        participant_slots = {pid: set() for pid in participants}
        # Fix: tables must be numbered per slot, not by the global meeting
        # count — the old `len(scheduled) % 20 + 1` could seat two meetings
        # happening at the SAME time at the SAME table.
        tables_used = {slot: 0 for slot in time_slots}
        # Best matches get first pick of slots.
        sorted_matches = sorted(matches, key=lambda x: -x['match_score'])
        for match in sorted_matches:
            pa, pb = match['participant_a'], match['participant_b']
            pa_busy = participant_slots[pa]
            pb_busy = participant_slots[pb]
            # First slot where both are free and a table is available.
            for slot in time_slots:
                if slot in pa_busy or slot in pb_busy:
                    continue
                if tables_used[slot] >= tables_per_slot:
                    continue  # room is at capacity for this slot
                tables_used[slot] += 1
                scheduled.append({
                    **match,
                    'time_slot': slot,
                    'duration_min': meeting_duration_min,
                    'location': f"Table {tables_used[slot]}"
                })
                pa_busy.add(slot)
                pb_busy.add(slot)
                break
        return scheduled
AI-powered networking at conferences increases the share of "useful" meetings (those rated as valuable by attendees) from 30–40% to 65–75% of all meetings. Key insight: complementary matching (pairing what one side is seeking with what the other is offering) works better than similarity matching (pairing people with similar roles).







