Files
yrtv/web/services/stats_service.py

777 lines
31 KiB
Python
Raw Normal View History

2026-01-26 02:13:06 +08:00
from web.database import query_db
class StatsService:
2026-01-26 18:36:47 +08:00
@staticmethod
def get_team_stats_summary():
    """
    Calculates aggregate statistics for matches where at least 2 roster members played together.

    Returns:
        {
            'map_stats':      [{'label', 'count', 'wins', 'win_rate'}, ...],
            'elo_stats':      [{'label', 'count', 'wins', 'win_rate'}, ...],
            'duration_stats': [{'label', 'count', 'wins', 'win_rate'}, ...],
            'round_stats':    [{'label', 'count', 'wins', 'win_rate'}, ...]
        }
        or {} when there is no active roster / no qualifying matches.
    """
    from web.services.web_service import WebService
    import json

    # 1. Get Active Roster (steam ids stored as a JSON array on the lineup row).
    lineups = WebService.get_lineups()
    active_roster_ids = []
    if lineups:
        try:
            raw_ids = json.loads(lineups[0]['player_ids_json'])
            active_roster_ids = [str(uid) for uid in raw_ids]
        except (ValueError, KeyError, TypeError):
            # Malformed / missing JSON -> behave as if there is no roster.
            pass
    if not active_roster_ids:
        return {}

    # 2. Candidate matches: those containing >= 2 roster members.
    # MAX(team_id) is taken as "our" team id (assumes roster members share a team).
    placeholders = ','.join('?' for _ in active_roster_ids)
    candidate_sql = f"""
    SELECT mp.match_id, MAX(mp.team_id) as our_team_id
    FROM fact_match_players mp
    WHERE CAST(mp.steam_id_64 AS TEXT) IN ({placeholders})
    GROUP BY mp.match_id
    HAVING COUNT(DISTINCT mp.steam_id_64) >= 2
    """
    candidate_rows = query_db('l2', candidate_sql, active_roster_ids)
    if not candidate_rows:
        return {}
    candidate_map = {row['match_id']: row['our_team_id'] for row in candidate_rows}
    match_ids = list(candidate_map.keys())
    match_placeholders = ','.join('?' for _ in match_ids)

    # Match details, incl. average lobby ELO (only positive ELO rows counted).
    match_sql = f"""
    SELECT m.match_id, m.map_name, m.score_team1, m.score_team2, m.winner_team, m.duration,
    AVG(fmt.group_origin_elo) as avg_elo
    FROM fact_matches m
    LEFT JOIN fact_match_teams fmt ON m.match_id = fmt.match_id AND fmt.group_origin_elo > 0
    WHERE m.match_id IN ({match_placeholders})
    GROUP BY m.match_id
    """
    match_rows = query_db('l2', match_sql, match_ids)

    # 3. Bucket initialisation.
    map_stats = {}
    elo_ranges = ['<1000', '1000-1200', '1200-1400', '1400-1600', '1600-1800', '1800-2000', '2000+']
    elo_stats = {r: {'wins': 0, 'total': 0} for r in elo_ranges}
    dur_ranges = ['<30m', '30-45m', '45m+']
    dur_stats = {r: {'wins': 0, 'total': 0} for r in dur_ranges}
    round_types = ['Stomp (<15)', 'Normal', 'Close (>23)', 'Choke (24)']
    round_stats = {r: {'wins': 0, 'total': 0} for r in round_types}

    for m in match_rows:
        mid = m['match_id']
        # Win detection by simple team-id matching.
        # NOTE(review): the more robust "roster UID in winning group" logic used
        # by get_matches() would need extra queries per match; for this aggregate
        # summary we deliberately trust team_id.
        our_tid = candidate_map[mid]
        winner_tid = m['winner_team']
        try:
            is_win = (int(our_tid) == int(winner_tid)) if (our_tid and winner_tid) else False
        except (ValueError, TypeError):
            # Non-numeric ids (e.g. team names) -> fall back to string comparison.
            is_win = (str(our_tid) == str(winner_tid)) if (our_tid and winner_tid) else False

        # 1. Map bucket.
        map_name = m['map_name'] or 'Unknown'
        if map_name not in map_stats:
            map_stats[map_name] = {'wins': 0, 'total': 0}
        map_stats[map_name]['total'] += 1
        if is_win:
            map_stats[map_name]['wins'] += 1

        # 2. ELO bucket (matches with no/zero avg ELO are skipped).
        elo = m['avg_elo']
        if elo:
            if elo < 1000: e_key = '<1000'
            elif elo < 1200: e_key = '1000-1200'
            elif elo < 1400: e_key = '1200-1400'
            elif elo < 1600: e_key = '1400-1600'
            elif elo < 1800: e_key = '1600-1800'
            elif elo < 2000: e_key = '1800-2000'
            else: e_key = '2000+'
            elo_stats[e_key]['total'] += 1
            if is_win:
                elo_stats[e_key]['wins'] += 1

        # 3. Duration bucket (duration stored in seconds).
        dur = m['duration']
        if dur:
            dur_min = dur / 60
            if dur_min < 30: d_key = '<30m'
            elif dur_min < 45: d_key = '30-45m'
            else: d_key = '45m+'
            dur_stats[d_key]['total'] += 1
            if is_win:
                dur_stats[d_key]['wins'] += 1

        # 4. Round buckets.
        # 'Close (>23)' intentionally overlaps 'Choke (24)': per requirements,
        # Close counts ALL matches with more than 23 rounds regardless of
        # other categories, while Choke counts only exact 24-round matches.
        s1 = m['score_team1'] or 0
        s2 = m['score_team2'] or 0
        total_rounds = s1 + s2
        if total_rounds == 24:
            round_stats['Choke (24)']['total'] += 1
            if is_win:
                round_stats['Choke (24)']['wins'] += 1
        if total_rounds > 23:
            round_stats['Close (>23)']['total'] += 1
            if is_win:
                round_stats['Close (>23)']['wins'] += 1
        if total_rounds < 15:
            round_stats['Stomp (<15)']['total'] += 1
            if is_win:
                round_stats['Stomp (<15)']['wins'] += 1
        elif total_rounds <= 23:
            # 'Normal' only when NOT Stomp and NOT Close (15..23 rounds).
            round_stats['Normal']['total'] += 1
            if is_win:
                round_stats['Normal']['wins'] += 1

    # 4. Format results into display-ready lists.
    def fmt(stats_dict):
        res = []
        for k, v in stats_dict.items():
            rate = (v['wins'] / v['total'] * 100) if v['total'] > 0 else 0
            res.append({'label': k, 'count': v['total'], 'wins': v['wins'], 'win_rate': rate})
        return res

    # Maps sorted by frequency; the other buckets keep insertion order.
    map_res = fmt(map_stats)
    map_res.sort(key=lambda x: x['count'], reverse=True)
    return {
        'map_stats': map_res,
        'elo_stats': fmt(elo_stats),
        'duration_stats': fmt(dur_stats),
        'round_stats': fmt(round_stats)
    }
2026-01-26 02:13:06 +08:00
@staticmethod
def get_recent_matches(limit=5):
    """Return the newest matches (most recent first) with the MVP's username joined in."""
    recent_sql = """
    SELECT m.match_id, m.start_time, m.map_name, m.score_team1, m.score_team2, m.winner_team,
    p.username as mvp_name
    FROM fact_matches m
    LEFT JOIN dim_players p ON m.mvp_uid = p.uid
    ORDER BY m.start_time DESC
    LIMIT ?
    """
    return query_db('l2', recent_sql, [limit])
@staticmethod
def get_matches(page=1, per_page=20, map_name=None, date_from=None, date_to=None):
    """
    Paginated match listing with optional map / date-range filters.

    Each match dict is enriched with:
        avg_elo    - average positive group ELO of the lobby (0 if unknown)
        max_party  - largest premade party size in the match (1 if unknown)
        our_result - 'win' / 'loss' / 'mixed' relative to the active roster,
                     or None when it cannot be determined

    Returns:
        (matches, total) where total is the unpaginated row count.
    """
    offset = (page - 1) * per_page
    args = []
    where_clauses = ["1=1"]
    if map_name:
        where_clauses.append("map_name = ?")
        args.append(map_name)
    if date_from:
        where_clauses.append("start_time >= ?")
        args.append(date_from)
    if date_to:
        where_clauses.append("start_time <= ?")
        args.append(date_to)
    where_str = " AND ".join(where_clauses)
    sql = f"""
    SELECT m.match_id, m.start_time, m.map_name, m.score_team1, m.score_team2, m.winner_team, m.duration
    FROM fact_matches m
    WHERE {where_str}
    ORDER BY m.start_time DESC
    LIMIT ? OFFSET ?
    """
    args.extend([per_page, offset])
    matches = query_db('l2', sql, args)

    # Enrich matches with Avg ELO, party info, and "our team" result.
    if matches:
        match_ids = [m['match_id'] for m in matches]
        placeholders = ','.join('?' for _ in match_ids)

        # Average lobby ELO per match (zero/negative ELO rows excluded).
        elo_sql = f"""
        SELECT match_id, AVG(group_origin_elo) as avg_elo
        FROM fact_match_teams
        WHERE match_id IN ({placeholders}) AND group_origin_elo > 0
        GROUP BY match_id
        """
        elo_rows = query_db('l2', elo_sql, match_ids)
        elo_map = {row['match_id']: row['avg_elo'] for row in elo_rows}

        # Largest premade party per match (match_team_id 0 = solo default, skipped).
        party_sql = f"""
        SELECT match_id, MAX(cnt) as max_party
        FROM (
        SELECT match_id, match_team_id, COUNT(*) as cnt
        FROM fact_match_players
        WHERE match_id IN ({placeholders}) AND match_team_id > 0
        GROUP BY match_id, match_team_id
        )
        GROUP BY match_id
        """
        party_rows = query_db('l2', party_sql, match_ids)
        party_map = {row['match_id']: row['max_party'] for row in party_rows}

        # --- Determine "Our Team" result via the active roster ---
        from web.services.web_service import WebService
        import json
        lineups = WebService.get_lineups()
        active_roster_ids = []
        if lineups:
            try:
                # Normalise ids to strings for DB comparison consistency.
                raw_ids = json.loads(lineups[0]['player_ids_json'])
                active_roster_ids = [str(uid) for uid in raw_ids]
            except (ValueError, KeyError, TypeError):
                pass

        if not active_roster_ids:
            # Without a roster we cannot attribute results.
            result_map = {}
        else:
            # 1. Resolve the in-match UIDs actually used by roster members.
            roster_placeholders = ','.join('?' for _ in active_roster_ids)
            uid_sql = f"""
            SELECT DISTINCT steam_id_64, uid
            FROM fact_match_players
            WHERE match_id IN ({placeholders})
            AND CAST(steam_id_64 AS TEXT) IN ({roster_placeholders})
            """
            uid_rows = query_db('l2', uid_sql, match_ids + active_roster_ids)
            our_uids = set()
            for r in uid_rows:
                if r['uid']:
                    our_uids.add(str(r['uid']))

            # 2. Group membership + winner info from fact_match_teams.
            teams_sql = f"""
            SELECT fmt.match_id, fmt.group_id, fmt.group_uids, m.winner_team
            FROM fact_match_teams fmt
            JOIN fact_matches m ON fmt.match_id = m.match_id
            WHERE fmt.match_id IN ({placeholders})
            """
            teams_rows = query_db('l2', teams_sql, match_ids)

            # 3. Classify each match: win / loss / mixed (roster on both sides).
            result_map = {}
            match_groups = {}  # match_id -> {'groups': {group_id: set(uids)}, 'winner': id}
            for r in teams_rows:
                mid = r['match_id']
                gid = r['group_id']
                uids_str = r['group_uids'] or ""
                # group_uids is a comma-separated list of UIDs.
                uids = set(str(u).strip() for u in uids_str.split(',') if u.strip())
                if mid not in match_groups:
                    match_groups[mid] = {'groups': {}, 'winner': r['winner_team']}
                match_groups[mid]['groups'][gid] = uids
            for mid, data in match_groups.items():
                winner_gid = data['winner']
                our_in_winner = False
                our_in_loser = False
                for gid, uids in data['groups'].items():
                    if our_uids.intersection(uids):
                        if gid == winner_gid:
                            our_in_winner = True
                        else:
                            our_in_loser = True
                if our_in_winner and not our_in_loser:
                    result_map[mid] = 'win'
                elif our_in_loser and not our_in_winner:
                    result_map[mid] = 'loss'
                elif our_in_winner and our_in_loser:
                    result_map[mid] = 'mixed'
                # else: no roster UID matched any group -> leave undetermined (None).

        # Convert rows to plain dicts so the enrichment fields can be attached.
        # (This conversion/assignment block previously appeared twice verbatim;
        # the redundant duplicate was removed.)
        matches = [dict(m) for m in matches]
        for m in matches:
            m['avg_elo'] = elo_map.get(m['match_id'], 0)
            m['max_party'] = party_map.get(m['match_id'], 1)
            m['our_result'] = result_map.get(m['match_id'])

    # Count total for pagination (strip the LIMIT/OFFSET args).
    count_sql = f"SELECT COUNT(*) as cnt FROM fact_matches WHERE {where_str}"
    total = query_db('l2', count_sql, args[:-2], one=True)['cnt']
    return matches, total
@staticmethod
def get_match_detail(match_id):
    """Fetch a single match row by id, or None if it does not exist."""
    return query_db('l2', "SELECT * FROM fact_matches WHERE match_id = ?", [match_id], one=True)
@staticmethod
def get_match_players(match_id):
    """Scoreboard rows for one match, joined with profile info, grouped by team and sorted by rating."""
    scoreboard_sql = """
    SELECT mp.*, p.username, p.avatar_url
    FROM fact_match_players mp
    LEFT JOIN dim_players p ON mp.steam_id_64 = p.steam_id_64
    WHERE mp.match_id = ?
    ORDER BY mp.team_id, mp.rating DESC
    """
    return query_db('l2', scoreboard_sql, [match_id])
@staticmethod
def get_match_rounds(match_id):
    """All round rows for a match, in play order."""
    return query_db('l2', "SELECT * FROM fact_rounds WHERE match_id = ? ORDER BY round_num", [match_id])
@staticmethod
def get_players(page=1, per_page=20, search=None, sort_by='rating_desc'):
    """
    Paginated player directory backed by dim_players.

    search:  case-insensitive substring match on username or steam_id_64.
    sort_by: accepted for API compatibility but currently unused —
             dim_players holds only static profile info, so rating-based
             sorting would require the L3 aggregate tables (FeatureService).

    Returns:
        (players, total)
    """
    offset = (page - 1) * per_page
    args = []
    where_clauses = ["1=1"]
    if search:
        # Force case-insensitive search on both name and steam id.
        where_clauses.append("(LOWER(username) LIKE LOWER(?) OR steam_id_64 LIKE ?)")
        pattern = f"%{search}%"
        args.extend([pattern, pattern])
    where_str = " AND ".join(where_clauses)
    sql = f"""
    SELECT * FROM dim_players
    WHERE {where_str}
    LIMIT ? OFFSET ?
    """
    args.extend([per_page, offset])
    players = query_db('l2', sql, args)
    # args[:-2] drops the LIMIT/OFFSET params for the count query.
    total = query_db('l2', f"SELECT COUNT(*) as cnt FROM dim_players WHERE {where_str}", args[:-2], one=True)['cnt']
    return players, total
@staticmethod
def get_player_info(steam_id):
    """Static profile row for one player from dim_players, or None if unknown."""
    return query_db('l2', "SELECT * FROM dim_players WHERE steam_id_64 = ?", [steam_id], one=True)
@staticmethod
def get_daily_match_counts(days=365):
    """Daily match totals for the trailing window: [{day: 'YYYY-MM-DD', count: N}, ...]."""
    daily_sql = """
    SELECT date(start_time, 'unixepoch') as day, COUNT(*) as count
    FROM fact_matches
    WHERE start_time > strftime('%s', 'now', ?)
    GROUP BY day
    ORDER BY day
    """
    # SQLite 'now' modifier must look like '-365 days'.
    return query_db('l2', daily_sql, [f'-{days} days'])
@staticmethod
def get_players_by_ids(steam_ids):
    """Bulk-fetch dim_players rows for the given steam ids; [] for empty input."""
    if not steam_ids:
        return []
    marks = ','.join('?' for _ in steam_ids)
    return query_db('l2', f"SELECT * FROM dim_players WHERE steam_id_64 IN ({marks})", steam_ids)
@staticmethod
def get_player_basic_stats(steam_id):
    """
    Career aggregates for one player from fact_match_players.

    K/D is derived from summed kills/deaths for accuracy; ADR is averaged
    because per-match damage totals may be missing in some sources.
    Returns a stats dict, or None when the player has no recorded matches.
    """
    agg_sql = """
    SELECT
    AVG(rating) as rating,
    SUM(kills) as total_kills,
    SUM(deaths) as total_deaths,
    AVG(kd_ratio) as avg_kd,
    AVG(kast) as kast,
    AVG(adr) as adr,
    COUNT(*) as matches_played
    FROM fact_match_players
    WHERE steam_id_64 = ?
    """
    row = query_db('l2', agg_sql, [steam_id], one=True)
    if not row or not row['matches_played'] > 0:
        return None
    stats = dict(row)
    kills = stats.get('total_kills') or 0
    deaths = stats.get('total_deaths') or 0
    # Sum-based K/D; with zero deaths show raw kills instead of infinity.
    stats['kd'] = kills / deaths if deaths > 0 else kills
    # Fall back to the averaged per-match K/D when the sums gave nothing.
    if stats['kd'] == 0 and stats['avg_kd'] and stats['avg_kd'] > 0:
        stats['kd'] = stats['avg_kd']
    # Normalise a missing ADR aggregate to 0.0 for display.
    if stats['adr'] is None:
        stats['adr'] = 0.0
    return stats
@staticmethod
def get_shared_matches(steam_ids):
    """
    Matches in which ALL of the given steam ids took part, newest first.

    Each result dict carries the match row plus:
        is_win     - True when winner_team equals the players' team_id
        result_str - 'Win' / 'Loss', or 'Draw' when winner_team is empty

    NOTE(review): win detection assumes winner_team and team_id store
    comparable values (team names) and that all given players were on the
    same team — MAX(team_id) just picks one of them. Verify against L2_Builder.
    """
    if not steam_ids:
        return []
    placeholders = ','.join('?' for _ in steam_ids)
    sql = f"""
    SELECT m.match_id, m.start_time, m.map_name, m.score_team1, m.score_team2, m.winner_team,
    MAX(mp.team_id) as player_team_id -- Just take one team_id (assuming same)
    FROM fact_matches m
    JOIN fact_match_players mp ON m.match_id = mp.match_id
    WHERE mp.steam_id_64 IN ({placeholders})
    GROUP BY m.match_id
    HAVING COUNT(DISTINCT mp.steam_id_64) = ?
    ORDER BY m.start_time DESC
    """
    args = list(steam_ids)
    args.append(len(steam_ids))
    rows = query_db('l2', sql, args)
    results = []
    for r in rows:
        is_win = (r['winner_team'] == r['player_team_id'])
        if not r['winner_team']:
            # Missing winner info is treated as a draw.
            result_str = 'Draw'
        elif is_win:
            result_str = 'Win'
        else:
            result_str = 'Loss'
        res = dict(r)
        res['is_win'] = is_win          # Boolean for styling
        res['result_str'] = result_str  # Text for display
        results.append(res)
    return results
2026-01-26 02:13:06 +08:00
@staticmethod
def get_player_trend(steam_id, limit=20):
    """
    Recent per-match trend points for one player, returned oldest-first.

    Each row carries rating / kd_ratio / adr / map / is_win plus:
        party_size  - players sharing the same match_team_id in that match
                      (correlated subquery; team id 0 = solo default, excluded)
        match_index - position of the match in global chronological order

    limit: how many most-recent matches to include (default 20).
    """
    sql = """
    SELECT * FROM (
        SELECT
            m.start_time,
            mp.rating,
            mp.kd_ratio,
            mp.adr,
            m.match_id,
            m.map_name,
            mp.is_win,
            mp.match_team_id,
            (SELECT COUNT(*)
             FROM fact_match_players p2
             WHERE p2.match_id = mp.match_id
             AND p2.match_team_id = mp.match_team_id
             AND p2.match_team_id > 0 -- Ensure we don't count 0 (solo default) as a massive party
            ) as party_size,
            (SELECT COUNT(*)
             FROM fact_matches m2
             WHERE m2.start_time <= m.start_time
            ) as match_index
        FROM fact_match_players mp
        JOIN fact_matches m ON mp.match_id = m.match_id
        WHERE mp.steam_id_64 = ?
        ORDER BY m.start_time DESC
        LIMIT ?
    ) ORDER BY start_time ASC
    """
    return query_db('l2', sql, [steam_id, limit])
2026-01-26 18:36:47 +08:00
@staticmethod
def get_roster_stats_distribution(target_steam_id):
    """
    Calculates rank and distribution of the target player within the active roster.
    Covers all L3 Basic Features for the Detailed Panel.

    Returns:
        {metric: {'val', 'rank', 'total', 'min', 'max', 'avg'}} with extra
        legacy aliases ('rating', 'kd', 'adr', 'kast') for the top cards,
        or None when there is no active roster / no L3 data.
    """
    from web.services.web_service import WebService
    import json

    # 1. Active roster ids (stringified for consistent comparisons).
    lineups = WebService.get_lineups()
    active_roster_ids = []
    if lineups:
        try:
            raw_ids = json.loads(lineups[0]['player_ids_json'])
            active_roster_ids = [str(uid) for uid in raw_ids]
        except (ValueError, KeyError, TypeError):
            pass
    if not active_roster_ids:
        return None

    # 2. Fetch L3 features for all roster members (assumes L3 is up to date).
    placeholders = ','.join('?' for _ in active_roster_ids)
    sql = f"SELECT * FROM dm_player_features WHERE steam_id_64 IN ({placeholders})"
    rows = query_db('l3', sql, active_roster_ids)
    if not rows:
        return None

    # Key by str(...) so the lookup below works whether the DB returns the
    # steam id as an integer or a string.
    stats_map = {str(row['steam_id_64']): dict(row) for row in rows}
    target_steam_id = str(target_steam_id)
    if target_steam_id not in stats_map:
        # No L3 row for the target: rank them with all-zero stats.
        stats_map[target_steam_id] = {}

    # 3. Metrics to rank (must match the Detailed Panel keys).
    metrics = [
        'basic_avg_rating', 'basic_avg_kd', 'basic_avg_kast', 'basic_avg_rws', 'basic_avg_adr',
        'basic_avg_headshot_kills', 'basic_headshot_rate', 'basic_avg_assisted_kill', 'basic_avg_awp_kill', 'basic_avg_jump_count',
        'basic_avg_first_kill', 'basic_avg_first_death', 'basic_first_kill_rate', 'basic_first_death_rate',
        'basic_avg_kill_2', 'basic_avg_kill_3', 'basic_avg_kill_4', 'basic_avg_kill_5',
        'basic_avg_perfect_kill', 'basic_avg_revenge_kill',
        # L3 Advanced Dimensions
        'sta_last_30_rating', 'sta_win_rating', 'sta_loss_rating', 'sta_rating_volatility', 'sta_time_rating_corr',
        'bat_kd_diff_high_elo', 'bat_avg_duel_win_rate', 'bat_avg_duel_freq',
        'hps_clutch_win_rate_1v1', 'hps_clutch_win_rate_1v3_plus', 'hps_match_point_win_rate', 'hps_pressure_entry_rate', 'hps_comeback_kd_diff',
        'ptl_pistol_kills', 'ptl_pistol_win_rate', 'ptl_pistol_kd',
        'side_rating_ct', 'side_rating_t', 'side_first_kill_rate_ct', 'side_first_kill_rate_t', 'side_kd_diff_ct_t',
        'util_avg_nade_dmg', 'util_avg_flash_time', 'util_avg_flash_enemy', 'util_usage_rate'
    ]

    # Aliases kept for the legacy top cards (rating, kd, adr, kast).
    legacy_map = {
        'basic_avg_rating': 'rating',
        'basic_avg_kd': 'kd',
        'basic_avg_adr': 'adr',
        'basic_avg_kast': 'kast'
    }

    result = {}
    for m in metrics:
        # Missing/NULL values are treated as 0 for ranking purposes.
        values = [p.get(m, 0) or 0 for p in stats_map.values()]
        target_val = stats_map[target_steam_id].get(m, 0) or 0
        if not values:
            result[m] = None
            continue
        values.sort(reverse=True)
        try:
            # Ties rank at the first (best) occurrence of the value.
            rank = values.index(target_val) + 1
        except ValueError:
            rank = len(values)
        result[m] = {
            'val': target_val,
            'rank': rank,
            'total': len(values),
            'min': min(values),
            'max': max(values),
            'avg': sum(values) / len(values)
        }
        if m in legacy_map:
            result[legacy_map[m]] = result[m]

    return result
2026-01-26 02:13:06 +08:00
@staticmethod
def get_live_matches():
    """
    Matches started within the last 2 hours that have no winner yet.
    On a static dataset this will typically return an empty list.
    """
    live_sql = """
    SELECT m.match_id, m.map_name, m.score_team1, m.score_team2, m.start_time
    FROM fact_matches m
    WHERE m.winner_team IS NULL
    AND m.start_time > strftime('%s', 'now', '-2 hours')
    """
    return query_db('l2', live_sql)
@staticmethod
def get_head_to_head_stats(match_id):
    """
    Kill matrix between players for one match:
    list of {attacker_steam_id, victim_steam_id, kills}.
    """
    h2h_sql = """
    SELECT attacker_steam_id, victim_steam_id, COUNT(*) as kills
    FROM fact_round_events
    WHERE match_id = ? AND event_type = 'kill'
    GROUP BY attacker_steam_id, victim_steam_id
    """
    return query_db('l2', h2h_sql, [match_id])
@staticmethod
def get_match_round_details(match_id):
    """
    Detailed per-round view of one match.

    Returns:
        {
            round_num: {
                'info':    {winner_side, win_reason_desc, end_time_stamp, ...},
                'events':  [{event_type, event_time, attacker..., weapon...}, ...],
                'economy': {steam_id: {main_weapon, equipment_value, ...}, ...}
            }
        }
        or {} when the match has no round rows.
    """
    # Base round info; bail out early when the match has no rounds.
    base_rounds = query_db('l2', "SELECT * FROM fact_rounds WHERE match_id = ? ORDER BY round_num", [match_id])
    if not base_rounds:
        return {}

    event_rows = query_db('l2', """
    SELECT * FROM fact_round_events
    WHERE match_id = ?
    ORDER BY round_num, event_time
    """, [match_id])

    eco_rows = query_db('l2', """
    SELECT * FROM fact_round_player_economy
    WHERE match_id = ?
    """, [match_id])

    # One bucket per known round; events/economy for unknown rounds are dropped.
    detail = {r['round_num']: {'info': dict(r), 'events': [], 'economy': {}} for r in base_rounds}
    for ev in event_rows:
        bucket = detail.get(ev['round_num'])
        if bucket is not None:
            bucket['events'].append(dict(ev))
    for eco in eco_rows:
        bucket = detail.get(eco['round_num'])
        if bucket is not None:
            bucket['economy'][eco['steam_id_64']] = dict(eco)
    return detail