Change rating system (DMOJ)
This commit is contained in:
parent e261fc9e3b
commit b6556a93f5
9 changed files with 369 additions and 117 deletions
1 .gitignore vendored
@@ -15,4 +15,5 @@ sass_processed
<desired bridge log path>
node_modules/
package-lock.json
/src
18 judge/migrations/0117_auto_20211209_0612.py Normal file
File diff suppressed because one or more lines are too long
208 judge/migrations/0118_rating.py Normal file
@@ -0,0 +1,208 @@
import math
from operator import attrgetter, itemgetter

from django.db import migrations, models
from django.db.models import Count, OuterRef, Subquery
from django.db.models.functions import Coalesce
from django.utils import timezone


def tie_ranker(iterable, key=attrgetter('points')):
    rank = 0
    delta = 1
    last = None
    buf = []
    for item in iterable:
        new = key(item)
        if new != last:
            for _ in buf:
                yield rank + (delta - 1) / 2.0
            rank += delta
            delta = 0
            buf = []
        delta += 1
        buf.append(item)
        last = key(item)
    for _ in buf:
        yield rank + (delta - 1) / 2.0
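For context, tie_ranker yields 1-based ranks and averages the rank across ties, which is exactly what the rating code below expects for actual_rank. A small illustrative run, not part of the migration:

# Illustrative only: four entries with points 100, 90, 90, 80, already sorted descending.
from types import SimpleNamespace

entries = [SimpleNamespace(points=p) for p in (100, 90, 90, 80)]
print(list(tie_ranker(entries)))  # [1.0, 2.5, 2.5, 4.0] -- the tied pair shares the average rank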
def rational_approximation(t):
    # Abramowitz and Stegun formula 26.2.23.
    # The absolute value of the error should be less than 4.5 e-4.
    c = [2.515517, 0.802853, 0.010328]
    d = [1.432788, 0.189269, 0.001308]
    numerator = (c[2] * t + c[1]) * t + c[0]
    denominator = ((d[2] * t + d[1]) * t + d[0]) * t + 1.0
    return t - numerator / denominator


def normal_CDF_inverse(p):
    assert 0.0 < p < 1

    # See article above for explanation of this section.
    if p < 0.5:
        # F^-1(p) = - G^-1(p)
        return -rational_approximation(math.sqrt(-2.0 * math.log(p)))
    else:
        # F^-1(p) = G^-1(1-p)
        return rational_approximation(math.sqrt(-2.0 * math.log(1.0 - p)))


def WP(RA, RB, VA, VB):
    return (math.erf((RB - RA) / math.sqrt(2 * (VA * VA + VB * VB))) + 1) / 2.0
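Two quick sanity checks on these helpers (illustrative, not part of the migration): the Abramowitz and Stegun approximation should reproduce the familiar 97.5% quantile of about 1.96 to well within its stated error, and two players with equal rating and volatility should each be expected to beat the other half the time.

assert abs(normal_CDF_inverse(0.975) - 1.96) < 1e-2
# As used below, WP(RA, RB, VA, VB) is the modelled probability that the player
# rated RB finishes ahead of the player rated RA; equal inputs give exactly 0.5.
assert WP(1500, 1500, 535, 535) == 0.5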
def recalculate_ratings(old_rating, old_volatility, actual_rank, times_rated, is_disqualified):
    # actual_rank: 1 is first place, N is last place
    # if there are ties, use the average of places (if places 2, 3, 4, 5 tie, use 3.5 for all of them)

    N = len(old_rating)
    new_rating = old_rating[:]
    new_volatility = old_volatility[:]
    if N <= 1:
        return new_rating, new_volatility

    ranking = list(range(N))
    ranking.sort(key=old_rating.__getitem__, reverse=True)

    ave_rating = float(sum(old_rating)) / N
    sum1 = sum(i * i for i in old_volatility) / N
    sum2 = sum((i - ave_rating) ** 2 for i in old_rating) / (N - 1)
    CF = math.sqrt(sum1 + sum2)

    for i in range(N):
        ERank = 0.5
        for j in range(N):
            ERank += WP(old_rating[i], old_rating[j], old_volatility[i], old_volatility[j])

        EPerf = -normal_CDF_inverse((ERank - 0.5) / N)
        APerf = -normal_CDF_inverse((actual_rank[i] - 0.5) / N)
        PerfAs = old_rating[i] + CF * (APerf - EPerf)
        Weight = 1.0 / (1 - (0.42 / (times_rated[i] + 1) + 0.18)) - 1.0
        if old_rating[i] > 2500:
            Weight *= 0.8
        elif old_rating[i] >= 2000:
            Weight *= 0.9

        Cap = 150.0 + 1500.0 / (times_rated[i] + 2)

        new_rating[i] = (old_rating[i] + Weight * PerfAs) / (1.0 + Weight)

        if abs(old_rating[i] - new_rating[i]) > Cap:
            if old_rating[i] < new_rating[i]:
                new_rating[i] = old_rating[i] + Cap
            else:
                new_rating[i] = old_rating[i] - Cap

        if times_rated[i] == 0:
            new_volatility[i] = 385
        else:
            new_volatility[i] = math.sqrt(((new_rating[i] - old_rating[i]) ** 2) / Weight +
                                          (old_volatility[i] ** 2) / (Weight + 1))

        if is_disqualified[i]:
            # DQed users can manipulate TopCoder ratings to get higher volatility in order to increase their rating
            # later on, prohibit this by ensuring their volatility never increases in this situation
            new_volatility[i] = min(new_volatility[i], old_volatility[i])

    # try to keep the sum of ratings constant
    adjust = float(sum(old_rating) - sum(new_rating)) / N
    new_rating = list(map(adjust.__add__, new_rating))
    # inflate a little if we have to so people who placed first don't lose rating
    best_rank = min(actual_rank)
    for i in range(N):
        if abs(actual_rank[i] - best_rank) <= 1e-3 and new_rating[i] < old_rating[i] + 1:
            new_rating[i] = old_rating[i] + 1
    return list(map(int, map(round, new_rating))), list(map(int, map(round, new_volatility)))
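A toy invocation of this TopCoder-style update (hypothetical numbers, not from the commit): three first-time participants all start at the old defaults of rating 1200 and volatility 535, so only their ranks differ.

old_rating = [1200, 1200, 1200]
old_volatility = [535, 535, 535]
actual_rank = [1.0, 2.0, 3.0]          # as produced by tie_ranker
times_rated = [0, 0, 0]
is_disqualified = [False, False, False]

new_rating, new_volatility = recalculate_ratings(
    old_rating, old_volatility, actual_rank, times_rated, is_disqualified)
# The winner gains and the loser drops, the adjustment step keeps the rating sum
# roughly constant, and first place is never allowed to lose rating.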
def tc_rate_contest(contest, Rating, Profile):
    rating_subquery = Rating.objects.filter(user=OuterRef('user'))
    rating_sorted = rating_subquery.order_by('-contest__end_time')
    users = contest.users.order_by('is_disqualified', '-score', 'cumtime', 'tiebreaker') \
        .annotate(submissions=Count('submission'),
                  last_rating=Coalesce(Subquery(rating_sorted.values('rating')[:1]), 1200),
                  volatility=Coalesce(Subquery(rating_sorted.values('volatility')[:1]), 535),
                  times=Coalesce(Subquery(rating_subquery.order_by().values('user_id')
                                          .annotate(count=Count('id')).values('count')), 0)) \
        .exclude(user_id__in=contest.rate_exclude.all()) \
        .filter(virtual=0).values('id', 'user_id', 'score', 'cumtime', 'tiebreaker', 'is_disqualified',
                                  'last_rating', 'volatility', 'times')
    if not contest.rate_all:
        users = users.filter(submissions__gt=0)
    if contest.rating_floor is not None:
        users = users.exclude(last_rating__lt=contest.rating_floor)
    if contest.rating_ceiling is not None:
        users = users.exclude(last_rating__gt=contest.rating_ceiling)

    users = list(users)
    participation_ids = list(map(itemgetter('id'), users))
    user_ids = list(map(itemgetter('user_id'), users))
    is_disqualified = list(map(itemgetter('is_disqualified'), users))
    ranking = list(tie_ranker(users, key=itemgetter('score', 'cumtime', 'tiebreaker')))
    old_rating = list(map(itemgetter('last_rating'), users))
    old_volatility = list(map(itemgetter('volatility'), users))
    times_ranked = list(map(itemgetter('times'), users))
    rating, volatility = recalculate_ratings(old_rating, old_volatility, ranking, times_ranked, is_disqualified)

    now = timezone.now()
    ratings = [Rating(user_id=i, contest=contest, rating=r, volatility=v, last_rated=now, participation_id=p, rank=z)
               for i, p, r, v, z in zip(user_ids, participation_ids, rating, volatility, ranking)]

    Rating.objects.bulk_create(ratings)

    Profile.objects.filter(contest_history__contest=contest, contest_history__virtual=0).update(
        rating=Subquery(Rating.objects.filter(user=OuterRef('id'))
                        .order_by('-contest__end_time').values('rating')[:1]))
# inspired by rate_all_view
def rate_tc(apps, schema_editor):
    Contest = apps.get_model('judge', 'Contest')
    Rating = apps.get_model('judge', 'Rating')
    Profile = apps.get_model('judge', 'Profile')

    with schema_editor.connection.cursor() as cursor:
        cursor.execute('TRUNCATE TABLE `%s`' % Rating._meta.db_table)
    Profile.objects.update(rating=None)
    for contest in Contest.objects.filter(is_rated=True, end_time__lte=timezone.now()).order_by('end_time'):
        tc_rate_contest(contest, Rating, Profile)


# inspired by rate_all_view
def rate_elo_mmr(apps, schema_editor):
    Rating = apps.get_model('judge', 'Rating')
    Profile = apps.get_model('judge', 'Profile')

    with schema_editor.connection.cursor() as cursor:
        cursor.execute('TRUNCATE TABLE `%s`' % Rating._meta.db_table)
    Profile.objects.update(rating=None)
    # Don't populate Rating
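rate_elo_mmr deliberately leaves the Rating table empty, so past contests have to be re-rated with the new code once the migration has run. A minimal sketch of doing that, mirroring the rate_all_view flow the comments mention (an assumption for illustration, not part of this commit):

from django.utils import timezone

from judge.models import Contest, Profile, Rating
from judge.ratings import rate_contest


def rerate_all_contests():
    # Hypothetical helper: wipe the stored ratings, then replay every finished rated contest in order.
    Rating.objects.all().delete()
    Profile.objects.update(rating=None)
    for contest in Contest.objects.filter(is_rated=True, end_time__lte=timezone.now()).order_by('end_time'):
        rate_contest(contest)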
class Migration(migrations.Migration):

    dependencies = [
        ('judge', '0117_auto_20211209_0612'),
    ]

    operations = [
        migrations.RunPython(migrations.RunPython.noop, rate_tc, atomic=True),
        migrations.AddField(
            model_name='rating',
            name='mean',
            field=models.FloatField(verbose_name='raw rating'),
        ),
        migrations.AddField(
            model_name='rating',
            name='performance',
            field=models.FloatField(verbose_name='contest performance'),
        ),
        migrations.RemoveField(
            model_name='rating',
            name='volatility',
            field=models.IntegerField(verbose_name='volatility'),
        ),
        migrations.RunPython(rate_elo_mmr, migrations.RunPython.noop, atomic=True),
    ]
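A usage note on the operation order: migrating forward only reshapes and truncates the table (ratings then have to be recomputed with the new judge/ratings.py code, as sketched above), while migrating backward restores the volatility column and reruns the old TopCoder calculation over every finished rated contest via rate_tc. Rolling back would look roughly like this (assuming the judge app label from the dependencies above):

from django.core.management import call_command

# Reverse this migration, which re-adds volatility and repopulates TopCoder ratings.
call_command('migrate', 'judge', '0117_auto_20211209_0612')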
@@ -531,7 +531,8 @@ class Rating(models.Model):
                                          related_name='rating', on_delete=CASCADE)
     rank = models.IntegerField(verbose_name=_('rank'))
     rating = models.IntegerField(verbose_name=_('rating'))
-    volatility = models.IntegerField(verbose_name=_('volatility'))
+    mean = models.FloatField(verbose_name=_('raw rating'))
+    performance = models.FloatField(verbose_name=_('contest performance'))
     last_rated = models.DateTimeField(db_index=True, verbose_name=_('last rated'))
 
     class Meta:
226 judge/ratings.py
@@ -1,5 +1,5 @@
-import math
 from bisect import bisect
+from math import pi, sqrt, tanh
 from operator import attrgetter, itemgetter
 
 from django.db import transaction
@@ -8,6 +8,18 @@ from django.db.models.functions import Coalesce
 from django.utils import timezone
 
 
+BETA2 = 328.33 ** 2
+RATING_INIT = 1200  # Newcomer's rating when applying the rating floor/ceiling
+MEAN_INIT = 1500.
+VAR_INIT = 350**2 * (BETA2 / 212**2)
+SD_INIT = sqrt(VAR_INIT)
+VALID_RANGE = MEAN_INIT - 20 * SD_INIT, MEAN_INIT + 20 * SD_INIT
+VAR_PER_CONTEST = 1219.047619 * (BETA2 / 212**2)
+VAR_LIM = (sqrt(VAR_PER_CONTEST**2 + 4 * BETA2 * VAR_PER_CONTEST) - VAR_PER_CONTEST) / 2
+SD_LIM = sqrt(VAR_LIM)
+TANH_C = sqrt(3) / pi
+
+
 def tie_ranker(iterable, key=attrgetter('points')):
     rank = 0
     delta = 1
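For a sense of scale of the constants added above (a quick illustrative evaluation, not part of the diff): a member's rating uncertainty starts near 542 and can only shrink toward a floor near 128 as they write more rated contests.

from math import sqrt

BETA2 = 328.33 ** 2
VAR_INIT = 350**2 * (BETA2 / 212**2)
VAR_PER_CONTEST = 1219.047619 * (BETA2 / 212**2)
VAR_LIM = (sqrt(VAR_PER_CONTEST**2 + 4 * BETA2 * VAR_PER_CONTEST) - VAR_PER_CONTEST) / 2

print(round(sqrt(VAR_INIT)))  # 542: starting standard deviation of the rating belief
print(round(sqrt(VAR_LIM)))   # 128: the limit that deviation approaches after many contests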
@@ -28,94 +40,103 @@ def tie_ranker(iterable, key=attrgetter('points')):
         yield rank + (delta - 1) / 2.0
 
 
-def rational_approximation(t):
-    # Abramowitz and Stegun formula 26.2.23.
-    # The absolute value of the error should be less than 4.5 e-4.
-    c = [2.515517, 0.802853, 0.010328]
-    d = [1.432788, 0.189269, 0.001308]
-    numerator = (c[2] * t + c[1]) * t + c[0]
-    denominator = ((d[2] * t + d[1]) * t + d[0]) * t + 1.0
-    return t - numerator / denominator
+def eval_tanhs(tanh_terms, x):
+    return sum((wt / sd) * tanh((x - mu) / (2 * sd)) for mu, sd, wt in tanh_terms)
 
 
-def normal_CDF_inverse(p):
-    assert 0.0 < p < 1
-
-    # See article above for explanation of this section.
-    if p < 0.5:
-        # F^-1(p) = - G^-1(p)
-        return -rational_approximation(math.sqrt(-2.0 * math.log(p)))
-    else:
-        # F^-1(p) = G^-1(1-p)
-        return rational_approximation(math.sqrt(-2.0 * math.log(1.0 - p)))
-
-
-def WP(RA, RB, VA, VB):
-    return (math.erf((RB - RA) / math.sqrt(2 * (VA * VA + VB * VB))) + 1) / 2.0
-
-
-def recalculate_ratings(old_rating, old_volatility, actual_rank, times_rated, is_disqualified):
-    # actual_rank: 1 is first place, N is last place
-    # if there are ties, use the average of places (if places 2, 3, 4, 5 tie, use 3.5 for all of them)
-
-    N = len(old_rating)
-    new_rating = old_rating[:]
-    new_volatility = old_volatility[:]
-    if N <= 1:
-        return new_rating, new_volatility
-
-    ranking = list(range(N))
-    ranking.sort(key=old_rating.__getitem__, reverse=True)
-
-    ave_rating = float(sum(old_rating)) / N
-    sum1 = sum(i * i for i in old_volatility) / N
-    sum2 = sum((i - ave_rating) ** 2 for i in old_rating) / (N - 1)
-    CF = math.sqrt(sum1 + sum2)
-
-    for i in range(N):
-        ERank = 0.5
-        for j in range(N):
-            ERank += WP(old_rating[i], old_rating[j], old_volatility[i], old_volatility[j])
-
-        EPerf = -normal_CDF_inverse((ERank - 0.5) / N)
-        APerf = -normal_CDF_inverse((actual_rank[i] - 0.5) / N)
-        PerfAs = old_rating[i] + CF * (APerf - EPerf)
-        Weight = 1.0 / (1 - (0.42 / (times_rated[i] + 1) + 0.18)) - 1.0
-        if old_rating[i] > 2500:
-            Weight *= 0.8
-        elif old_rating[i] >= 2000:
-            Weight *= 0.9
-
-        Cap = 150.0 + 1500.0 / (times_rated[i] + 2)
-
-        new_rating[i] = (old_rating[i] + Weight * PerfAs) / (1.0 + Weight)
-
-        if times_rated[i] == 0:
-            new_volatility[i] = 385
+def solve(tanh_terms, y_tg, lin_factor=0, bounds=VALID_RANGE):
+    L, R = bounds
+    Ly, Ry = None, None
+    while R - L > 2:
+        x = (L + R) / 2
+        y = lin_factor * x + eval_tanhs(tanh_terms, x)
+        if y > y_tg:
+            R, Ry = x, y
+        elif y < y_tg:
+            L, Ly = x, y
         else:
-            new_volatility[i] = math.sqrt(((new_rating[i] - old_rating[i]) ** 2) / Weight +
-                                          (old_volatility[i] ** 2) / (Weight + 1))
+            return x
+    # Use linear interpolation to be slightly more accurate.
+    if Ly is None:
+        Ly = lin_factor * L + eval_tanhs(tanh_terms, L)
+    if y_tg <= Ly:
+        return L
+    if Ry is None:
+        Ry = lin_factor * R + eval_tanhs(tanh_terms, R)
+    if y_tg >= Ry:
+        return R
+    ratio = (y_tg - Ly) / (Ry - Ly)
+    return L * (1 - ratio) + R * ratio
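eval_tanhs and solve are the numerical core of the new system: solve finds, by bisection plus a final linear interpolation, the point where a weighted sum of tanh terms (plus an optional linear term) reaches a target value. A quick illustrative check against the added code (made-up numbers, not from the diff):

from judge.ratings import eval_tanhs, solve

terms = [(1500., 80., 1.)]              # a single (mu, sd, weight) term centred at 1500
assert eval_tanhs(terms, 1500.) == 0.0
assert eval_tanhs(terms, 1400.) < 0 < eval_tanhs(terms, 1600.)
assert 1498 < solve(terms, 0.0) < 1502  # the root of a single centred term is its centre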
 
-        if is_disqualified[i]:
-            # DQed users can manipulate TopCoder ratings to get higher volatility in order to increase their rating
-            # later on, prohibit this by ensuring their volatility never increases in this situation
-            new_volatility[i] = min(new_volatility[i], old_volatility[i])
-
-        if abs(old_rating[i] - new_rating[i]) > Cap:
-            if old_rating[i] < new_rating[i]:
-                new_rating[i] = old_rating[i] + Cap
-            else:
-                new_rating[i] = old_rating[i] - Cap
+def get_var(times_ranked, cache=[VAR_INIT]):
+    while times_ranked >= len(cache):
+        next_var = 1. / (1. / (cache[-1] + VAR_PER_CONTEST) + 1. / BETA2)
+        cache.append(next_var)
+    return cache[times_ranked]
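get_var memoizes the variance of a member's skill belief after a given number of rated contests: each contest adds VAR_PER_CONTEST of drift and then an observation with variance BETA2. The closed-form VAR_LIM defined at the top of the file is exactly the fixed point of that update, which this illustrative check (not part of the diff) confirms:

from judge.ratings import BETA2, VAR_LIM, VAR_PER_CONTEST, get_var

# After many contests the cached variance settles at VAR_LIM...
assert abs(get_var(500) - VAR_LIM) < 1e-6
# ...because VAR_LIM is the fixed point of the per-contest update used above.
assert abs(1. / (1. / (VAR_LIM + VAR_PER_CONTEST) + 1. / BETA2) - VAR_LIM) < 1e-6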
 
-    # try to keep the sum of ratings constant
-    adjust = float(sum(old_rating) - sum(new_rating)) / N
-    new_rating = list(map(adjust.__add__, new_rating))
-    # inflate a little if we have to so people who placed first don't lose rating
-    best_rank = min(actual_rank)
-    for i in range(N):
-        if abs(actual_rank[i] - best_rank) <= 1e-3 and new_rating[i] < old_rating[i] + 1:
-            new_rating[i] = old_rating[i] + 1
-    return list(map(int, map(round, new_rating))), list(map(int, map(round, new_volatility)))
 
+def recalculate_ratings(ranking, old_mean, times_ranked, historical_p):
+    n = len(ranking)
+    new_p = [0.] * n
+    new_mean = [0.] * n
+
+    # Note: pre-multiply delta by TANH_C to improve efficiency.
+    delta = [TANH_C * sqrt(get_var(t) + VAR_PER_CONTEST + BETA2) for t in times_ranked]
+    p_tanh_terms = [(m, d, 1) for m, d in zip(old_mean, delta)]
+
+    # Calculate performance at index i.
+    def solve_idx(i, bounds=VALID_RANGE):
+        r = ranking[i]
+        y_tg = 0
+        for d, s in zip(delta, ranking):
+            if s > r:  # s loses to r
+                y_tg += 1. / d
+            elif s < r:  # s beats r
+                y_tg -= 1. / d
+            # Otherwise, this is a tie that counts as half a win, as per Elo-MMR.
+        new_p[i] = solve(p_tanh_terms, y_tg, bounds=bounds)
+
+    # Fill all indices between i and j, inclusive. Use the fact that new_p is non-increasing.
+    def divconq(i, j):
+        if j - i > 1:
+            k = (i + j) // 2
+            solve_idx(k, bounds=(new_p[j], new_p[i]))
+            divconq(i, k)
+            divconq(k, j)
+
+    if n < 2:
+        new_p = list(old_mean)
+        new_mean = list(old_mean)
+    else:
+        # Calculate performance.
+        solve_idx(0)
+        solve_idx(n - 1)
+        divconq(0, n - 1)
+
+        # Calculate mean.
+        for i, r in enumerate(ranking):
+            tanh_terms = []
+            w_prev = 1.
+            w_sum = 0.
+            for j, h in enumerate([new_p[i]] + historical_p[i]):
+                gamma2 = (VAR_PER_CONTEST if j > 0 else 0)
+                h_var = get_var(times_ranked[i] + 1 - j)
+                k = h_var / (h_var + gamma2)
+                w = w_prev * k**2
+                # Future optimization: If j is around 20, then w < 1e-3 and it is possible to break early.
+                tanh_terms.append((h, sqrt(BETA2) * TANH_C, w))
+                w_prev = w
+                w_sum += w / BETA2
+            w0 = 1. / get_var(times_ranked[i] + 1) - w_sum
+            p0 = eval_tanhs(tanh_terms[1:], old_mean[i]) / w0 + old_mean[i]
+            new_mean[i] = solve(tanh_terms, w0 * p0, lin_factor=w0)
+
+    # Display a slightly lower rating to incentivize participation.
+    # As times_ranked increases, new_rating converges to new_mean.
+    new_rating = [max(1, round(m - (sqrt(get_var(t + 1)) - SD_LIM))) for m, t in zip(new_mean, times_ranked)]
+
+    return new_rating, new_mean, new_p
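To see the shape of the new update, here is a toy run with two newcomers where one beats the other (illustrative only, with no contest or database involved):

from judge.ratings import MEAN_INIT, recalculate_ratings

ranking = [1.0, 2.0]                  # tie_ranker output: 1 is first place
old_mean = [MEAN_INIT, MEAN_INIT]     # both start from the prior mean of 1500
times_ranked = [0, 0]
historical_p = [[], []]               # no previous performances on record

rating, mean, performance = recalculate_ratings(ranking, old_mean, times_ranked, historical_p)
# The winner's performance and mean move above 1500 and the loser's below, while the
# displayed ratings sit lower than the means by the newcomer uncertainty offset
# (sqrt(get_var(1)) - SD_LIM, roughly 153 points).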
 
 
 def rate_contest(contest):
@@ -125,13 +146,13 @@ def rate_contest(contest):
     rating_sorted = rating_subquery.order_by('-contest__end_time')
     users = contest.users.order_by('is_disqualified', '-score', 'cumtime', 'tiebreaker') \
         .annotate(submissions=Count('submission'),
-                  last_rating=Coalesce(Subquery(rating_sorted.values('rating')[:1]), 1200),
-                  volatility=Coalesce(Subquery(rating_sorted.values('volatility')[:1]), 535),
+                  last_rating=Coalesce(Subquery(rating_sorted.values('rating')[:1]), RATING_INIT),
+                  last_mean=Coalesce(Subquery(rating_sorted.values('mean')[:1]), MEAN_INIT),
                   times=Coalesce(Subquery(rating_subquery.order_by().values('user_id')
                                           .annotate(count=Count('id')).values('count')), 0)) \
         .exclude(user_id__in=contest.rate_exclude.all()) \
-        .filter(virtual=0).values('id', 'user_id', 'score', 'cumtime', 'tiebreaker', 'is_disqualified',
-                                  'last_rating', 'volatility', 'times')
+        .filter(virtual=0).values('id', 'user_id', 'score', 'cumtime', 'tiebreaker',
+                                  'last_rating', 'last_mean', 'times')
     if not contest.rate_all:
         users = users.filter(submissions__gt=0)
     if contest.rating_floor is not None:
@@ -142,27 +163,34 @@ def rate_contest(contest):
     users = list(users)
     participation_ids = list(map(itemgetter('id'), users))
     user_ids = list(map(itemgetter('user_id'), users))
-    is_disqualified = list(map(itemgetter('is_disqualified'), users))
     ranking = list(tie_ranker(users, key=itemgetter('score', 'cumtime', 'tiebreaker')))
     old_rating = list(map(itemgetter('last_rating'), users))
-    old_volatility = list(map(itemgetter('volatility'), users))
+    old_mean = list(map(itemgetter('last_mean'), users))
     times_ranked = list(map(itemgetter('times'), users))
-    rating, volatility = recalculate_ratings(old_rating, old_volatility, ranking, times_ranked, is_disqualified)
+    historical_p = [[] for _ in users]
+
+    user_id_to_idx = {uid: i for i, uid in enumerate(user_ids)}
+    for h in Rating.objects.filter(user_id__in=user_ids) \
+            .order_by('-contest__end_time') \
+            .values('user_id', 'performance'):
+        idx = user_id_to_idx[h['user_id']]
+        historical_p[idx].append(h['performance'])
+
+    rating, mean, performance = recalculate_ratings(ranking, old_mean, times_ranked, historical_p)
 
     now = timezone.now()
-    ratings = [Rating(user_id=i, contest=contest, rating=r, volatility=v, last_rated=now, participation_id=p, rank=z)
-               for i, p, r, v, z in zip(user_ids, participation_ids, rating, volatility, ranking)]
+    ratings = [Rating(user_id=i, contest=contest, rating=r, mean=m, performance=perf,
+                      last_rated=now, participation_id=pid, rank=z)
+               for i, pid, r, m, perf, z in zip(user_ids, participation_ids, rating, mean, performance, ranking)]
     with transaction.atomic():
         Rating.objects.filter(contest=contest).delete()
         Rating.objects.bulk_create(ratings)
-        Profile.objects.filter(contest_history__contest=contest, contest_history__virtual=0) \
-            .update(rating=Subquery(Rating.objects.filter(user=OuterRef('id')) \
-                    .order_by('-contest__end_time').values('rating')[:1]))
-    return old_rating, old_volatility, ranking, times_ranked, rating, volatility
+
+        Profile.objects.filter(contest_history__contest=contest, contest_history__virtual=0).update(
+            rating=Subquery(Rating.objects.filter(user=OuterRef('id'))
+                            .order_by('-contest__end_time').values('rating')[:1]))
 
 
 RATING_LEVELS = ['Newbie', 'Amateur', 'Expert', 'Candidate Master', 'Master', 'Grandmaster', 'Target']
-RATING_VALUES = [1000, 1200, 1500, 1800, 2200, 3000]
+RATING_VALUES = [1000, 1300, 1600, 1900, 2400, 3000]
 RATING_CLASS = ['rate-newbie', 'rate-amateur', 'rate-expert', 'rate-candidate-master',
                 'rate-master', 'rate-grandmaster', 'rate-target']
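The shifted RATING_VALUES cutoffs match the recoloured chart bands further down in this commit (1300/1600/1900/2400 instead of 1200/1500/1800/2200). Assuming the usual bisect-based rating_level lookup in this file (not shown in the hunk), the new boundaries map to names like this (illustrative):

from bisect import bisect

RATING_LEVELS = ['Newbie', 'Amateur', 'Expert', 'Candidate Master', 'Master', 'Grandmaster', 'Target']
RATING_VALUES = [1000, 1300, 1600, 1900, 2400, 3000]

assert RATING_LEVELS[bisect(RATING_VALUES, 1299)] == 'Amateur'
assert RATING_LEVELS[bisect(RATING_VALUES, 1300)] == 'Expert'  # Expert now starts at 1300 instead of 1200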
@@ -137,16 +137,17 @@ def api_v1_user_info(request, user):
     participations = ContestParticipation.objects.filter(user=profile, virtual=0, contest__is_visible=True,
                                                           contest__is_private=False,
                                                           contest__is_organization_private=False)
-    for contest_key, rating, volatility in participations.values_list('contest__key', 'rating__rating',
-                                                                      'rating__volatility'):
+    for contest_key, rating, mean, performance in participations.values_list(
+        'contest__key', 'rating__rating', 'rating__mean', 'rating__performance',
+    ):
         contest_history[contest_key] = {
             'rating': rating,
-            'volatility': volatility,
+            'raw_rating': mean,
+            'performance': performance,
         }
 
     resp['contests'] = {
         'current_rating': last_rating.rating if last_rating else None,
-        'volatility': last_rating.volatility if last_rating else None,
        'history': contest_history,
     }
 
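For API consumers, a contest_history entry therefore changes shape roughly as follows (field values invented for illustration): the volatility key disappears and the raw Elo-MMR mean and per-contest performance are exposed instead.

# Hypothetical entry in resp['contests']['history'][contest_key]:
before = {'rating': 1850, 'volatility': 420}
after = {'rating': 1850, 'raw_rating': 1991.3, 'performance': 2054.7}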
@@ -89,7 +89,6 @@ def api_v2_user_info(request):
 
     resp['contests'] = {
         "current_rating": last_rating[0].rating if last_rating else None,
-        "volatility": last_rating[0].volatility if last_rating else None,
         'history': contest_history,
     }
 
@@ -467,26 +467,26 @@
         },
         {
             begin: 1000,
-            end: 1200,
+            end: 1300,
             color: 'rgb(0, 169, 0, 0.4)'
         },
         {
-            begin: 1200,
-            end: 1500,
+            begin: 1300,
+            end: 1600,
             color: 'rgb(0, 0, 255, 0.4)'
         },
         {
-            begin: 1500,
-            end: 1800,
+            begin: 1600,
+            end: 1900,
             color: 'rgb(128, 0, 128, 0.37)'
         },
         {
-            begin: 1800,
-            end: 2200,
+            begin: 1900,
+            end: 2400,
             color: 'rgb(255, 177, 0, 0.4)'
         },
         {
-            begin: 2200,
+            begin: 2400,
             end: 3000,
             color: 'rgb(238, 0, 0, 0.4)'
         },
@@ -120,10 +120,6 @@
             <div class="user-stat-header">{{_('Contests written')}}:</div>
             <div class="user-stat">{{ratings|length}}</div>
         </div>
-        <div class="user-stat-container">
-            <div class="user-stat-header">{{ _('Volatility:') }}</div>
-            <div class="user-stat">{{ rating.volatility }}</div>
-        </div>
         <div class="user-stat-container">
             <div class="user-stat-header">{{ _('Min. rating:') }}</div>
             <div class="user-stat">{{ rating_number(min_rating) }}</div>