Reformat using black

parent efee4ad081
commit a87fb49918

221 changed files with 19127 additions and 7310 deletions

judge/ratings.py (163 changed lines)
@@ -8,19 +8,21 @@ from django.db.models.functions import Coalesce
 from django.utils import timezone
 
 
-BETA2 = 328.33 ** 2
-RATING_INIT = 1200  # Newcomer's rating when applying the rating floor/ceiling
-MEAN_INIT = 1400.
+BETA2 = 328.33**2
+RATING_INIT = 1200  # Newcomer's rating when applying the rating floor/ceiling
+MEAN_INIT = 1400.0
 VAR_INIT = 250**2 * (BETA2 / 212**2)
 SD_INIT = sqrt(VAR_INIT)
 VALID_RANGE = MEAN_INIT - 20 * SD_INIT, MEAN_INIT + 20 * SD_INIT
 VAR_PER_CONTEST = 1219.047619 * (BETA2 / 212**2)
-VAR_LIM = (sqrt(VAR_PER_CONTEST**2 + 4 * BETA2 * VAR_PER_CONTEST) - VAR_PER_CONTEST) / 2
+VAR_LIM = (
+    sqrt(VAR_PER_CONTEST**2 + 4 * BETA2 * VAR_PER_CONTEST) - VAR_PER_CONTEST
+) / 2
 SD_LIM = sqrt(VAR_LIM)
 TANH_C = sqrt(3) / pi
 
 
-def tie_ranker(iterable, key=attrgetter('points')):
+def tie_ranker(iterable, key=attrgetter("points")):
     rank = 0
     delta = 1
     last = None
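Note on the function touched at the end of this hunk: tie_ranker assigns tied contestants the shared average of the ranks they span, which is what lets recalculate_ratings treat a tie as half a win further down. A minimal illustrative stand-in (hypothetical name and body; the real implementation continues past the context shown above):

from itertools import groupby
from operator import itemgetter


def average_tie_ranks(items, key=itemgetter("points")):
    # Illustrative: tied entries share the average of the ranks they
    # would occupy, e.g. two players tied for 2nd/3rd both get 2.5.
    rank = 1
    for _, group in groupby(items, key=key):
        group = list(group)
        for _ in group:
            yield rank + (len(group) - 1) / 2.0
        rank += len(group)


scores = [{"points": 100}, {"points": 90}, {"points": 90}, {"points": 80}]
print(list(average_tie_ranks(scores)))  # [1.0, 2.5, 2.5, 4.0]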
@@ -71,15 +73,15 @@ def solve(tanh_terms, y_tg, lin_factor=0, bounds=VALID_RANGE):
 
 
 def get_var(times_ranked, cache=[VAR_INIT]):
     while times_ranked >= len(cache):
-        next_var = 1. / (1. / (cache[-1] + VAR_PER_CONTEST) + 1. / BETA2)
+        next_var = 1.0 / (1.0 / (cache[-1] + VAR_PER_CONTEST) + 1.0 / BETA2)
         cache.append(next_var)
     return cache[times_ranked]
 
 
 def recalculate_ratings(ranking, old_mean, times_ranked, historical_p):
     n = len(ranking)
-    new_p = [0.] * n
-    new_mean = [0.] * n
+    new_p = [0.0] * n
+    new_mean = [0.0] * n
 
     # Note: pre-multiply delta by TANH_C to improve efficiency.
     delta = [TANH_C * sqrt(get_var(t) + VAR_PER_CONTEST + BETA2) for t in times_ranked]
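Aside: VAR_LIM from the first hunk is exactly the fixed point of the get_var update above. Solving v = 1 / (1 / (v + VAR_PER_CONTEST) + 1 / BETA2) gives v**2 + VAR_PER_CONTEST * v - BETA2 * VAR_PER_CONTEST = 0, whose positive root is the VAR_LIM formula. A self-contained numeric check (constants copied from the module):

from math import sqrt

BETA2 = 328.33**2
VAR_PER_CONTEST = 1219.047619 * (BETA2 / 212**2)
VAR_LIM = (
    sqrt(VAR_PER_CONTEST**2 + 4 * BETA2 * VAR_PER_CONTEST) - VAR_PER_CONTEST
) / 2

v = VAR_LIM
for _ in range(5):
    # One get_var-style update; a fixed point should not move.
    v = 1.0 / (1.0 / (v + VAR_PER_CONTEST) + 1.0 / BETA2)
print(abs(v - VAR_LIM) < 1e-6)  # True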
@@ -90,10 +92,10 @@ def recalculate_ratings(ranking, old_mean, times_ranked, historical_p):
         r = ranking[i]
         y_tg = 0
         for d, s in zip(delta, ranking):
-            if s > r:  # s loses to r
-                y_tg += 1. / d
-            elif s < r:  # s beats r
-                y_tg -= 1. / d
+            if s > r:  # s loses to r
+                y_tg += 1.0 / d
+            elif s < r:  # s beats r
+                y_tg -= 1.0 / d
             # Otherwise, this is a tie that counts as half a win, as per Elo-MMR.
         new_p[i] = solve(p_tanh_terms, y_tg, bounds=bounds)
 
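To make the loop just reformatted concrete: every opponent s moves the target y_tg by plus or minus 1/d, and an exact tie (including oneself) contributes nothing, which is the "half a win" behavior the comment cites. A toy run under the simplifying assumption of one shared delta:

d = 1.0  # assume a common delta for every contestant (illustration only)
ranking = [1, 2, 2, 4]  # lower rank is better; two players tied at rank 2

r = 2  # perspective of one of the tied players
y_tg = 0.0
for s in ranking:
    if s > r:  # s loses to r
        y_tg += 1.0 / d
    elif s < r:  # s beats r
        y_tg -= 1.0 / d
print(y_tg)  # 0.0: the win over rank 4 and the loss to rank 1 cancel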
@@ -117,10 +119,10 @@ def recalculate_ratings(ranking, old_mean, times_ranked, historical_p):
     # Calculate mean.
     for i, r in enumerate(ranking):
         tanh_terms = []
-        w_prev = 1.
-        w_sum = 0.
+        w_prev = 1.0
+        w_sum = 0.0
         for j, h in enumerate([new_p[i]] + historical_p[i]):
-            gamma2 = (VAR_PER_CONTEST if j > 0 else 0)
+            gamma2 = VAR_PER_CONTEST if j > 0 else 0
             h_var = get_var(times_ranked[i] + 1 - j)
             k = h_var / (h_var + gamma2)
             w = w_prev * k**2
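The weights built here decay geometrically: each step back through a contestant's history multiplies the weight by k**2, with k = h_var / (h_var + gamma2) < 1 once gamma2 kicks in, so older performances pull the mean less. A sketch with a fixed made-up variance (the real code re-derives h_var per step via get_var):

VAR_PER_CONTEST = 2924.0  # rough magnitude, illustration only
h_var = 16000.0  # made-up per-performance variance

w_prev = 1.0
for j in range(4):
    gamma2 = VAR_PER_CONTEST if j > 0 else 0
    k = h_var / (h_var + gamma2)
    w = w_prev * k**2
    print(j, round(w, 3))  # 1.0, then ~0.715, ~0.511, ~0.365
    w_prev = w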
@@ -128,13 +130,16 @@ def recalculate_ratings(ranking, old_mean, times_ranked, historical_p):
             tanh_terms.append((h, sqrt(BETA2) * TANH_C, w))
             w_prev = w
             w_sum += w / BETA2
-        w0 = 1. / get_var(times_ranked[i] + 1) - w_sum
+        w0 = 1.0 / get_var(times_ranked[i] + 1) - w_sum
         p0 = eval_tanhs(tanh_terms[1:], old_mean[i]) / w0 + old_mean[i]
         new_mean[i] = solve(tanh_terms, w0 * p0, lin_factor=w0)
 
     # Display a slightly lower rating to incentivize participation.
     # As times_ranked increases, new_rating converges to new_mean.
-    new_rating = [max(1, round(m - (sqrt(get_var(t + 1)) - SD_LIM))) for m, t in zip(new_mean, times_ranked)]
+    new_rating = [
+        max(1, round(m - (sqrt(get_var(t + 1)) - SD_LIM)))
+        for m, t in zip(new_mean, times_ranked)
+    ]
 
     return new_rating, new_mean, new_p
 
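On the two comments above: the displayed rating subtracts sqrt(get_var(t + 1)) - SD_LIM, and because the variance recurrence converges to VAR_LIM from above, that penalty shrinks toward zero as a user gets ranked more often. A self-contained sketch (constants copied from the module):

from math import sqrt

BETA2 = 328.33**2
VAR_INIT = 250**2 * (BETA2 / 212**2)
VAR_PER_CONTEST = 1219.047619 * (BETA2 / 212**2)
VAR_LIM = (
    sqrt(VAR_PER_CONTEST**2 + 4 * BETA2 * VAR_PER_CONTEST) - VAR_PER_CONTEST
) / 2
SD_LIM = sqrt(VAR_LIM)

var = VAR_INIT
for t in range(6):
    var = 1.0 / (1.0 / (var + VAR_PER_CONTEST) + 1.0 / BETA2)
    # Penalty applied to a user with t prior rated contests:
    print(t, round(sqrt(var) - SD_LIM, 1))  # starts near 124, decays toward 0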
@@ -142,17 +147,39 @@ def recalculate_ratings(ranking, old_mean, times_ranked, historical_p):
 def rate_contest(contest):
     from judge.models import Rating, Profile
 
-    rating_subquery = Rating.objects.filter(user=OuterRef('user'))
-    rating_sorted = rating_subquery.order_by('-contest__end_time')
-    users = contest.users.order_by('is_disqualified', '-score', 'cumtime', 'tiebreaker') \
-        .annotate(submissions=Count('submission'),
-                  last_rating=Coalesce(Subquery(rating_sorted.values('rating')[:1]), RATING_INIT),
-                  last_mean=Coalesce(Subquery(rating_sorted.values('mean')[:1]), MEAN_INIT),
-                  times=Coalesce(Subquery(rating_subquery.order_by().values('user_id')
-                                          .annotate(count=Count('id')).values('count')), 0)) \
-        .exclude(user_id__in=contest.rate_exclude.all()) \
-        .filter(virtual=0).values('id', 'user_id', 'score', 'cumtime', 'tiebreaker',
-                                  'last_rating', 'last_mean', 'times')
+    rating_subquery = Rating.objects.filter(user=OuterRef("user"))
+    rating_sorted = rating_subquery.order_by("-contest__end_time")
+    users = (
+        contest.users.order_by("is_disqualified", "-score", "cumtime", "tiebreaker")
+        .annotate(
+            submissions=Count("submission"),
+            last_rating=Coalesce(
+                Subquery(rating_sorted.values("rating")[:1]), RATING_INIT
+            ),
+            last_mean=Coalesce(Subquery(rating_sorted.values("mean")[:1]), MEAN_INIT),
+            times=Coalesce(
+                Subquery(
+                    rating_subquery.order_by()
+                    .values("user_id")
+                    .annotate(count=Count("id"))
+                    .values("count")
+                ),
+                0,
+            ),
+        )
+        .exclude(user_id__in=contest.rate_exclude.all())
+        .filter(virtual=0)
+        .values(
+            "id",
+            "user_id",
+            "score",
+            "cumtime",
+            "tiebreaker",
+            "last_rating",
+            "last_mean",
+            "times",
+        )
+    )
     if not contest.rate_all:
         users = users.filter(submissions__gt=0)
     if contest.rating_floor is not None:
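One pattern in the query above worth noting (the reformat does not change it): the times annotation clears the subquery's ordering and regroups on user_id, turning it into a correlated per-user COUNT. Extracted as a minimal sketch, assuming the same Rating model:

from django.db.models import Count, OuterRef, Subquery

# Correlated count of a user's prior Rating rows: .order_by() drops any
# default ordering so .values("user_id") becomes a GROUP BY on the user.
rating_subquery = Rating.objects.filter(user=OuterRef("user"))
times = Subquery(
    rating_subquery.order_by()
    .values("user_id")
    .annotate(count=Count("id"))
    .values("count")
)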
@@ -161,38 +188,76 @@ def rate_contest(contest):
         users = users.exclude(last_rating__gt=contest.rating_ceiling)
 
     users = list(users)
-    participation_ids = list(map(itemgetter('id'), users))
-    user_ids = list(map(itemgetter('user_id'), users))
-    ranking = list(tie_ranker(users, key=itemgetter('score', 'cumtime', 'tiebreaker')))
-    old_mean = list(map(itemgetter('last_mean'), users))
-    times_ranked = list(map(itemgetter('times'), users))
+    participation_ids = list(map(itemgetter("id"), users))
+    user_ids = list(map(itemgetter("user_id"), users))
+    ranking = list(tie_ranker(users, key=itemgetter("score", "cumtime", "tiebreaker")))
+    old_mean = list(map(itemgetter("last_mean"), users))
+    times_ranked = list(map(itemgetter("times"), users))
     historical_p = [[] for _ in users]
 
     user_id_to_idx = {uid: i for i, uid in enumerate(user_ids)}
-    for h in Rating.objects.filter(user_id__in=user_ids) \
-            .order_by('-contest__end_time') \
-            .values('user_id', 'performance'):
-        idx = user_id_to_idx[h['user_id']]
-        historical_p[idx].append(h['performance'])
+    for h in (
+        Rating.objects.filter(user_id__in=user_ids)
+        .order_by("-contest__end_time")
+        .values("user_id", "performance")
+    ):
+        idx = user_id_to_idx[h["user_id"]]
+        historical_p[idx].append(h["performance"])
 
-    rating, mean, performance = recalculate_ratings(ranking, old_mean, times_ranked, historical_p)
+    rating, mean, performance = recalculate_ratings(
+        ranking, old_mean, times_ranked, historical_p
+    )
 
     now = timezone.now()
-    ratings = [Rating(user_id=i, contest=contest, rating=r, mean=m, performance=perf,
-                      last_rated=now, participation_id=pid, rank=z)
-               for i, pid, r, m, perf, z in zip(user_ids, participation_ids, rating, mean, performance, ranking)]
+    ratings = [
+        Rating(
+            user_id=i,
+            contest=contest,
+            rating=r,
+            mean=m,
+            performance=perf,
+            last_rated=now,
+            participation_id=pid,
+            rank=z,
+        )
+        for i, pid, r, m, perf, z in zip(
+            user_ids, participation_ids, rating, mean, performance, ranking
+        )
+    ]
     with transaction.atomic():
         Rating.objects.bulk_create(ratings)
 
-        Profile.objects.filter(contest_history__contest=contest, contest_history__virtual=0).update(
-            rating=Subquery(Rating.objects.filter(user=OuterRef('id'))
-                            .order_by('-contest__end_time').values('rating')[:1]))
+        Profile.objects.filter(
+            contest_history__contest=contest, contest_history__virtual=0
+        ).update(
+            rating=Subquery(
+                Rating.objects.filter(user=OuterRef("id"))
+                .order_by("-contest__end_time")
+                .values("rating")[:1]
+            )
+        )
 
 
-RATING_LEVELS = ['Newbie', 'Amateur', 'Expert', 'Candidate Master', 'Master', 'Grandmaster', 'Target']
+RATING_LEVELS = [
+    "Newbie",
+    "Amateur",
+    "Expert",
+    "Candidate Master",
+    "Master",
+    "Grandmaster",
+    "Target",
+]
 RATING_VALUES = [1000, 1400, 1700, 1900, 2100, 2400, 3000]
-RATING_CLASS = ['rate-newbie', 'rate-amateur', 'rate-specialist', 'rate-expert', 'rate-candidate-master',
-                'rate-master', 'rate-grandmaster', 'rate-target']
+RATING_CLASS = [
+    "rate-newbie",
+    "rate-amateur",
+    "rate-specialist",
+    "rate-expert",
+    "rate-candidate-master",
+    "rate-master",
+    "rate-grandmaster",
+    "rate-target",
+]
 
 
 def rating_level(rating):
@@ -213,4 +278,4 @@ def rating_progress(rating):
         return 1.0
     prev = 0 if not level else RATING_VALUES[level - 1]
     next = RATING_VALUES[level]
-    return (rating - prev + 0.0) / (next - prev)
+    return (rating - prev + 0.0) / (next - prev)
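A quick worked example of the rating_progress tail in this final hunk, with level assumed precomputed as the index of the first threshold above the rating: for rating 1550 that index is 2, so prev = RATING_VALUES[1] = 1400, next = RATING_VALUES[2] = 1700, and the function returns (1550 - 1400) / 300 = 0.5, i.e. halfway through the level.

RATING_VALUES = [1000, 1400, 1700, 1900, 2100, 2400, 3000]


def rating_progress_tail(rating, level):
    # Sketch of the tail above; how `level` is derived sits outside this hunk.
    prev = 0 if not level else RATING_VALUES[level - 1]
    next = RATING_VALUES[level]
    return (rating - prev + 0.0) / (next - prev)


print(rating_progress_tail(1550, 2))  # 0.5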