Cloned DMOJ

This commit is contained in:
thanhluong 2020-01-21 15:35:58 +09:00
parent f623974b58
commit 49dc9ff10c
513 changed files with 132349 additions and 39 deletions

0
judge/utils/__init__.py Normal file
View file

8
judge/utils/cachedict.py Normal file
View file

@ -0,0 +1,8 @@
class CacheDict(dict):
    """Dictionary that computes absent values on demand.

    A lookup for a missing key invokes ``func(key)``, stores the result,
    and returns it, so each key is computed at most once.
    """

    def __init__(self, func):
        super(CacheDict, self).__init__()
        self.func = func

    def __missing__(self, key):
        result = self.func(key)
        self[key] = result
        return result

48
judge/utils/camo.py Normal file
View file

@ -0,0 +1,48 @@
import hmac
from hashlib import sha1
from django.conf import settings
from judge.utils.unicode import utf8bytes
class CamoClient(object):
    """Camo image-proxy URL signer; based on https://github.com/sionide21/camo-client"""

    def __init__(self, server, key, excluded=(), https=False):
        self.server = server.rstrip('/')
        self.key = key
        self.https = https
        self.excluded = excluded

    def image_url(self, url):
        # HMAC-SHA1 of the URL under the shared key, followed by the
        # hex-encoded URL itself, in the format the Camo server expects.
        digest = hmac.new(utf8bytes(self.key), utf8bytes(url), sha1).hexdigest()
        return '%s/%s/%s' % (self.server, digest, utf8bytes(url).hex())

    def rewrite_url(self, url):
        """Return *url* proxied through Camo, or unchanged if exempt."""
        if url.startswith(self.server) or url.startswith(self.excluded):
            return url
        if url.startswith(('http://', 'https://')):
            return self.image_url(url)
        if url.startswith('//'):
            # Protocol-relative URL: pick a scheme, then re-process.
            scheme = 'https:' if self.https else 'http:'
            return self.rewrite_url(scheme + url)
        return url

    def update_tree(self, doc):
        """Rewrite every image-like URL in an lxml document, in place."""
        for img in doc.xpath('.//img'):
            for attr in ('src', 'data-src'):
                value = img.get(attr)
                if value:
                    img.set(attr, self.rewrite_url(value))
        for obj in doc.xpath('.//object'):
            data = obj.get('data')
            if data:
                obj.set('data', self.rewrite_url(data))
# Module-level Camo client, built only when a proxy is configured in
# settings; callers must check ``client`` for None before using it.
if settings.DMOJ_CAMO_URL and settings.DMOJ_CAMO_KEY:
    client = CamoClient(settings.DMOJ_CAMO_URL, key=settings.DMOJ_CAMO_KEY,
                        excluded=settings.DMOJ_CAMO_EXCLUDE,
                        https=settings.DMOJ_CAMO_HTTPS)
else:
    client = None

148
judge/utils/caniuse.py Normal file
View file

@ -0,0 +1,148 @@
import requests
from ua_parser import user_agent_parser
# NOTE(review): this fetches the caniuse dataset over the network at import
# time; if the fetch fails, the module is unimportable — confirm this is an
# acceptable failure mode.
_SUPPORT_DATA = requests.get('https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json').json()['data']

# Single-character support levels as used by the caniuse dataset.
SUPPORT = 'y'
PARTIAL_SUPPORT = 'a'
UNSUPPORTED = 'n'
POLYFILL = 'p'
UNKNOWN = 'u'
PREFIX = 'x'
DISABLED = 'd'
def safe_int(string):
    """Parse *string* as an int, returning 0 on any parse failure."""
    try:
        value = int(string)
    except (ValueError, TypeError):
        value = 0
    return value
class BrowserFamily(object):
    """Support lookup table for one browser family of one caniuse feature.

    Parses the per-version support strings of a feature's stats entry into
    exact-version and version-range tables consumed by ``check()``.
    """

    def __init__(self, data):
        self._data = data
        self._ranges = ranges = []
        self._versions = versions = {}
        max_version = ()
        max_support = UNKNOWN
        for version, support in data.items():
            if version == 'all':
                # NOTE(review): this is overwritten by the unconditional
                # ``self.max_support = max_support`` below unless a later
                # numeric version updates the local — confirm whether 'all'
                # entries are meant to win.
                self.max_support = support
            elif '-' in version:
                # Version ranges like "4-4.1"; 1e3000 (i.e. inf) extends the
                # end bound so any patch level inside the range matches.
                start, end = version.split('-')
                start = tuple(map(int, start.split('.')))
                end = tuple(map(int, end.split('.'))) + (1e3000,)
                ranges.append((start, end, support))
                if end > max_version:
                    max_version = end
                    max_support = support
            else:
                try:
                    version = tuple(map(int, version.split('.')))
                except ValueError:
                    # Non-numeric labels (kept as strings) still become keys
                    # so check() can match the raw major string.
                    pass
                else:
                    if version > max_version:
                        max_version = version
                        max_support = support
                versions[version] = support
        self.max_version = max_version
        self.max_support = max_support

    def check(self, major, minor, patch):
        """Return the support-level character for the given version parts."""
        int_major, int_minor, int_patch = map(safe_int, (major, minor, patch))
        version = (int_major, int_minor, int_patch)
        # Versions newer than anything in the dataset inherit the support
        # level of the newest known version.
        if version > self.max_version:
            return self.max_support
        # Most specific key first: exact triple, (major, minor), (major,),
        # then the raw string form of the major version.
        for key in ((int_major, int_minor, int_patch), (int_major, int_minor), (int_major,), major):
            try:
                return self._versions[key]
            except KeyError:
                pass
        for start, end, support in self._ranges:
            if start <= version < end:
                return support
        return UNKNOWN
class Feat(object):
    """One caniuse feature: maps browser-family name to a BrowserFamily."""

    def __init__(self, data):
        self._data = data
        self._family = {}
        for name, stats in data['stats'].items():
            self._family[name] = BrowserFamily(stats)

    def __getitem__(self, item):
        return self._family[item]
class Database(object):
    """All caniuse features, keyed by feature name."""

    def __init__(self, data):
        self._data = data
        self._feats = {}
        for feat, stats in data.items():
            self._feats[feat] = Feat(stats)

    def __getitem__(self, item):
        return self._feats[item]
# Module-level singleton built from the dataset fetched at import time.
database = Database(_SUPPORT_DATA)
class CanIUse(object):
    """Answers caniuse feature queries for one parsed User-Agent string.

    Attribute access maps a feature name (underscores become dashes) to a
    single support-level character; results are cached on the instance so
    ``__getattr__`` only fires once per feature.
    """

    def __init__(self, ua):
        self._agent = user_agent_parser.Parse(ua)

        os_family = self._agent['os']['family']
        browser_family = self._agent['user_agent']['family']

        family = None
        if os_family == 'Android':
            # Mobile browsers have their own caniuse tables.
            for needle, candidate in (('Firefox', 'and_ff'),
                                      ('Chrome', 'and_chr'),
                                      ('Android', 'android')):
                if needle in browser_family:
                    family = candidate
                    break
        else:
            # Order matters: substrings are checked in this sequence.
            for needle, candidate in (('Edge', 'edge'),
                                      ('Firefox', 'firefox'),
                                      ('Chrome', 'chrome'),
                                      ('IE', 'ie'),
                                      ('Opera', 'opera'),
                                      ('Safari', 'safari')):
                if needle in browser_family:
                    family = candidate
                    break
        self._family = family

    def _check_feat(self, feat):
        """Return the first character of the support level, or UNKNOWN."""
        if not self._family:
            return UNKNOWN
        try:
            stats = feat[self._family]
        except KeyError:
            return UNKNOWN
        ua = self._agent['user_agent']
        return stats.check(ua['major'], ua['minor'], ua['patch'])[0]

    def __getattr__(self, attr):
        try:
            feat = database[attr.replace('_', '-')]
        except KeyError:
            raise AttributeError(attr)
        result = self._check_feat(feat)
        # Cache the answer so later lookups bypass __getattr__ entirely.
        setattr(self, attr, result)
        return result

67
judge/utils/celery.py Normal file
View file

@ -0,0 +1,67 @@
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.http import urlencode
class Progress:
    """Reports incremental progress through a Celery task's ``update_state``.

    Tracks a done/total pair plus an optional stage label, pushing a
    ``PROGRESS`` state update to the wrapped task on every change.  Usable
    as a context manager: a clean exit marks the work fully done.
    """

    def __init__(self, task, total, stage=None):
        self.task = task
        self._total = total
        self._done = 0
        self._stage = stage

    def _update_state(self):
        # Push the current counters to the Celery result backend.
        meta = {'done': self._done, 'total': self._total, 'stage': self._stage}
        self.task.update_state(state='PROGRESS', meta=meta)

    @property
    def done(self):
        return self._done

    @done.setter
    def done(self, value):
        self._done = value
        self._update_state()

    @property
    def total(self):
        return self._total

    @total.setter
    def total(self, value):
        self._total = value
        # Shrinking the total must not leave done past the end.
        if self._done > value:
            self._done = value
        self._update_state()

    def did(self, delta):
        """Record *delta* additional units of completed work."""
        self._done += delta
        self._update_state()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only a clean exit snaps the counter to completion.
        if exc_type is None:
            self.done = self._total
def task_status_url(result, message=None, redirect=None):
    """Build the task-status page URL for *result*.

    ``message`` and ``redirect``, when truthy, are attached as query-string
    parameters for the status page to display/follow.
    """
    params = {}
    if message:
        params['message'] = message
    if redirect:
        params['redirect'] = redirect
    url = reverse('task_status', args=[result.id])
    if not params:
        return url
    return url + '?' + urlencode(params)
def redirect_to_task_status(result, message=None, redirect=None):
    """Redirect the client to the status page for the given task result."""
    url = task_status_url(result, message=message, redirect=redirect)
    return HttpResponseRedirect(url)

View file

@ -0,0 +1,290 @@
import math
from functools import reduce
from django.core.paginator import InvalidPage, Page, Paginator
__all__ = (
'InvalidPage',
'ExPaginator',
'DiggPaginator',
'QuerySetDiggPaginator',
)
class ExPaginator(Paginator):
    """Adds a ``softlimit`` option to ``page()``. If True, querying a
    page number larger than max. will not fail, but instead return the
    last available page.

    This is useful when the data source can not provide an exact count
    at all times (like some search engines), meaning the user could
    possibly see links to invalid pages at some point which we wouldn't
    want to fail as 404s.

    >>> items = range(1, 1000)
    >>> paginator = ExPaginator(items, 10)
    >>> paginator.page(1000)
    Traceback (most recent call last):
    InvalidPage: That page contains no results
    >>> paginator.page(1000, softlimit=True)
    <Page 100 of 100>

    # [bug] graceful handling of non-int args
    >>> paginator.page("str")
    Traceback (most recent call last):
    InvalidPage: That page number is not an integer
    """

    def _ensure_int(self, num, e):
        # see Django #7307: coerce to int, re-raising the original
        # InvalidPage when the value is not numeric at all.
        try:
            return int(num)
        except ValueError:
            raise e

    def page(self, number, softlimit=False):
        try:
            return super(ExPaginator, self).page(number)
        except InvalidPage as e:
            number = self._ensure_int(number, e)
            if softlimit and number > self.num_pages:
                # Clamp to the last page rather than 404-ing.
                return self.page(self.num_pages, softlimit=False)
            raise e
class DiggPaginator(ExPaginator):
    """
    Based on Django's default paginator, it adds "Digg-style" page ranges
    with a leading block of pages, an optional middle block, and another
    block at the end of the page range. They are available as attributes
    on the page:

    {# with: page = digg_paginator.page(1) #}
    {% for num in page.leading_range %} ...
    {% for num in page.main_range %} ...
    {% for num in page.trailing_range %} ...

    Additionally, ``page_range`` contains a non-numeric ``False`` element
    for every transition between two ranges.

    {% for num in page.page_range %}
    {% if not num %} ... {# literally output dots #}
    {% else %}{{ num }}
    {% endif %}
    {% endfor %}

    Additional arguments passed to the constructor allow customization of
    how those blocks are constructed:

    body=5, tail=2

    [1] 2 3 4 5 ... 91 92
    |_________|     |___|
       body          tail
              |_____|
              margin

    body=5, tail=2, padding=2

    1 2 ... 6 7 [8] 9 10 ... 91 92
            |_|     |__|
              ^padding^
    |_|     |__________|     |___|
    tail        body          tail

    ``margin`` is the minimum number of pages required between two ranges; if
    there are less, they are combined into one.

    When ``align_left`` is set to ``True``, the paginator operates in a
    special mode that always skips the right tail, e.g. does not display the
    end block unless necessary. This is useful for situations in which the
    exact number of items/pages is not actually known.

    # odd body length
    >>> print(DiggPaginator(range(1,1000), 10, body=5).page(1))
    1 2 3 4 5 ... 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=5).page(100))
    1 2 ... 96 97 98 99 100

    # even body length
    >>> print(DiggPaginator(range(1,1000), 10, body=6).page(1))
    1 2 3 4 5 6 ... 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=6).page(100))
    1 2 ... 95 96 97 98 99 100

    # leading range and main range are combined when close; note how
    # we have varying body and padding values, and their effect.
    >>> print(DiggPaginator(range(1,1000), 10, body=5, padding=2, margin=2).page(3))
    1 2 3 4 5 ... 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=6, padding=2, margin=2).page(4))
    1 2 3 4 5 6 ... 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=5, padding=1, margin=2).page(6))
    1 2 3 4 5 6 7 ... 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=5, padding=2, margin=2).page(7))
    1 2 ... 5 6 7 8 9 ... 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=5, padding=1, margin=2).page(7))
    1 2 ... 5 6 7 8 9 ... 99 100

    # the trailing range works the same
    >>> print(DiggPaginator(range(1,1000), 10, body=5, padding=2, margin=2, ).page(98))
    1 2 ... 96 97 98 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=6, padding=2, margin=2, ).page(97))
    1 2 ... 95 96 97 98 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=5, padding=1, margin=2, ).page(95))
    1 2 ... 94 95 96 97 98 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=5, padding=2, margin=2, ).page(94))
    1 2 ... 92 93 94 95 96 ... 99 100
    >>> print(DiggPaginator(range(1,1000), 10, body=5, padding=1, margin=2, ).page(94))
    1 2 ... 92 93 94 95 96 ... 99 100

    # all three ranges may be combined as well
    >>> print(DiggPaginator(range(1,151), 10, body=6, padding=2).page(7))
    1 2 3 4 5 6 7 8 9 ... 14 15
    >>> print(DiggPaginator(range(1,151), 10, body=6, padding=2).page(8))
    1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
    >>> print(DiggPaginator(range(1,151), 10, body=6, padding=1).page(8))
    1 2 3 4 5 6 7 8 9 ... 14 15

    # no leading or trailing ranges might be required if there are only
    # a very small number of pages
    >>> print(DiggPaginator(range(1,80), 10, body=10).page(1))
    1 2 3 4 5 6 7 8
    >>> print(DiggPaginator(range(1,80), 10, body=10).page(8))
    1 2 3 4 5 6 7 8
    >>> print(DiggPaginator(range(1,12), 10, body=5).page(1))
    1 2

    # test left align mode
    >>> print(DiggPaginator(range(1,1000), 10, body=5, align_left=True).page(1))
    1 2 3 4 5
    >>> print(DiggPaginator(range(1,1000), 10, body=5, align_left=True).page(50))
    1 2 ... 48 49 50 51 52
    >>> print(DiggPaginator(range(1,1000), 10, body=5, align_left=True).page(97))
    1 2 ... 95 96 97 98 99
    >>> print(DiggPaginator(range(1,1000), 10, body=5, align_left=True).page(100))
    1 2 ... 96 97 98 99 100

    # padding: default value
    >>> DiggPaginator(range(1,1000), 10, body=10).padding
    4

    # padding: automatic reduction
    >>> DiggPaginator(range(1,1000), 10, body=5).padding
    2
    >>> DiggPaginator(range(1,1000), 10, body=6).padding
    2

    # padding: sanity check
    >>> DiggPaginator(range(1,1000), 10, body=5, padding=3)
    Traceback (most recent call last):
    ValueError: padding too large for body (max 2)
    """

    def __init__(self, *args, **kwargs):
        self.body = kwargs.pop('body', 10)
        self.tail = kwargs.pop('tail', 2)
        self.align_left = kwargs.pop('align_left', False)
        self.margin = kwargs.pop('margin', 4)  # TODO: make the default relative to body?
        # validate padding value
        max_padding = int(math.ceil(self.body / 2.0) - 1)
        self.padding = kwargs.pop('padding', min(4, max_padding))
        if self.padding > max_padding:
            raise ValueError('padding too large for body (max %d)' % max_padding)
        super(DiggPaginator, self).__init__(*args, **kwargs)

    def page(self, number, *args, **kwargs):
        """Return a standard ``Page`` instance with custom, digg-specific
        page ranges attached.
        """
        page = super(DiggPaginator, self).page(number, *args, **kwargs)
        number = int(number)  # we know this will work

        # easier access
        num_pages, body, tail, padding, margin = \
            self.num_pages, self.body, self.tail, self.padding, self.margin

        # put active page in middle of main range
        main_range = list(map(int, [
            math.floor(number - body / 2.0) + 1,  # +1 = shift odd body to right
            math.floor(number + body / 2.0)]))

        # adjust bounds
        if main_range[0] < 1:
            main_range = list(map(abs(main_range[0] - 1).__add__, main_range))
        if main_range[1] > num_pages:
            main_range = list(map((num_pages - main_range[1]).__add__, main_range))

        # Determine leading and trailing ranges; if possible and appropriate,
        # combine them with the main range, in which case the resulting main
        # block might end up considerable larger than requested. While we
        # can't guarantee the exact size in those cases, we can at least try
        # to come as close as possible: we can reduce the other boundary to
        # max padding, instead of using half the body size, which would
        # otherwise be the case. If the padding is large enough, this will
        # of course have no effect.
        # Example:
        #     total pages=100, page=4, body=5, (default padding=2)
        #     1 2 3 [4] 5 6 ... 99 100
        #     total pages=100, page=4, body=5, padding=1
        #     1 2 3 [4] 5 ... 99 100
        # If it were not for this adjustment, both cases would result in the
        # first output, regardless of the padding value.
        if main_range[0] <= tail + margin:
            leading = []
            main_range = [1, max(body, min(number + padding, main_range[1]))]
            main_range[0] = 1
        else:
            leading = list(range(1, tail + 1))

        # basically same for trailing range, but not in ``left_align`` mode
        if self.align_left:
            trailing = []
        else:
            if main_range[1] >= num_pages - (tail + margin) + 1:
                trailing = []
                if not leading:
                    # ... but handle the special case of neither leading nor
                    # trailing ranges; otherwise, we would now modify the
                    # main range low bound, which we just set in the previous
                    # section, again.
                    main_range = [1, num_pages]
                else:
                    main_range = [min(num_pages - body + 1, max(number - padding, main_range[0])), num_pages]
            else:
                trailing = list(range(num_pages - tail + 1, num_pages + 1))

        # finally, normalize values that are out of bound; this basically
        # fixes all the things the above code screwed up in the simple case
        # of few enough pages where one range would suffice.
        main_range = [max(main_range[0], 1), min(main_range[1], num_pages)]

        # make the result of our calculations available as custom ranges
        # on the ``Page`` instance.
        page.main_range = list(range(main_range[0], main_range[1] + 1))
        page.leading_range = leading
        page.trailing_range = trailing
        # Insert a False marker at each transition between non-empty ranges;
        # templates render those markers as "..." separators.
        page.page_range = reduce(lambda x, y: x + ((x and y) and [False]) + y,
                                 [page.leading_range, page.main_range, page.trailing_range])

        page.__class__ = DiggPage
        return page
class DiggPage(Page):
    """Page subclass that renders its digg-style ranges as text."""

    def __str__(self):
        # Join the three non-empty ranges with " ... " separators.
        parts = [
            " ".join(map(str, self.leading_range)),
            " ".join(map(str, self.main_range)),
            " ".join(map(str, self.trailing_range)),
        ]
        return " ... ".join(part for part in parts if part)

    @property
    def num_pages(self):
        """Total number of pages in the owning paginator."""
        return self.paginator.num_pages
# Backwards-compatible alias; DiggPaginator handles QuerySets directly.
QuerySetDiggPaginator = DiggPaginator

if __name__ == "__main__":
    # Run the doctests embedded in the class docstrings above.
    import doctest
    doctest.testmod()

45
judge/utils/file_cache.py Normal file
View file

@ -0,0 +1,45 @@
import errno
import os
from gzip import open as gzip_open
from urllib.parse import urljoin
class HashFileCache(object):
    """Filesystem cache of rendered artifacts, keyed by content hash.

    Files live under ``root/<hash>/<file>`` and are served from
    ``url/<hash>/<file>``; when *gzip* is enabled, a ``.gz`` sibling is
    written alongside each cached file.
    """

    def __init__(self, root, url, gzip=False):
        self.root = root
        self.url = url
        self.gzip = gzip

    def create(self, hash):
        """Ensure the directory for *hash* exists (idempotent)."""
        # exist_ok replaces the old errno.EEXIST try/except dance.
        os.makedirs(os.path.join(self.root, hash), exist_ok=True)

    def has_file(self, hash, file):
        """Return True if *file* is already cached under *hash*."""
        return os.path.isfile(self.get_path(hash, file))

    def get_path(self, hash, file):
        """Filesystem path of the cached file."""
        return os.path.join(self.root, hash, file)

    def get_url(self, hash, file):
        """Public URL of the cached file."""
        return urljoin(self.url, '%s/%s' % (hash, file))

    def read_file(self, hash, file):
        """Open the cached file for binary reading; caller must close it."""
        return open(self.get_path(hash, file), 'rb')

    def read_data(self, hash, file):
        """Return the full binary contents of the cached file."""
        with self.read_file(hash, file) as f:
            return f.read()

    def cache_data(self, hash, file, data, url=True, gzip=True):
        """Write *data* into the cache and return its URL (when *url*).

        A gzipped sibling is written only when both the instance and the
        call request it.
        """
        if gzip and self.gzip:
            with gzip_open(self.get_path(hash, file + '.gz'), 'wb') as f:
                f.write(data)
        with open(self.get_path(hash, file), 'wb') as f:
            f.write(data)
        if url:
            return self.get_url(hash, file)

185
judge/utils/mathoid.py Normal file
View file

@ -0,0 +1,185 @@
import hashlib
import logging
import re
import requests
from django.conf import settings
from django.core.cache import caches
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from mistune import escape
from judge.utils.file_cache import HashFileCache
from judge.utils.unicode import utf8bytes, utf8text
logger = logging.getLogger('judge.mathoid')

# Matches a '$' preceded by an even number of backslashes, i.e. an
# unescaped dollar sign.
reescape = re.compile(r'(?<!\\)(?:\\{2})*[$]')
# Normalise common Unicode and HTML-entity spellings of math symbols to
# the TeX forms that Mathoid understands; applied in order by format_math.
REPLACES = [
    ('\u2264', r'\le'),
    ('\u2265', r'\ge'),
    ('\u2026', '...'),
    ('\u2212', '-'),
    ('&le;', r'\le'),
    ('&ge;', r'\ge'),
    ('&lt;', '<'),
    ('&gt;', '>'),
    ('&amp;', '&'),
    ('&#8722;', '-'),
    ('&#8804;', r'\le'),
    ('&#8805;', r'\ge'),
    ('&#8230;', '...'),
    (r'\lt', '<'),
    (r'\gt', '>'),
]


def format_math(math):
    """Apply every REPLACES substitution to *math*, in declaration order."""
    for needle, replacement in REPLACES:
        math = math.replace(needle, replacement)
    return math
class MathoidMathParser(object):
    """Renders TeX formulas through a Mathoid service, caching on disk.

    ``type`` selects the output form and should be one of ``types``
    (or 'msp'/'raw', which ``get_result`` also dispatches on).
    """

    types = ('svg', 'mml', 'tex', 'jax')

    def __init__(self, type):
        self.type = type
        self.mathoid_url = settings.MATHOID_URL
        self.cache = HashFileCache(settings.MATHOID_CACHE_ROOT,
                                   settings.MATHOID_CACHE_URL,
                                   settings.MATHOID_GZIP)
        mml_cache = settings.MATHOID_MML_CACHE
        # mml_cache may be falsy, in which case no key-value MML cache is used.
        self.mml_cache = mml_cache and caches[mml_cache]
        self.css_cache = caches[settings.MATHOID_CSS_CACHE]
        self.mml_cache_ttl = settings.MATHOID_MML_CACHE_TTL

    def query_mathoid(self, formula, hash):
        """POST *formula* to Mathoid and cache svg/png/mml/css under *hash*.

        Returns a result dict, or None on any failure (errors are logged).
        """
        self.cache.create(hash)
        try:
            response = requests.post(self.mathoid_url, data={
                # Escape unescaped '$' so Mathoid sees a literal dollar sign.
                'q': reescape.sub(lambda m: '\\' + m.group(0), formula).encode('utf-8'),
                'type': 'tex' if formula.startswith(r'\displaystyle') else 'inline-tex',
            })
            response.raise_for_status()
            data = response.json()
        except requests.ConnectionError:
            logger.exception('Failed to connect to mathoid for: %s', formula)
            return
        except requests.HTTPError as e:
            logger.error('Mathoid failed to render: %s\n%s', formula, e.response.text)
            return
        except Exception:
            logger.exception('Failed to connect to mathoid for: %s', formula)
            return
        if not data['success']:
            logger.error('Mathoid failure for: %s\n%s', formula, data)
            return
        if any(i not in data for i in ('mml', 'png', 'svg', 'mathoidStyle')):
            logger.error('Mathoid did not return required information (mml, png, svg, mathoidStyle needed):\n%s', data)
            return
        css = data['mathoidStyle']
        mml = data['mml']
        result = {
            'css': css, 'mml': mml,
            'png': self.cache.cache_data(hash, 'png', bytearray(data['png']['data'])),
            'svg': self.cache.cache_data(hash, 'svg', data['svg'].encode('utf-8')),
        }
        # mml/css are stored on disk too so query_cache can rebuild results.
        self.cache.cache_data(hash, 'mml', mml.encode('utf-8'), url=False, gzip=False)
        self.cache.cache_data(hash, 'css', css.encode('utf-8'), url=False, gzip=False)
        return result

    def query_cache(self, hash):
        """Rebuild a result dict for *hash* from the disk/key-value caches."""
        result = {
            'svg': self.cache.get_url(hash, 'svg'),
            'png': self.cache.get_url(hash, 'png'),
        }
        key = 'mathoid:css:' + hash
        css = result['css'] = self.css_cache.get(key)
        if css is None:
            css = result['css'] = self.cache.read_data(hash, 'css').decode('utf-8')
            # NOTE(review): the CSS cache entry reuses the MML TTL setting —
            # confirm this is intentional.
            self.css_cache.set(key, css, self.mml_cache_ttl)
        mml = None
        if self.mml_cache:
            mml = result['mml'] = self.mml_cache.get('mathoid:mml:' + hash)
        if mml is None:
            mml = result['mml'] = self.cache.read_data(hash, 'mml').decode('utf-8')
            if self.mml_cache:
                self.mml_cache.set('mathoid:mml:' + hash, mml, self.mml_cache_ttl)
        return result

    def get_result(self, formula):
        """Render *formula* in the configured output type, or None on failure."""
        if self.type == 'tex':
            # Raw TeX output needs no rendering at all.
            return
        hash = hashlib.sha1(utf8bytes(formula)).hexdigest()
        formula = utf8text(formula)
        if self.cache.has_file(hash, 'css'):
            result = self.query_cache(hash)
        else:
            result = self.query_mathoid(formula, hash)
        if not result:
            return None
        result['tex'] = formula
        result['display'] = formula.startswith(r'\displaystyle')
        # Dispatch to the formatter matching the configured output type.
        return {
            'mml': self.output_mml,
            'msp': self.output_msp,
            'svg': self.output_svg,
            'jax': self.output_jax,
            'png': self.output_png,
            'raw': lambda x: x,
        }[self.type](result)

    def output_mml(self, result):
        # Bare MathML markup.
        return result['mml']

    def output_msp(self, result):
        # 100% MediaWiki compatibility.
        return format_html('<span class="{5}-math">'
                           '<span class="mwe-math-mathml-{5} mwe-math-mathml-a11y"'
                           ' style="display: none;">{0}</span>'
                           '<img src="{1}" class="mwe-math-fallback-image-{5}"'
                           ' onerror="this.src=\'{2}\';this.onerror=null"'
                           ' aria-hidden="true" style="{3}" alt="{4}"></span>',
                           mark_safe(result['mml']), result['svg'], result['png'], result['css'], result['tex'],
                           ['inline', 'display'][result['display']])

    def output_jax(self, result):
        # SVG with hidden TeX source for client-side MathJax re-rendering.
        return format_html('<span class="{4}">'
                           '''<img class="tex-image" src="{0}" style="{2}" alt="{3}"'''
                           ''' onerror="this.src='{1}';this.onerror=null">'''
                           '''<span class="tex-text" style="display:none">{5}{3}{5}</span>'''
                           '</span>',
                           result['svg'], result['png'], result['css'], result['tex'],
                           ['inline-math', 'display-math'][result['display']], ['~', '$$'][result['display']])

    def output_svg(self, result):
        # SVG image with PNG fallback on error.
        return format_html('<img class="{4}" src="{0}" style="{2}" alt="{3}" '
                           '''onerror="this.src='{1}';this.onerror=null">''',
                           result['svg'], result['png'], result['css'], result['tex'],
                           ['inline-math', 'display-math'][result['display']])

    def output_png(self, result):
        # Plain PNG image.
        return format_html('<img class="{3}" src="{0}" style="{1}" alt="{2}">',
                           result['png'], result['css'], result['tex'],
                           ['inline-math', 'display-math'][result['display']])

    def display_math(self, math):
        """Render display-style math; fall back to raw \\[...\\] markup."""
        math = format_math(math)
        return self.get_result(r'\displaystyle ' + math) or r'\[%s\]' % escape(math)

    def inline_math(self, math):
        """Render inline math; fall back to raw \\(...\\) markup."""
        math = format_math(math)
        return self.get_result(math) or r'\(%s\)' % escape(math)

24
judge/utils/opengraph.py Normal file
View file

@ -0,0 +1,24 @@
from django.core.cache import cache
from django.template.defaultfilters import truncatewords
from judge.jinja2.markdown import markdown
from judge.jinja2.reference import reference
def generate_opengraph(cache_key, data, style):
    """Return (description, image URL) OpenGraph metadata for markdown *data*.

    The description is the first non-empty paragraph of the rendered
    document, truncated to 60 words; the image is the first ``<img>``,
    if any.  Results are cached for a day under *cache_key*.
    """
    metadata = cache.get(cache_key)
    if metadata is not None:
        return metadata

    tree = reference(markdown(data, style)).tree
    description = None
    for paragraph in tree.iterfind('.//p'):
        text = paragraph.text_content().strip()
        if text:
            description = text
            break
    if description:
        # Strip leftover MathJax delimiters from rendered math.
        for token in (r'\[', r'\]', r'\(', r'\)'):
            description = description.replace(token, '')
    images = tree.xpath('.//img')
    metadata = truncatewords(description, 60), images[0].get('src') if images else None
    cache.set(cache_key, metadata, 86400)
    return metadata

198
judge/utils/problem_data.py Normal file
View file

@ -0,0 +1,198 @@
import json
import os
import re
import yaml
from django.conf import settings
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage
from django.urls import reverse
from django.utils.translation import gettext as _
# ``split_path_first`` splits a storage-relative path into its first
# component and the remainder.  On platforms with an alternate separator
# (e.g. Windows), either separator is honoured.
if os.altsep:
    def split_path_first(path, repath=re.compile('[%s]' % re.escape(os.sep + os.altsep))):
        # The compiled pattern is bound once as a default argument.
        return repath.split(path, 1)
else:
    def split_path_first(path):
        return path.split(os.sep, 1)
class ProblemDataStorage(FileSystemStorage):
    """Storage rooted at DMOJ_PROBLEM_DATA_ROOT with overwrite semantics."""

    def __init__(self):
        super(ProblemDataStorage, self).__init__(settings.DMOJ_PROBLEM_DATA_ROOT)

    def url(self, name):
        """Map ``<problem>/<file>`` to the problem-data-file view URL."""
        path = split_path_first(name)
        if len(path) != 2:
            raise ValueError('This file is not accessible via a URL.')
        return reverse('problem_data_file', args=path)

    def _save(self, name, content):
        # Overwrite in place instead of Django's default rename-on-conflict.
        if self.exists(name):
            self.delete(name)
        return super(ProblemDataStorage, self)._save(name, content)

    def get_available_name(self, name, max_length=None):
        # Paired with _save: the requested name is always the stored name.
        return name

    def rename(self, old, new):
        """Rename a stored file on disk."""
        return os.rename(self.path(old), self.path(new))
class ProblemDataError(Exception):
    """Raised when problem test data is inconsistent.

    The message is kept as an attribute so views can display it without
    str()-ing the exception.
    """

    def __init__(self, message):
        super(ProblemDataError, self).__init__(message)
        self.message = message
class ProblemDataCompiler(object):
    """Compiles a problem's test-case rows into a judge ``init.yml``."""

    def __init__(self, problem, data, cases, files):
        self.problem = problem
        self.data = data
        self.cases = cases
        self.files = files
        self.generator = data.generator

    def make_init(self):
        """Build the init.yml structure (a dict) from the case list.

        Raises ProblemDataError on inconsistent input.  Case types:
        'C' = normal case, 'S' = batch start, 'E' = batch end.
        """
        cases = []
        batch = None

        def end_batch():
            # Close the open batch and append it to the top-level case list.
            if not batch['batched']:
                raise ProblemDataError(_('Empty batches not allowed.'))
            cases.append(batch)

        def make_checker(case):
            # A checker with arguments becomes a {name, args} mapping.
            if case.checker_args:
                return {
                    'name': case.checker,
                    'args': json.loads(case.checker_args),
                }
            return case.checker

        for i, case in enumerate(self.cases, 1):
            if case.type == 'C':
                data = {}
                if batch:
                    # Batched cases inherit points and pretest status.
                    case.points = None
                    case.is_pretest = batch['is_pretest']
                else:
                    if case.points is None:
                        raise ProblemDataError(_('Points must be defined for non-batch case #%d.') % i)
                    data['is_pretest'] = case.is_pretest
                if not self.generator:
                    # Without a generator, both data files must be present.
                    if case.input_file not in self.files:
                        raise ProblemDataError(_('Input file for case %d does not exist: %s') %
                                               (i, case.input_file))
                    if case.output_file not in self.files:
                        raise ProblemDataError(_('Output file for case %d does not exist: %s') %
                                               (i, case.output_file))
                if case.input_file:
                    data['in'] = case.input_file
                if case.output_file:
                    data['out'] = case.output_file
                if case.points is not None:
                    data['points'] = case.points
                if case.generator_args:
                    data['generator_args'] = case.generator_args.splitlines()
                if case.output_limit is not None:
                    data['output_limit_length'] = case.output_limit
                if case.output_prefix is not None:
                    data['output_prefix_length'] = case.output_prefix
                if case.checker:
                    data['checker'] = make_checker(case)
                else:
                    case.checker_args = ''
                case.save(update_fields=('checker_args', 'is_pretest'))
                (batch['batched'] if batch else cases).append(data)
            elif case.type == 'S':
                # Batch start; implicitly closes any already-open batch.
                if batch:
                    end_batch()
                if case.points is None:
                    raise ProblemDataError(_('Batch start case #%d requires points.') % i)
                batch = {
                    'points': case.points,
                    'batched': [],
                    'is_pretest': case.is_pretest,
                }
                if case.generator_args:
                    batch['generator_args'] = case.generator_args.splitlines()
                if case.output_limit is not None:
                    batch['output_limit_length'] = case.output_limit
                if case.output_prefix is not None:
                    batch['output_prefix_length'] = case.output_prefix
                if case.checker:
                    batch['checker'] = make_checker(case)
                else:
                    case.checker_args = ''
                case.input_file = ''
                case.output_file = ''
                case.save(update_fields=('checker_args', 'input_file', 'output_file'))
            elif case.type == 'E':
                # Explicit batch end; marker rows carry no data of their own.
                if not batch:
                    raise ProblemDataError(_('Attempt to end batch outside of one in case #%d') % i)
                case.is_pretest = batch['is_pretest']
                case.input_file = ''
                case.output_file = ''
                case.generator_args = ''
                case.checker = ''
                case.checker_args = ''
                case.save()
                end_batch()
                batch = None
        # An unterminated batch at the end of the list is closed implicitly.
        if batch:
            end_batch()

        init = {}
        if self.data.zipfile:
            zippath = split_path_first(self.data.zipfile.name)
            if len(zippath) != 2:
                raise ProblemDataError(_('How did you corrupt the zip path?'))
            init['archive'] = zippath[1]
        if self.generator:
            generator_path = split_path_first(self.generator.name)
            if len(generator_path) != 2:
                raise ProblemDataError(_('How did you corrupt the generator path?'))
            init['generator'] = generator_path[1]
        # Split pretests into their own list; the per-case flag is internal.
        pretests = [case for case in cases if case['is_pretest']]
        for case in cases:
            del case['is_pretest']
        if pretests:
            init['pretest_test_cases'] = pretests
        if cases:
            init['test_cases'] = cases
        if self.data.output_limit is not None:
            init['output_limit_length'] = self.data.output_limit
        if self.data.output_prefix is not None:
            init['output_prefix_length'] = self.data.output_prefix
        if self.data.checker:
            init['checker'] = make_checker(self.data)
        else:
            self.data.checker_args = ''
        return init

    def compile(self):
        """Generate init.yml and store it, recording any error as feedback."""
        from judge.models import problem_data_storage

        yml_file = '%s/init.yml' % self.problem.code
        try:
            init = yaml.safe_dump(self.make_init())
        except ProblemDataError as e:
            # Surface the validation error to the UI and drop any stale file.
            self.data.feedback = e.message
            self.data.save()
            problem_data_storage.delete(yml_file)
        else:
            self.data.feedback = ''
            self.data.save()
            problem_data_storage.save(yml_file, ContentFile(init))

    @classmethod
    def generate(cls, *args, **kwargs):
        """Convenience wrapper: build a compiler and run it."""
        self = cls(*args, **kwargs)
        self.compile()

147
judge/utils/problems.py Normal file
View file

@ -0,0 +1,147 @@
from collections import defaultdict
from math import e
from django.core.cache import cache
from django.db.models import Case, Count, ExpressionWrapper, F, Max, Q, When
from django.db.models.fields import FloatField
from django.utils import timezone
from django.utils.translation import gettext as _, gettext_noop
from judge.models import Problem, Submission
__all__ = ['contest_completed_ids', 'get_result_data', 'user_completed_ids', 'user_authored_ids', 'user_editable_ids']
def user_authored_ids(profile):
    """Return the set of problem IDs authored by *profile*."""
    authored = Problem.objects.filter(authors=profile).values_list('id', flat=True)
    return set(authored)
def user_editable_ids(profile):
    """Return the set of problem IDs *profile* authors or curates."""
    editable = Problem.objects.filter(authors=profile) | Problem.objects.filter(curators=profile)
    return set(editable.values_list('id', flat=True))
def contest_completed_ids(participation):
    """IDs of contest problems fully solved in *participation* (cached 24h)."""
    key = 'contest_complete:%d' % participation.id
    result = cache.get(key)
    if result is not None:
        return result
    solved = participation.submissions.filter(
        submission__result='AC', points=F('problem__points'),
    ).values_list('problem__problem__id', flat=True).distinct()
    result = set(solved)
    cache.set(key, result, 86400)
    return result
def user_completed_ids(profile):
    """IDs of problems *profile* has fully solved (cached for a day)."""
    key = 'user_complete:%d' % profile.id
    result = cache.get(key)
    if result is not None:
        return result
    solved = Submission.objects.filter(
        user=profile, result='AC', points=F('problem__points'),
    ).values_list('problem_id', flat=True).distinct()
    result = set(solved)
    cache.set(key, result, 86400)
    return result
def contest_attempted_ids(participation):
    """Map attempted-but-unsolved contest problem IDs to points info.

    Each entry maps the problem ID to the best score achieved and the
    maximum available; cached for a day.
    """
    key = 'contest_attempted:%s' % participation.id
    result = cache.get(key)
    if result is not None:
        return result
    attempted = (participation.submissions
                 .values_list('problem__problem__id', 'problem__points')
                 .annotate(points=Max('points'))
                 .filter(points__lt=F('problem__points')))
    result = {id: {'achieved_points': points, 'max_points': max_points}
              for id, max_points, points in attempted}
    cache.set(key, result, 86400)
    return result
def user_attempted_ids(profile):
    """Map attempted-but-unsolved problem IDs to points info for *profile*.

    Each entry maps the problem ID to the best score achieved and the
    maximum available; cached for a day.
    """
    key = 'user_attempted:%s' % profile.id
    result = cache.get(key)
    if result is not None:
        return result
    attempted = (Submission.objects.filter(user=profile)
                 .values_list('problem__id', 'problem__points')
                 .annotate(points=Max('points'))
                 .filter(points__lt=F('problem__points')))
    result = {id: {'achieved_points': points, 'max_points': max_points}
              for id, max_points, points in attempted}
    cache.set(key, result, 86400)
    return result
def _get_result_data(results):
    """Build the chart-friendly result breakdown from a result→count map.

    Uses gettext_noop for category names because the structure may be
    cached; the caller (SubmissionList.get_result_data) translates at
    render time.
    """
    error_count = (results['MLE'] + results['OLE'] + results['IR'] +
                   results['RTE'] + results['AB'] + results['IE'])
    categories = [
        {'code': 'AC', 'name': gettext_noop('Accepted'), 'count': results['AC']},
        {'code': 'WA', 'name': gettext_noop('Wrong'), 'count': results['WA']},
        {'code': 'CE', 'name': gettext_noop('Compile Error'), 'count': results['CE']},
        {'code': 'TLE', 'name': gettext_noop('Timeout'), 'count': results['TLE']},
        {'code': 'ERR', 'name': gettext_noop('Error'), 'count': error_count},
    ]
    return {
        'categories': categories,
        'total': sum(results.values()),
    }
def get_result_data(*args, **kwargs):
    """Aggregate submission results into the chart-friendly breakdown.

    Accepts either a single positional submission queryset, or keyword
    filters passed to ``Submission.objects.filter`` — not both.
    """
    if args:
        submissions = args[0]
        if kwargs:
            raise ValueError(_("Can't pass both queryset and keyword filters"))
    else:
        # ``kwargs`` is always a dict, so test truthiness: the previous
        # ``kwargs is not None`` check was always true, making the
        # unfiltered branch unreachable.
        submissions = Submission.objects.filter(**kwargs) if kwargs else Submission.objects
    raw = submissions.values('result').annotate(count=Count('result')).values_list('result', 'count')
    return _get_result_data(defaultdict(int, raw))
def editable_problems(user, profile=None):
    """Return a queryset of problems *user* is allowed to edit."""
    if profile is None:
        profile = user.profile
    queryset = Problem.objects.all()
    if user.has_perm('judge.edit_all_problem'):
        return queryset
    editable = Q(authors__id=profile.id) | Q(curators__id=profile.id)
    if user.has_perm('judge.edit_public_problem'):
        editable |= Q(is_public=True)
    return queryset.filter(editable)
def hot_problems(duration, limit):
    """Return up to *limit* "hot" public problems with recent activity.

    Problems are scored by a mix of point value, AC quality, and how many
    distinct users submitted within *duration*; the ranked queryset is
    cached for 15 minutes.
    """
    cache_key = 'hot_problems:%d:%d' % (duration.total_seconds(), limit)
    qs = cache.get(cache_key)
    if qs is None:
        # Only mid-range (3 < points < 25), public, non-private problems with
        # submissions inside the window are candidates.
        qs = Problem.objects.filter(is_public=True, is_organization_private=False,
                                    submission__date__gt=timezone.now() - duration, points__gt=3, points__lt=25)
        qs0 = qs.annotate(k=Count('submission__user', distinct=True)).order_by('-k').values_list('k', flat=True)
        if not qs0:
            return []
        # make this an aggregate
        # mx = highest distinct-user count among candidates; used below both
        # as a popularity cutoff and to normalize the exponential bonus.
        mx = float(qs0[0])
        qs = qs.annotate(unique_user_count=Count('submission__user', distinct=True))
        # fix braindamage in excluding CE
        qs = qs.annotate(submission_volume=Count(Case(
            When(submission__result='AC', then=1),
            When(submission__result='WA', then=1),
            When(submission__result='IR', then=1),
            When(submission__result='RTE', then=1),
            When(submission__result='TLE', then=1),
            When(submission__result='OLE', then=1),
            output_field=FloatField(),
        )))
        qs = qs.annotate(ac_volume=Count(Case(
            When(submission__result='AC', then=1),
            output_field=FloatField(),
        )))
        # Drop problems with fewer than a third of the leader's unique users.
        qs = qs.filter(unique_user_count__gt=max(mx / 3.0, 1))
        # Score = half the point value weighted by recent AC ratio blended with
        # all-time ac_rate, plus an exponential popularity bonus relative to mx.
        qs = qs.annotate(ordering=ExpressionWrapper(
            0.5 * F('points') * (0.4 * F('ac_volume') / F('submission_volume') + 0.6 * F('ac_rate')) +
            100 * e ** (F('unique_user_count') / mx), output_field=FloatField(),
        )).order_by('-ordering').defer('description')[:limit]
        cache.set(cache_key, qs, 900)
    return qs

137
judge/utils/pwned.py Normal file
View file

@ -0,0 +1,137 @@
"""
Based on https://github.com/ubernostrum/pwned-passwords-django.
Original license:
Copyright (c) 2018, James Bennett
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the author nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."""
import hashlib
import logging
import requests
from django.conf import settings
from django.contrib.auth.password_validation import CommonPasswordValidator
from django.core.exceptions import ValidationError
from django.utils.six import string_types
from django.utils.translation import gettext as _, ungettext
from judge.utils.unicode import utf8bytes
log = logging.getLogger(__name__)
API_ENDPOINT = 'https://api.pwnedpasswords.com/range/{}'
REQUEST_TIMEOUT = 2.0 # 2 seconds
def _get_pwned(prefix):
    """
    Fetches a dict of all hash suffixes from Pwned Passwords for a
    given SHA-1 prefix. Returns None when the API cannot be reached.
    """
    timeout = getattr(settings, 'PWNED_PASSWORDS_API_TIMEOUT', REQUEST_TIMEOUT)
    try:
        response = requests.get(url=API_ENDPOINT.format(prefix), timeout=timeout)
        response.raise_for_status()
    except requests.RequestException:
        # Gracefully handle timeouts and HTTP error response codes.
        log.warning('Skipped Pwned Passwords check due to error', exc_info=True)
        return None

    # Each response line is '<suffix>:<count>'.
    suffix_counts = {}
    for entry in response.text.splitlines():
        suffix, _sep, count = entry.partition(':')
        suffix_counts[suffix] = int(count)
    return suffix_counts
def pwned_password(password):
    """
    Checks a password against the Pwned Passwords database.

    Returns the number of times the password appears there (0 if absent),
    or None when the API could not be queried.
    """
    if not isinstance(password, string_types):
        raise TypeError('Password values to check must be strings.')
    digest = hashlib.sha1(utf8bytes(password)).hexdigest().upper()
    # k-anonymity: only the first 5 hex digits are sent to the API.
    suffix_counts = _get_pwned(digest[:5])
    if suffix_counts is None:
        # Gracefully handle timeouts and HTTP error response codes.
        return None
    return suffix_counts.get(digest[5:], 0)
class PwnedPasswordsValidator(object):
    """
    Password validator which checks the Pwned Passwords database,
    falling back to Django's common-password list when the API is down.
    """
    DEFAULT_HELP_MESSAGE = _("Your password can't be a commonly used password.")
    DEFAULT_PWNED_MESSAGE = _('This password is too common.')

    def __init__(self, error_message=None, help_message=None):
        self.help_message = help_message or self.DEFAULT_HELP_MESSAGE
        message = error_message or self.DEFAULT_PWNED_MESSAGE

        # A plain string serves as both singular and plural form; otherwise
        # expect a (singular, plural) pair.
        if isinstance(message, string_types):
            singular = plural = message
        else:
            singular, plural = message

        self.error_message = {
            'singular': singular,
            'plural': plural,
        }

    def validate(self, password, user=None):
        amount = pwned_password(password)
        if amount is None:
            # HIBP API failure. Instead of allowing a potentially compromised
            # password, check Django's list of common passwords generated from
            # the same database.
            CommonPasswordValidator().validate(password, user)
            return
        if amount:
            raise ValidationError(
                ungettext(
                    self.error_message['singular'],
                    self.error_message['plural'],
                    amount,
                ),
                params={'amount': amount},
                code='pwned_password',
            )

    def get_help_text(self):
        return self.help_message

34
judge/utils/ranker.py Normal file
View file

@ -0,0 +1,34 @@
from operator import attrgetter
def ranker(iterable, key=attrgetter('points'), rank=0):
    """Yield (rank, item) pairs using standard competition ranking (1, 1, 3, ...).

    *iterable* must already be sorted by *key*: equal keys share a rank and
    the next distinct key skips ahead by the number of tied items. Ranks
    start at ``rank + 1``.
    """
    delta = 1  # items seen since the rank last advanced
    last = None
    for item in iterable:
        current = key(item)
        if current != last:
            rank += delta
            delta = 0
            # Reuse the value already computed; the original recomputed
            # key(item) once per element.
            last = current
        delta += 1
        yield rank, item
def tie_ranker(iterable, key=attrgetter('points')):
    """Yield (rank, item) pairs where tied items share the average of the
    ranks they span (e.g. 1.5, 1.5, 3.0). Input must be sorted by *key*."""
    rank = 0
    delta = 1
    last = None
    tied = []

    for item in iterable:
        current = key(item)
        if current != last:
            # Flush the previous tie group at the midpoint of its rank span.
            shared = rank + (delta - 1) / 2.0
            for tied_item in tied:
                yield shared, tied_item
            rank += delta
            delta = 0
            tied = []
        delta += 1
        tied.append(item)
        last = key(item)

    # Flush the final group.
    shared = rank + (delta - 1) / 2.0
    for tied_item in tied:
        yield shared, tied_item

96
judge/utils/raw_sql.py Normal file
View file

@ -0,0 +1,96 @@
from copy import copy
from django.db import connections
from django.db.models import Field
from django.db.models.expressions import RawSQL
from django.db.models.sql.constants import INNER, LOUTER
from django.db.models.sql.datastructures import Join
from django.utils import six
from judge.utils.cachedict import CacheDict
def unique_together_left_join(queryset, model, link_field_name, filter_field_name, filter_value, parent_model=None):
    """LEFT JOIN *model* onto *queryset* with an extra equality restriction.

    Joins through the relation named *link_field_name* and constrains joined
    rows to those whose *filter_field_name* equals *filter_value*. The
    restriction is placed in the JOIN's ON clause (via get_extra_restriction),
    so parent rows without a match are kept with NULLs. Returns the alias the
    query assigned to the join.
    """
    # Copy the relation so patching get_extra_restriction below does not
    # mutate the field shared by all queries on this model.
    link_field = copy(model._meta.get_field(link_field_name).remote_field)
    filter_field = model._meta.get_field(filter_field_name)

    def restrictions(where_class, alias, related_alias):
        # Extra ON-clause condition: <alias>.<filter_field> = filter_value.
        cond = where_class()
        cond.add(filter_field.get_lookup('exact')(filter_field.get_col(alias), filter_value), 'AND')
        return cond

    link_field.get_extra_restriction = restrictions

    if parent_model is not None:
        parent_alias = parent_model._meta.db_table
    else:
        parent_alias = queryset.query.get_initial_alias()
    return queryset.query.join(Join(model._meta.db_table, parent_alias, None, LOUTER, link_field, True))
class RawSQLJoin(Join):
    """A Join whose "table" is a raw SQL subquery with bound parameters."""

    def __init__(self, subquery, subquery_params, parent_alias, table_alias, join_type, join_field, nullable,
                 filtered_relation=None):
        # Parameters for the subquery SQL; spliced in ahead of the join's own
        # parameters in as_sql below.
        self.subquery_params = subquery_params
        super().__init__(subquery, parent_alias, table_alias, join_type, join_field, nullable, filtered_relation)

    def as_sql(self, compiler, connection):
        # Register '(subquery)' as the "quoted" form of the table name so the
        # compiler emits the subquery verbatim instead of `quoting` it.
        compiler.quote_cache[self.table_name] = '(%s)' % self.table_name
        sql, params = super().as_sql(compiler, connection)
        return sql, self.subquery_params + params
class FakeJoinField:
    """Minimal stand-in for a relation field, exposing only what Join needs
    to build an ON clause from explicit column pairs."""

    def __init__(self, joining_columns):
        # Pairs of (parent column, joined column) used for the ON clause.
        self.joining_columns = joining_columns

    def get_joining_columns(self):
        return self.joining_columns

    def get_extra_restriction(self, where_class, alias, remote_alias):
        # No extra restrictions beyond the joining columns.
        return None
def join_sql_subquery(queryset, subquery, params, join_fields, alias, join_type=INNER, parent_model=None):
    """Join a raw SQL *subquery* (with bind *params*) into *queryset* as *alias*.

    *join_fields* is a sequence of (parent column, subquery column) pairs for
    the ON clause. Mutates queryset.query in place.
    """
    if parent_model is not None:
        parent_alias = parent_model._meta.db_table
    else:
        parent_alias = queryset.query.get_initial_alias()
    # Mark the alias external so the query does not rename it during compilation.
    queryset.query.external_aliases.add(alias)
    join = RawSQLJoin(subquery, params, parent_alias, alias, join_type, FakeJoinField(join_fields), join_type == LOUTER)
    queryset.query.join(join)
    # Force our chosen alias; query.join may have generated a different one.
    join.table_alias = alias
def RawSQLColumn(model, field=None):
    """Return a RawSQL expression referencing ``table.column`` for a model
    field. Accepts either (model, field/field name) or a Field instance."""
    if isinstance(model, Field):
        # Called with a single Field: derive the model from it.
        model, field = model.model, model
    if isinstance(field, six.string_types):
        field = model._meta.get_field(field)
    column = field.get_attname_column()[1]
    return RawSQL('%s.%s' % (model._meta.db_table, column), ())
def make_straight_join_query(QueryType):
    """Create a *QueryType* subclass that rewrites INNER JOINs as MySQL
    STRAIGHT_JOINs (forcing the join order as written)."""
    class Query(QueryType):
        def join(self, join, *args, **kwargs):
            alias = super().join(join, *args, **kwargs)
            join = self.alias_map[alias]
            if join.join_type == INNER:
                join.join_type = 'STRAIGHT_JOIN'
            return alias
    return Query


# One generated subclass per concrete Query type, built lazily on first use.
straight_join_cache = CacheDict(make_straight_join_query)
def use_straight_join(queryset):
    """Rebuild *queryset*'s query to use STRAIGHT_JOINs; no-op off MySQL."""
    if connections[queryset.db].vendor != 'mysql':
        return
    try:
        cloner = queryset.query.chain  # modern Django spelling
    except AttributeError:
        cloner = queryset.query.clone  # fallback for older Django
    # Re-clone the query as the cached STRAIGHT_JOIN-emitting Query subclass.
    queryset.query = cloner(straight_join_cache[type(queryset.query)])

11
judge/utils/recaptcha.py Normal file
View file

@ -0,0 +1,11 @@
# Optional reCAPTCHA support: ReCaptchaField/ReCaptchaWidget are exposed only
# when django-recaptcha2 is importable AND a private key is configured;
# otherwise both names are None so callers can feature-detect.
try:
    from snowpenguin.django.recaptcha2.fields import ReCaptchaField
    from snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget
except ImportError:
    ReCaptchaField = None
    ReCaptchaWidget = None
else:
    from django.conf import settings
    if not hasattr(settings, 'RECAPTCHA_PRIVATE_KEY'):
        ReCaptchaField = None
        ReCaptchaWidget = None

53
judge/utils/stats.py Normal file
View file

@ -0,0 +1,53 @@
from operator import itemgetter
__all__ = ('chart_colors', 'highlight_colors', 'get_pie_chart', 'get_bar_chart')
chart_colors = [0x3366CC, 0xDC3912, 0xFF9900, 0x109618, 0x990099, 0x3B3EAC, 0x0099C6, 0xDD4477, 0x66AA00, 0xB82E2E,
0x316395, 0x994499, 0x22AA99, 0xAAAA11, 0x6633CC, 0xE67300, 0x8B0707, 0x329262, 0x5574A6, 0x3B3EAC]
highlight_colors = []
def _highlight_colors():
for color in chart_colors:
r, g, b = color >> 16, (color >> 8) & 0xFF, color & 0xFF
highlight_colors.append('#%02X%02X%02X' % (min(int(r * 1.2), 255),
min(int(g * 1.2), 255),
min(int(b * 1.2), 255)))
_highlight_colors()
chart_colors = list(map('#%06X'.__mod__, chart_colors))
def get_pie_chart(data):
    """Build a Chart.js pie-chart config from (label, count) pairs."""
    return {
        'labels': [row[0] for row in data],
        'datasets': [
            {
                'backgroundColor': chart_colors,
                'highlightBackgroundColor': highlight_colors,
                'data': [row[1] for row in data],
            },
        ],
    }
def get_bar_chart(data, **kwargs):
    """Build a Chart.js bar-chart config from (label, value) pairs.

    Colors may be overridden via the fillColor, strokeColor, highlightFill
    and highlightStroke keyword arguments.
    """
    labels = [row[0] for row in data]
    values = [row[1] for row in data]
    return {
        'labels': labels,
        'datasets': [
            {
                'backgroundColor': kwargs.get('fillColor', 'rgba(151,187,205,0.5)'),
                'borderColor': kwargs.get('strokeColor', 'rgba(151,187,205,0.8)'),
                'borderWidth': 1,
                'hoverBackgroundColor': kwargs.get('highlightFill', 'rgba(151,187,205,0.75)'),
                'hoverBorderColor': kwargs.get('highlightStroke', 'rgba(151,187,205,1)'),
                'data': values,
            },
        ],
    }

12
judge/utils/strings.py Normal file
View file

@ -0,0 +1,12 @@
def safe_int_or_none(value):
    """Parse *value* as an int, returning None instead of raising on bad input."""
    try:
        result = int(value)
    except (ValueError, TypeError):
        result = None
    return result
def safe_float_or_none(value):
    """Parse *value* as a float, returning None instead of raising on bad input."""
    try:
        result = float(value)
    except (ValueError, TypeError):
        result = None
    return result

View file

@ -0,0 +1,8 @@
from django.conf import settings
# Newsletter support is optional: expose Subscription = None when the app is
# not installed so callers can feature-detect.
if 'newsletter' in settings.INSTALLED_APPS:
    from newsletter.models import Subscription
else:
    Subscription = None

# ID of the newsletter users are subscribed to on registration; None when the
# newsletter app is absent.
newsletter_id = None if Subscription is None else settings.DMOJ_NEWSLETTER_ID_ON_REGISTER

85
judge/utils/texoid.py Normal file
View file

@ -0,0 +1,85 @@
import hashlib
import json
import logging
from base64 import b64decode
import requests
from django.conf import settings
from django.core.cache import caches
from judge.utils.file_cache import HashFileCache
from judge.utils.unicode import utf8bytes
logger = logging.getLogger('judge.texoid')
# True when a Texoid rendering service URL is configured; callers feature-detect.
TEXOID_ENABLED = hasattr(settings, 'TEXOID_URL')
class TexoidRenderer(object):
    """Client for a Texoid LaTeX-rendering HTTP service.

    Rendered PNG/SVG images are stored in an on-disk HashFileCache keyed by
    the SHA-1 of the formula; render metadata is additionally kept in a
    Django cache backend with its own TTL.
    """

    def __init__(self):
        self.cache = HashFileCache(settings.TEXOID_CACHE_ROOT,
                                   settings.TEXOID_CACHE_URL,
                                   settings.TEXOID_GZIP)
        self.meta_cache = caches[settings.TEXOID_META_CACHE]
        self.meta_cache_ttl = settings.TEXOID_META_CACHE_TTL

    def query_texoid(self, document, hash):
        """POST *document* to Texoid and cache the rendered result under *hash*.

        Returns {'png', 'svg', 'meta'} on success, {'error': ...} when Texoid
        reports a render failure, or None on communication failure.
        """
        self.cache.create(hash)

        try:
            response = requests.post(settings.TEXOID_URL, data=utf8bytes(document), headers={
                'Content-Type': 'application/x-tex',
            })
            response.raise_for_status()
        except requests.HTTPError as e:
            # Bug fix: requests.Response exposes `status_code`, not `status`;
            # the old `e.response.status` raised AttributeError here instead
            # of logging the render failure.
            if e.response.status_code == 400:
                logger.error('Texoid failed to render: %s\n%s', document, e.response.text)
            else:
                logger.exception('Failed to connect to texoid for: %s', document)
            return
        except Exception:
            logger.exception('Failed to connect to texoid for: %s', document)
            return

        try:
            data = response.json()
        except ValueError:
            logger.exception('Invalid texoid response for: %s\n%s', document, response.text)
            return

        if not data['success']:
            logger.error('Texoid failure for: %s\n%s', document, data)
            return {'error': data['error']}

        meta = data['meta']
        # Metadata sits beside the images on disk but is never gzipped or URL-served.
        self.cache.cache_data(hash, 'meta', utf8bytes(json.dumps(meta)), url=False, gzip=False)

        result = {
            'png': self.cache.cache_data(hash, 'png', b64decode(data['png'])),
            'svg': self.cache.cache_data(hash, 'svg', data['svg'].encode('utf-8')),
            'meta': meta,
        }
        return result

    def query_cache(self, hash):
        """Assemble a result dict from the on-disk cache, re-populating the
        metadata cache entry when it has expired."""
        result = {
            'svg': self.cache.get_url(hash, 'svg'),
            'png': self.cache.get_url(hash, 'png'),
        }

        key = 'texoid:meta:' + hash
        cached_meta = self.meta_cache.get(key)
        if cached_meta is None:
            cached_meta = json.loads(self.cache.read_data(hash, 'meta').decode('utf-8'))
            self.meta_cache.set(key, cached_meta, self.meta_cache_ttl)
        result['meta'] = cached_meta

        return result

    def get_result(self, formula):
        """Return render data for *formula*, serving from cache when possible."""
        hash = hashlib.sha1(utf8bytes(formula)).hexdigest()

        if self.cache.has_file(hash, 'svg'):
            return self.query_cache(hash)
        else:
            return self.query_texoid(formula, hash)

17
judge/utils/tickets.py Normal file
View file

@ -0,0 +1,17 @@
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from judge.models import Problem
from judge.utils.problems import editable_problems
def own_ticket_filter(profile_id):
    """Q filter matching tickets the profile opened or is assigned to."""
    assigned = Q(assignees__id=profile_id)
    opened = Q(user_id=profile_id)
    return assigned | opened
def filter_visible_tickets(queryset, user, profile=None):
    """Restrict *queryset* to tickets *user* may see: their own or assigned
    tickets, plus tickets attached to problems they can edit."""
    if profile is None:
        profile = user.profile
    problem_tickets = Q(content_type=ContentType.objects.get_for_model(Problem),
                        object_id__in=editable_problems(user, profile))
    return queryset.filter(own_ticket_filter(profile.id) | problem_tickets).distinct()

90
judge/utils/timedelta.py Normal file
View file

@ -0,0 +1,90 @@
import datetime
from django.utils.translation import npgettext, pgettext, ungettext
def nice_repr(timedelta, display='long', sep=', '):
    """
    Turns a datetime.timedelta object into a nice string repr.

    display can be 'minimal', 'short' or 'long' [default]; additional modes
    are 'simple', 'simple-no-seconds', 'sql', 'concise', 'noday',
    'localized' and 'localized-no-seconds'.

    >>> from datetime import timedelta as td
    >>> nice_repr(td(days=1, hours=2, minutes=3, seconds=4))
    '1 day, 2 hours, 3 minutes, 4 seconds'
    >>> nice_repr(td(days=1, seconds=1), 'minimal')
    '1d, 1s'
    """
    assert isinstance(timedelta, datetime.timedelta), 'First argument must be a timedelta.'
    result = []
    # timedelta normalizes itself to (days, seconds, microseconds); split into
    # week/day/hour/minute/second components here.
    weeks = timedelta.days // 7
    days = timedelta.days % 7
    hours = timedelta.seconds // 3600
    minutes = (timedelta.seconds % 3600) // 60
    seconds = timedelta.seconds % 60
    # The 'simple*'/'sql'/'localized*'/'concise'/'noday' modes fold weeks back
    # into days and return immediately; only the word-based modes at the
    # bottom use the result list.
    if display == 'simple-no-seconds':
        days += weeks * 7
        if days:
            if hours or minutes:
                # 's'[days == 1:] pluralizes: '' when days == 1, 's' otherwise.
                return '%d day%s %d:%02d' % (days, 's'[days == 1:], hours, minutes)
            return '%d day%s' % (days, 's'[days == 1:])
        else:
            return '%d:%02d' % (hours, minutes)
    elif display == 'sql':
        # Format accepted by SQL interval-ish 'D HH:MM:SS' parsing.
        days += weeks * 7
        return '%d %02d:%02d:%02d' % (days, hours, minutes, seconds)
    elif display == 'simple':
        days += weeks * 7
        if days:
            return '%d day%s %02d:%02d:%02d' % (days, 's'[days == 1:], hours, minutes, seconds)
        else:
            return '%02d:%02d:%02d' % (hours, minutes, seconds)
    elif display == 'localized':
        # Translators supply the pattern; %d/%h/%m/%s placeholders are then
        # substituted with the numeric components.
        days += weeks * 7
        if days:
            return npgettext('time format with day', '%d day %h:%m:%s', '%d days %h:%m:%s', days) \
                .replace('%d', str(days)).replace('%h', '%02d' % hours).replace('%m', '%02d' % minutes) \
                .replace('%s', '%02d' % seconds)
        else:
            return pgettext('time format without day', '%h:%m:%s') \
                .replace('%h', '%02d' % hours).replace('%m', '%02d' % minutes).replace('%s', '%02d' % seconds)
    elif display == 'localized-no-seconds':
        days += weeks * 7
        if days:
            if hours or minutes:
                return npgettext('time format no seconds with day', '%d day %h:%m', '%d days %h:%m', days) \
                    .replace('%d', str(days)).replace('%h', '%02d' % hours).replace('%m', '%02d' % minutes)
            return ungettext('%d day', '%d days', days) % days
        else:
            return pgettext('hours and minutes', '%h:%m').replace('%h', '%02d' % hours).replace('%m', '%02d' % minutes)
    elif display == 'concise':
        days += weeks * 7
        if days:
            return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
        else:
            return '%02d:%02d:%02d' % (hours, minutes, seconds)
    elif display == 'noday':
        days += weeks * 7
        hours += days * 24
        return '%02d:%02d:%02d' % (hours, minutes, seconds)
    elif display == 'minimal':
        words = ['w', 'd', 'h', 'm', 's']
    elif display == 'short':
        words = [' wks', ' days', ' hrs', ' min', ' sec']
    else:
        words = [' weeks', ' days', ' hours', ' minutes', ' seconds']
    # Word-based modes: emit only the non-zero components, singularized when
    # the value is 1 (by stripping the trailing 's' from multi-char units).
    values = [weeks, days, hours, minutes, seconds]
    for i in range(len(values)):
        if values[i]:
            if values[i] == 1 and len(words[i]) > 1:
                result.append('%i%s' % (values[i], words[i].rstrip('s')))
            else:
                result.append('%i%s' % (values[i], words[i]))
    return sep.join(result)

17
judge/utils/unicode.py Normal file
View file

@ -0,0 +1,17 @@
from django.utils import six
def utf8bytes(maybe_text):
    """Return *maybe_text* as UTF-8 encoded bytes.

    bytes pass through unchanged; None returns None. Uses the builtin
    ``bytes`` type directly instead of the deprecated ``django.utils.six``
    alias (six.binary_type is bytes on Python 3, and django.utils.six was
    removed in Django 3.0).
    """
    if maybe_text is None:
        return None
    if isinstance(maybe_text, bytes):
        return maybe_text
    return maybe_text.encode('utf-8')
def utf8text(maybe_bytes, errors='strict'):
    """Return *maybe_bytes* decoded from UTF-8.

    str passes through unchanged; None returns None. *errors* is a codec
    error-handler name ('strict', 'replace', 'ignore', ...). Uses the builtin
    ``str`` type directly instead of the deprecated ``django.utils.six``
    alias (six.text_type is str on Python 3).
    """
    if maybe_bytes is None:
        return None
    if isinstance(maybe_bytes, str):
        return maybe_bytes
    return maybe_bytes.decode('utf-8', errors)

115
judge/utils/views.py Normal file
View file

@ -0,0 +1,115 @@
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.generic import FormView
from django.views.generic.detail import SingleObjectMixin
from judge.utils.diggpaginator import DiggPaginator
def class_view_decorator(function_decorator):
    """Convert a function-based decorator into a class-based-view decorator.

    Can't subclass the `View` as it breaks inheritance (super in particular),
    so we monkey-patch instead.
    """
    def apply_to_view(View):
        View.dispatch = method_decorator(function_decorator)(View.dispatch)
        return View

    return apply_to_view
def generic_message(request, title, message, status=None):
    """Render the generic title-plus-message page, optionally with an HTTP status."""
    context = {
        'message': message,
        'title': title,
    }
    return render(request, 'generic-message.html', context, status=status)
def paginate_query_context(request):
    """Build template context for pagination links preserving the query string.

    Returns 'page_prefix' (URL ending in 'page=' ready for a page number) and
    'first_page_href' (URL without any page parameter).
    """
    params = request.GET.copy()
    params.setlist('page', [])  # drop any existing page number
    encoded = params.urlencode()
    if not encoded:
        return {'page_prefix': '%s?page=' % request.path,
                'first_page_href': request.path}
    return {'page_prefix': '%s?%s&page=' % (request.path, encoded),
            'first_page_href': '%s?%s' % (request.path, encoded)}
class TitleMixin(object):
    """View mixin injecting 'title' (and optionally 'content_title') into the
    template context."""
    title = '(untitled)'
    content_title = None

    def get_context_data(self, **kwargs):
        context = super(TitleMixin, self).get_context_data(**kwargs)
        context['title'] = self.get_title()
        heading = self.get_content_title()
        if heading is not None:
            context['content_title'] = heading
        return context

    def get_content_title(self):
        # Override to set the on-page heading independently of the <title>.
        return self.content_title

    def get_title(self):
        return self.title
class DiggPaginatorMixin(object):
    """List-view mixin that substitutes DiggPaginator (Digg-style page-number
    ranges) for the default paginator."""

    def get_paginator(self, queryset, per_page, orphans=0,
                      allow_empty_first_page=True, **kwargs):
        # body=6 page numbers in the middle run, padding=2 around the current page.
        return DiggPaginator(queryset, per_page, body=6, padding=2,
                             orphans=orphans, allow_empty_first_page=allow_empty_first_page, **kwargs)
class QueryStringSortMixin(object):
    """View mixin reading an '?order=' query parameter and exposing sort links.

    Subclasses set all_sorts (allowed sort keys), default_sort, and
    default_desc (keys that sort descending by default).
    """
    all_sorts = None
    default_sort = None
    default_desc = ()

    def get_default_sort_order(self, request):
        return self.default_sort

    def get(self, request, *args, **kwargs):
        order = request.GET.get('order', '')
        # Accept only a known sort key, optionally prefixed by a single '-'.
        valid = order.lstrip('-') in self.all_sorts and (not order.startswith('-') or order.count('-') == 1)
        if not valid:
            order = self.get_default_sort_order(request)
        self.order = order
        return super(QueryStringSortMixin, self).get(request, *args, **kwargs)

    def get_sort_context(self):
        params = self.request.GET.copy()
        params.setlist('order', [])
        encoded = params.urlencode()
        if encoded:
            sort_prefix = '%s?%s&order=' % (self.request.path, encoded)
        else:
            sort_prefix = '%s?order=' % self.request.path

        current = self.order.lstrip('-')
        descending = self.order.startswith('-')

        # Each key links to its default direction; the active key links to
        # the opposite of its current direction.
        links = {}
        for key in self.all_sorts:
            links[key] = sort_prefix + ('-' if key in self.default_desc else '') + key
        links[current] = sort_prefix + ('' if descending else '-') + current

        arrows = {key: '' for key in self.all_sorts}
        arrows[current] = ' \u25BE' if descending else u' \u25B4'
        return {'sort_links': links, 'sort_order': arrows}

    def get_sort_paginate_context(self):
        return paginate_query_context(self.request)
def short_circuit_middleware(view):
    """Flag *view* with a ``short_circuit_middleware`` attribute.

    (The consumer of the flag is not visible here — presumably middleware
    checks this attribute to skip processing; confirm against the caller.)
    """
    setattr(view, 'short_circuit_middleware', True)
    return view
class SingleObjectFormView(SingleObjectMixin, FormView):
    """FormView bound to a single model object: resolves ``self.object`` via
    get_object() before handling either GET or POST."""

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super().post(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super().get(request, *args, **kwargs)