Reformat using black

cuom1999 2022-05-14 12:57:27 -05:00
parent efee4ad081
commit a87fb49918
221 changed files with 19127 additions and 7310 deletions
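
Black (https://github.com/psf/black) is an opinionated formatter, and the hunks below are its mechanical rewrites: single-quoted string literals are normalized to double quotes, and calls, literals, and conditions that overflow the default 88-character line limit are exploded into one element per line with a trailing comma. A minimal sketch of the dominant pattern, modeled on the CamoClient hunk below (the variable values are made up for illustration):

server, key, path = "https://camo.example", "secret", "deadbeef"  # sample values

# Before black: single quotes, continuation lines hang-aligned to the open paren.
url = '%s/%s/%s' % (server,
                    key,
                    path)

# After black: double quotes, one element per line, "magic" trailing comma.
url = "%s/%s/%s" % (
    server,
    key,
    path,
)

A commit like this is usually produced by running black over the whole tree (pip install black && black .); black --check --diff . previews the rewrites without modifying any files.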

View file

@@ -10,39 +10,44 @@ class CamoClient(object):
"""Based on https://github.com/sionide21/camo-client"""
def __init__(self, server, key, excluded=(), https=False):
self.server = server.rstrip('/')
self.server = server.rstrip("/")
self.key = key
self.https = https
self.excluded = excluded
def image_url(self, url):
return '%s/%s/%s' % (self.server,
hmac.new(utf8bytes(self.key), utf8bytes(url), sha1).hexdigest(),
utf8bytes(url).hex())
return "%s/%s/%s" % (
self.server,
hmac.new(utf8bytes(self.key), utf8bytes(url), sha1).hexdigest(),
utf8bytes(url).hex(),
)
def rewrite_url(self, url):
if url.startswith(self.server) or url.startswith(self.excluded):
return url
elif url.startswith(('http://', 'https://')):
elif url.startswith(("http://", "https://")):
return self.image_url(url)
elif url.startswith('//'):
return self.rewrite_url(('https:' if self.https else 'http:') + url)
elif url.startswith("//"):
return self.rewrite_url(("https:" if self.https else "http:") + url)
else:
return url
def update_tree(self, doc):
for img in doc.xpath('.//img'):
for attr in ('src', 'data-src'):
for img in doc.xpath(".//img"):
for attr in ("src", "data-src"):
if img.get(attr):
img.set(attr, self.rewrite_url(img.get(attr)))
for obj in doc.xpath('.//object'):
if obj.get('data'):
obj.set('data', self.rewrite_url(obj.get('data')))
for obj in doc.xpath(".//object"):
if obj.get("data"):
obj.set("data", self.rewrite_url(obj.get("data")))
if settings.DMOJ_CAMO_URL and settings.DMOJ_CAMO_KEY:
client = CamoClient(settings.DMOJ_CAMO_URL, key=settings.DMOJ_CAMO_KEY,
excluded=settings.DMOJ_CAMO_EXCLUDE,
https=settings.DMOJ_CAMO_HTTPS)
client = CamoClient(
settings.DMOJ_CAMO_URL,
key=settings.DMOJ_CAMO_KEY,
excluded=settings.DMOJ_CAMO_EXCLUDE,
https=settings.DMOJ_CAMO_HTTPS,
)
else:
client = None

View file

@@ -1,15 +1,17 @@
import requests
from ua_parser import user_agent_parser
_SUPPORT_DATA = requests.get('https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json').json()['data']
_SUPPORT_DATA = requests.get(
"https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json"
).json()["data"]
SUPPORT = 'y'
PARTIAL_SUPPORT = 'a'
UNSUPPORTED = 'n'
POLYFILL = 'p'
UNKNOWN = 'u'
PREFIX = 'x'
DISABLED = 'd'
SUPPORT = "y"
PARTIAL_SUPPORT = "a"
UNSUPPORTED = "n"
POLYFILL = "p"
UNKNOWN = "u"
PREFIX = "x"
DISABLED = "d"
def safe_int(string):
@@ -28,19 +30,19 @@ class BrowserFamily(object):
max_support = UNKNOWN
for version, support in data.items():
if version == 'all':
if version == "all":
self.max_support = support
elif '-' in version:
start, end = version.split('-')
start = tuple(map(int, start.split('.')))
end = tuple(map(int, end.split('.'))) + (1e3000,)
elif "-" in version:
start, end = version.split("-")
start = tuple(map(int, start.split(".")))
end = tuple(map(int, end.split("."))) + (1e3000,)
ranges.append((start, end, support))
if end > max_version:
max_version = end
max_support = support
else:
try:
version = tuple(map(int, version.split('.')))
version = tuple(map(int, version.split(".")))
except ValueError:
pass
else:
@@ -59,7 +61,12 @@ class BrowserFamily(object):
if version > self.max_version:
return self.max_support
for key in ((int_major, int_minor, int_patch), (int_major, int_minor), (int_major,), major):
for key in (
(int_major, int_minor, int_patch),
(int_major, int_minor),
(int_major,),
major,
):
try:
return self._versions[key]
except KeyError:
@@ -75,7 +82,9 @@ class BrowserFamily(object):
class Feat(object):
def __init__(self, data):
self._data = data
self._family = {name: BrowserFamily(data) for name, data in data['stats'].items()}
self._family = {
name: BrowserFamily(data) for name, data in data["stats"].items()
}
def __getitem__(self, item):
return self._family[item]
@@ -97,31 +106,31 @@ class CanIUse(object):
def __init__(self, ua):
self._agent = user_agent_parser.Parse(ua)
os_family = self._agent['os']['family']
browser_family = self._agent['user_agent']['family']
os_family = self._agent["os"]["family"]
browser_family = self._agent["user_agent"]["family"]
family = None
if os_family == 'Android':
if 'Firefox' in browser_family:
family = 'and_ff'
elif 'Chrome' in browser_family:
family = 'and_chr'
elif 'Android' in browser_family:
family = 'android'
if os_family == "Android":
if "Firefox" in browser_family:
family = "and_ff"
elif "Chrome" in browser_family:
family = "and_chr"
elif "Android" in browser_family:
family = "android"
else:
if 'Edge' in browser_family:
family = 'edge'
elif 'Firefox' in browser_family:
family = 'firefox'
elif 'Chrome' in browser_family:
family = 'chrome'
elif 'IE' in browser_family:
family = 'ie'
elif 'Opera' in browser_family:
family = 'opera'
elif 'Safari' in browser_family:
family = 'safari'
if "Edge" in browser_family:
family = "edge"
elif "Firefox" in browser_family:
family = "firefox"
elif "Chrome" in browser_family:
family = "chrome"
elif "IE" in browser_family:
family = "ie"
elif "Opera" in browser_family:
family = "opera"
elif "Safari" in browser_family:
family = "safari"
self._family = family
@@ -134,12 +143,12 @@ class CanIUse(object):
except KeyError:
return UNKNOWN
else:
ua = self._agent['user_agent']
return stats.check(ua['major'], ua['minor'], ua['patch'])[0]
ua = self._agent["user_agent"]
return stats.check(ua["major"], ua["minor"], ua["patch"])[0]
def __getattr__(self, attr):
try:
feat = database[attr.replace('_', '-')]
feat = database[attr.replace("_", "-")]
except KeyError:
raise AttributeError(attr)
else:

View file

@@ -12,11 +12,11 @@ class Progress:
def _update_state(self):
self.task.update_state(
state='PROGRESS',
state="PROGRESS",
meta={
'done': self._done,
'total': self._total,
'stage': self._stage,
"done": self._done,
"total": self._total,
"stage": self._stage,
},
)
@@ -54,12 +54,12 @@ class Progress:
def task_status_url(result, message=None, redirect=None):
args = {}
if message:
args['message'] = message
args["message"] = message
if redirect:
args['redirect'] = redirect
url = reverse('task_status', args=[result.id])
args["redirect"] = redirect
url = reverse("task_status", args=[result.id])
if args:
url += '?' + urlencode(args)
url += "?" + urlencode(args)
return url

View file

@@ -4,10 +4,10 @@ from functools import reduce
from django.core.paginator import InvalidPage, Page, Paginator
__all__ = (
'InvalidPage',
'ExPaginator',
'DiggPaginator',
'QuerySetDiggPaginator',
"InvalidPage",
"ExPaginator",
"DiggPaginator",
"QuerySetDiggPaginator",
)
@@ -182,15 +182,17 @@ class DiggPaginator(ExPaginator):
"""
def __init__(self, *args, **kwargs):
self.body = kwargs.pop('body', 10)
self.tail = kwargs.pop('tail', 2)
self.align_left = kwargs.pop('align_left', False)
self.margin = kwargs.pop('margin', 4) # TODO: make the default relative to body?
self.body = kwargs.pop("body", 10)
self.tail = kwargs.pop("tail", 2)
self.align_left = kwargs.pop("align_left", False)
self.margin = kwargs.pop(
"margin", 4
) # TODO: make the default relative to body?
# validate padding value
max_padding = int(math.ceil(self.body / 2.0) - 1)
self.padding = kwargs.pop('padding', min(4, max_padding))
self.padding = kwargs.pop("padding", min(4, max_padding))
if self.padding > max_padding:
raise ValueError('padding too large for body (max %d)' % max_padding)
raise ValueError("padding too large for body (max %d)" % max_padding)
super(DiggPaginator, self).__init__(*args, **kwargs)
def page(self, number, *args, **kwargs):
@@ -202,13 +204,24 @@ class DiggPaginator(ExPaginator):
number = int(number) # we know this will work
# easier access
num_pages, body, tail, padding, margin = \
self.num_pages, self.body, self.tail, self.padding, self.margin
num_pages, body, tail, padding, margin = (
self.num_pages,
self.body,
self.tail,
self.padding,
self.margin,
)
# put active page in middle of main range
main_range = list(map(int, [
math.floor(number - body / 2.0) + 1, # +1 = shift odd body to right
math.floor(number + body / 2.0)]))
main_range = list(
map(
int,
[
math.floor(number - body / 2.0) + 1, # +1 = shift odd body to right
math.floor(number + body / 2.0),
],
)
)
# adjust bounds
if main_range[0] < 1:
main_range = list(map(abs(main_range[0] - 1).__add__, main_range))
@@ -249,7 +262,10 @@ class DiggPaginator(ExPaginator):
# section, again.
main_range = [1, num_pages]
else:
main_range = [min(num_pages - body + 1, max(number - padding, main_range[0])), num_pages]
main_range = [
min(num_pages - body + 1, max(number - padding, main_range[0])),
num_pages,
]
else:
trailing = list(range(num_pages - tail + 1, num_pages + 1))
@@ -263,8 +279,10 @@ class DiggPaginator(ExPaginator):
page.main_range = list(range(main_range[0], main_range[1] + 1))
page.leading_range = leading
page.trailing_range = trailing
page.page_range = reduce(lambda x, y: x + ((x and y) and [False]) + y,
[page.leading_range, page.main_range, page.trailing_range])
page.page_range = reduce(
lambda x, y: x + ((x and y) and [False]) + y,
[page.leading_range, page.main_range, page.trailing_range],
)
page.__class__ = DiggPage
return page
@@ -272,10 +290,16 @@ class DiggPaginator(ExPaginator):
class DiggPage(Page):
def __str__(self):
return " ... ".join(filter(None, [
" ".join(map(str, self.leading_range)),
" ".join(map(str, self.main_range)),
" ".join(map(str, self.trailing_range))]))
return " ... ".join(
filter(
None,
[
" ".join(map(str, self.leading_range)),
" ".join(map(str, self.main_range)),
" ".join(map(str, self.trailing_range)),
],
)
)
@property
def num_pages(self):

View file

@@ -24,10 +24,10 @@ class HashFileCache(object):
return os.path.join(self.root, hash, file)
def get_url(self, hash, file):
return urljoin(self.url, '%s/%s' % (hash, file))
return urljoin(self.url, "%s/%s" % (hash, file))
def read_file(self, hash, file):
return open(self.get_path(hash, file), 'rb')
return open(self.get_path(hash, file), "rb")
def read_data(self, hash, file):
with self.read_file(hash, file) as f:
@@ -35,10 +35,10 @@ class HashFileCache(object):
def cache_data(self, hash, file, data, url=True, gzip=True):
if gzip and self.gzip:
with gzip_open(self.get_path(hash, file + '.gz'), 'wb') as f:
with gzip_open(self.get_path(hash, file + ".gz"), "wb") as f:
f.write(data)
with open(self.get_path(hash, file), 'wb') as f:
with open(self.get_path(hash, file), "wb") as f:
f.write(data)
if url:

View file

@@ -8,27 +8,25 @@ import os, os.path
import tempfile
import shutil
__all__ = (
'handle_upload', 'save_upload', 'FineUploadForm', 'FineUploadFileInput'
)
__all__ = ("handle_upload", "save_upload", "FineUploadForm", "FineUploadFileInput")
def combine_chunks(total_parts, total_size, source_folder, dest):
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
with open(dest, 'wb+') as destination:
with open(dest, "wb+") as destination:
for i in range(total_parts):
part = os.path.join(source_folder, str(i))
with open(part, 'rb') as source:
with open(part, "rb") as source:
destination.write(source.read())
def save_upload(f, path):
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'wb+') as destination:
if hasattr(f, 'multiple_chunks') and f.multiple_chunks():
with open(path, "wb+") as destination:
if hasattr(f, "multiple_chunks") and f.multiple_chunks():
for chunk in f.chunks():
destination.write(chunk)
else:
@@ -37,29 +35,35 @@ def save_upload(f, path):
# pass callback function to post_upload
def handle_upload(f, fileattrs, upload_dir, post_upload=None):
chunks_dir = os.path.join(tempfile.gettempdir(), 'chunk_upload_tmp')
chunks_dir = os.path.join(tempfile.gettempdir(), "chunk_upload_tmp")
if not os.path.exists(os.path.dirname(chunks_dir)):
os.makedirs(os.path.dirname(chunks_dir))
chunked = False
dest_folder = upload_dir
dest = os.path.join(dest_folder, fileattrs['qqfilename'])
dest = os.path.join(dest_folder, fileattrs["qqfilename"])
# Chunked
if fileattrs.get('qqtotalparts') and int(fileattrs['qqtotalparts']) > 1:
if fileattrs.get("qqtotalparts") and int(fileattrs["qqtotalparts"]) > 1:
chunked = True
dest_folder = os.path.join(chunks_dir, fileattrs['qquuid'])
dest = os.path.join(dest_folder, fileattrs['qqfilename'], str(fileattrs['qqpartindex']))
dest_folder = os.path.join(chunks_dir, fileattrs["qquuid"])
dest = os.path.join(
dest_folder, fileattrs["qqfilename"], str(fileattrs["qqpartindex"])
)
save_upload(f, dest)
# If the last chunk has been sent, combine the parts.
if chunked and (fileattrs['qqtotalparts'] - 1 == fileattrs['qqpartindex']):
combine_chunks(fileattrs['qqtotalparts'],
fileattrs['qqtotalfilesize'],
if chunked and (fileattrs["qqtotalparts"] - 1 == fileattrs["qqpartindex"]):
combine_chunks(
fileattrs["qqtotalparts"],
fileattrs["qqtotalfilesize"],
source_folder=os.path.dirname(dest),
dest=os.path.join(upload_dir, fileattrs['qqfilename']))
dest=os.path.join(upload_dir, fileattrs["qqfilename"]),
)
shutil.rmtree(os.path.dirname(os.path.dirname(dest)))
if post_upload and (not chunked or fileattrs['qqtotalparts'] - 1 == fileattrs['qqpartindex']):
if post_upload and (
not chunked or fileattrs["qqtotalparts"] - 1 == fileattrs["qqpartindex"]
):
post_upload()
@@ -75,13 +79,16 @@ class FineUploadForm(forms.Form):
class FineUploadFileInput(ClearableFileInput):
template_name = 'widgets/fine_uploader.html'
template_name = "widgets/fine_uploader.html"
def fine_uploader_id(self, name):
return name + '_' + 'fine_uploader'
return name + "_" + "fine_uploader"
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context['widget'].update({
'fine_uploader_id': self.fine_uploader_id(name),
})
return context
context["widget"].update(
{
"fine_uploader_id": self.fine_uploader_id(name),
}
)
return context

View file

@@ -12,25 +12,25 @@ from mistune import escape
from judge.utils.file_cache import HashFileCache
from judge.utils.unicode import utf8bytes, utf8text
logger = logging.getLogger('judge.mathoid')
reescape = re.compile(r'(?<!\\)(?:\\{2})*[$]')
logger = logging.getLogger("judge.mathoid")
reescape = re.compile(r"(?<!\\)(?:\\{2})*[$]")
REPLACES = [
('\u2264', r'\le'),
('\u2265', r'\ge'),
('\u2026', '...'),
('\u2212', '-'),
('&le;', r'\le'),
('&ge;', r'\ge'),
('&lt;', '<'),
('&gt;', '>'),
('&amp;', '&'),
('&#8722;', '-'),
('&#8804;', r'\le'),
('&#8805;', r'\ge'),
('&#8230;', '...'),
(r'\lt', '<'),
(r'\gt', '>'),
("\u2264", r"\le"),
("\u2265", r"\ge"),
("\u2026", "..."),
("\u2212", "-"),
("&le;", r"\le"),
("&ge;", r"\ge"),
("&lt;", "<"),
("&gt;", ">"),
("&amp;", "&"),
("&#8722;", "-"),
("&#8804;", r"\le"),
("&#8805;", r"\ge"),
("&#8230;", "..."),
(r"\lt", "<"),
(r"\gt", ">"),
]
@@ -41,15 +41,17 @@ def format_math(math):
class MathoidMathParser(object):
types = ('svg', 'mml', 'tex', 'jax')
types = ("svg", "mml", "tex", "jax")
def __init__(self, type):
self.type = type
self.mathoid_url = settings.MATHOID_URL
self.cache = HashFileCache(settings.MATHOID_CACHE_ROOT,
settings.MATHOID_CACHE_URL,
settings.MATHOID_GZIP)
self.cache = HashFileCache(
settings.MATHOID_CACHE_ROOT,
settings.MATHOID_CACHE_URL,
settings.MATHOID_GZIP,
)
mml_cache = settings.MATHOID_MML_CACHE
self.mml_cache = mml_cache and caches[mml_cache]
@@ -61,69 +63,80 @@ class MathoidMathParser(object):
self.cache.create(hash)
try:
response = requests.post(self.mathoid_url, data={
'q': reescape.sub(lambda m: '\\' + m.group(0), formula).encode('utf-8'),
'type': 'tex' if formula.startswith(r'\displaystyle') else 'inline-tex',
})
response = requests.post(
self.mathoid_url,
data={
"q": reescape.sub(lambda m: "\\" + m.group(0), formula).encode(
"utf-8"
),
"type": "tex"
if formula.startswith(r"\displaystyle")
else "inline-tex",
},
)
response.raise_for_status()
data = response.json()
except requests.ConnectionError:
logger.exception('Failed to connect to mathoid for: %s', formula)
logger.exception("Failed to connect to mathoid for: %s", formula)
return
except requests.HTTPError as e:
logger.error('Mathoid failed to render: %s\n%s', formula, e.response.text)
logger.error("Mathoid failed to render: %s\n%s", formula, e.response.text)
return
except Exception:
logger.exception('Failed to connect to mathoid for: %s', formula)
logger.exception("Failed to connect to mathoid for: %s", formula)
return
if not data['success']:
logger.error('Mathoid failure for: %s\n%s', formula, data)
if not data["success"]:
logger.error("Mathoid failure for: %s\n%s", formula, data)
return
if any(i not in data for i in ('mml', 'png', 'svg', 'mathoidStyle')):
logger.error('Mathoid did not return required information (mml, png, svg, mathoidStyle needed):\n%s', data)
if any(i not in data for i in ("mml", "png", "svg", "mathoidStyle")):
logger.error(
"Mathoid did not return required information (mml, png, svg, mathoidStyle needed):\n%s",
data,
)
return
css = data['mathoidStyle']
mml = data['mml']
css = data["mathoidStyle"]
mml = data["mml"]
result = {
'css': css, 'mml': mml,
'png': self.cache.cache_data(hash, 'png', bytearray(data['png']['data'])),
'svg': self.cache.cache_data(hash, 'svg', data['svg'].encode('utf-8')),
"css": css,
"mml": mml,
"png": self.cache.cache_data(hash, "png", bytearray(data["png"]["data"])),
"svg": self.cache.cache_data(hash, "svg", data["svg"].encode("utf-8")),
}
self.cache.cache_data(hash, 'mml', mml.encode('utf-8'), url=False, gzip=False)
self.cache.cache_data(hash, 'css', css.encode('utf-8'), url=False, gzip=False)
self.cache.cache_data(hash, "mml", mml.encode("utf-8"), url=False, gzip=False)
self.cache.cache_data(hash, "css", css.encode("utf-8"), url=False, gzip=False)
return result
def query_cache(self, hash):
result = {
'svg': self.cache.get_url(hash, 'svg'),
'png': self.cache.get_url(hash, 'png'),
"svg": self.cache.get_url(hash, "svg"),
"png": self.cache.get_url(hash, "png"),
}
key = 'mathoid:css:' + hash
css = result['css'] = self.css_cache.get(key)
key = "mathoid:css:" + hash
css = result["css"] = self.css_cache.get(key)
if css is None:
css = result['css'] = self.cache.read_data(hash, 'css').decode('utf-8')
css = result["css"] = self.cache.read_data(hash, "css").decode("utf-8")
self.css_cache.set(key, css, self.mml_cache_ttl)
mml = None
if self.mml_cache:
mml = result['mml'] = self.mml_cache.get('mathoid:mml:' + hash)
mml = result["mml"] = self.mml_cache.get("mathoid:mml:" + hash)
if mml is None:
mml = result['mml'] = self.cache.read_data(hash, 'mml').decode('utf-8')
mml = result["mml"] = self.cache.read_data(hash, "mml").decode("utf-8")
if self.mml_cache:
self.mml_cache.set('mathoid:mml:' + hash, mml, self.mml_cache_ttl)
self.mml_cache.set("mathoid:mml:" + hash, mml, self.mml_cache_ttl)
return result
def get_result(self, formula):
if self.type == 'tex':
if self.type == "tex":
return
hash = hashlib.sha1(utf8bytes(formula)).hexdigest()
formula = utf8text(formula)
if self.cache.has_file(hash, 'css'):
if self.cache.has_file(hash, "css"):
result = self.query_cache(hash)
else:
result = self.query_mathoid(formula, hash)
@@ -131,55 +144,76 @@ class MathoidMathParser(object):
if not result:
return None
result['tex'] = formula
result['display'] = formula.startswith(r'\displaystyle')
result["tex"] = formula
result["display"] = formula.startswith(r"\displaystyle")
return {
'mml': self.output_mml,
'msp': self.output_msp,
'svg': self.output_svg,
'jax': self.output_jax,
'png': self.output_png,
'raw': lambda x: x,
"mml": self.output_mml,
"msp": self.output_msp,
"svg": self.output_svg,
"jax": self.output_jax,
"png": self.output_png,
"raw": lambda x: x,
}[self.type](result)
def output_mml(self, result):
return result['mml']
return result["mml"]
def output_msp(self, result):
# 100% MediaWiki compatibility.
return format_html('<span class="{5}-math">'
'<span class="mwe-math-mathml-{5} mwe-math-mathml-a11y"'
' style="display: none;">{0}</span>'
'<img src="{1}" class="mwe-math-fallback-image-{5}"'
' onerror="this.src=\'{2}\';this.onerror=null"'
' aria-hidden="true" style="{3}" alt="{4}"></span>',
mark_safe(result['mml']), result['svg'], result['png'], result['css'], result['tex'],
['inline', 'display'][result['display']])
return format_html(
'<span class="{5}-math">'
'<span class="mwe-math-mathml-{5} mwe-math-mathml-a11y"'
' style="display: none;">{0}</span>'
'<img src="{1}" class="mwe-math-fallback-image-{5}"'
" onerror=\"this.src='{2}';this.onerror=null\""
' aria-hidden="true" style="{3}" alt="{4}"></span>',
mark_safe(result["mml"]),
result["svg"],
result["png"],
result["css"],
result["tex"],
["inline", "display"][result["display"]],
)
def output_jax(self, result):
return format_html('<span class="{4}">'
'''<img class="tex-image" src="{0}" style="{2}" alt="{3}"'''
''' onerror="this.src='{1}';this.onerror=null">'''
'''<span class="tex-text" style="display:none">{5}{3}{5}</span>'''
'</span>',
result['svg'], result['png'], result['css'], result['tex'],
['inline-math', 'display-math'][result['display']], ['~', '$$'][result['display']])
return format_html(
'<span class="{4}">'
'''<img class="tex-image" src="{0}" style="{2}" alt="{3}"'''
""" onerror="this.src='{1}';this.onerror=null">"""
"""<span class="tex-text" style="display:none">{5}{3}{5}</span>"""
"</span>",
result["svg"],
result["png"],
result["css"],
result["tex"],
["inline-math", "display-math"][result["display"]],
["~", "$$"][result["display"]],
)
def output_svg(self, result):
return format_html('<img class="{4}" src="{0}" style="{2}" alt="{3}" '
'''onerror="this.src='{1}';this.onerror=null">''',
result['svg'], result['png'], result['css'], result['tex'],
['inline-math', 'display-math'][result['display']])
return format_html(
'<img class="{4}" src="{0}" style="{2}" alt="{3}" '
"""onerror="this.src='{1}';this.onerror=null">""",
result["svg"],
result["png"],
result["css"],
result["tex"],
["inline-math", "display-math"][result["display"]],
)
def output_png(self, result):
return format_html('<img class="{3}" src="{0}" style="{1}" alt="{2}">',
result['png'], result['css'], result['tex'],
['inline-math', 'display-math'][result['display']])
return format_html(
'<img class="{3}" src="{0}" style="{1}" alt="{2}">',
result["png"],
result["css"],
result["tex"],
["inline-math", "display-math"][result["display"]],
)
def display_math(self, math):
math = format_math(math)
return self.get_result(r'\displaystyle ' + math) or r'\[%s\]' % escape(math)
return self.get_result(r"\displaystyle " + math) or r"\[%s\]" % escape(math)
def inline_math(self, math):
math = format_math(math)
return self.get_result(math) or r'\(%s\)' % escape(math)
return self.get_result(math) or r"\(%s\)" % escape(math)

View file

@@ -10,15 +10,15 @@ def generate_opengraph(cache_key, data, style):
if metadata is None:
description = None
tree = reference(markdown(data, style)).tree
for p in tree.iterfind('.//p'):
for p in tree.iterfind(".//p"):
text = p.text_content().strip()
if text:
description = text
break
if description:
for remove in (r'\[', r'\]', r'\(', r'\)'):
description = description.replace(remove, '')
img = tree.xpath('.//img')
metadata = truncatewords(description, 60), img[0].get('src') if img else None
for remove in (r"\[", r"\]", r"\(", r"\)"):
description = description.replace(remove, "")
img = tree.xpath(".//img")
metadata = truncatewords(description, 60), img[0].get("src") if img else None
cache.set(cache_key, metadata, 86400)
return metadata

View file

@@ -13,13 +13,18 @@ from django.urls import reverse
from django.utils.translation import gettext as _
from django.core.cache import cache
VALIDATOR_TEMPLATE_PATH = 'validator_template/template.py'
VALIDATOR_TEMPLATE_PATH = "validator_template/template.py"
if os.altsep:
def split_path_first(path, repath=re.compile('[%s]' % re.escape(os.sep + os.altsep))):
def split_path_first(
path, repath=re.compile("[%s]" % re.escape(os.sep + os.altsep))
):
return repath.split(path, 1)
else:
def split_path_first(path):
return path.split(os.sep, 1)
@@ -31,8 +36,8 @@ class ProblemDataStorage(FileSystemStorage):
def url(self, name):
path = split_path_first(name)
if len(path) != 2:
raise ValueError('This file is not accessible via a URL.')
return reverse('problem_data_file', args=path)
raise ValueError("This file is not accessible via a URL.")
return reverse("problem_data_file", args=path)
def _save(self, name, content):
if self.exists(name):
@@ -66,8 +71,8 @@ class ProblemDataCompiler(object):
batch = None
def end_batch():
if not batch['batched']:
raise ProblemDataError(_('Empty batches not allowed.'))
if not batch["batched"]:
raise ProblemDataError(_("Empty batches not allowed."))
cases.append(batch)
def make_checker_for_validator(case):
@@ -75,109 +80,123 @@ class ProblemDataCompiler(object):
validator_path = split_path_first(case.custom_validator.name)
if len(validator_path) != 2:
raise ProblemDataError(_('How did you corrupt the custom checker path?'))
raise ProblemDataError(
_("How did you corrupt the custom checker path?")
)
checker = os.path.join(settings.DMOJ_PROBLEM_DATA_ROOT,
validator_path[0],
checker_name)
checker = os.path.join(
settings.DMOJ_PROBLEM_DATA_ROOT, validator_path[0], checker_name
)
validator_name = validator_path[1]
shutil.copy(VALIDATOR_TEMPLATE_PATH, checker)
# replace {{filecpp}} and {{problemid}} in checker file
filedata = open(checker, 'r').read()
filedata = filedata.replace('{{filecpp}}', "\'%s\'" % validator_name)
filedata = filedata.replace('{{problemid}}', "\'%s\'" % validator_path[0])
open(checker, 'w').write(filedata)
filedata = open(checker, "r").read()
filedata = filedata.replace("{{filecpp}}", "'%s'" % validator_name)
filedata = filedata.replace("{{problemid}}", "'%s'" % validator_path[0])
open(checker, "w").write(filedata)
return checker_name
def make_checker(case):
if (case.checker == 'custom'):
if case.checker == "custom":
custom_checker_path = split_path_first(case.custom_checker.name)
if len(custom_checker_path) != 2:
raise ProblemDataError(_('How did you corrupt the custom checker path?'))
return(custom_checker_path[1])
raise ProblemDataError(
_("How did you corrupt the custom checker path?")
)
return custom_checker_path[1]
if (case.checker == 'customval'):
if case.checker == "customval":
return make_checker_for_validator(case)
if case.checker_args:
return {
'name': case.checker,
'args': json.loads(case.checker_args),
"name": case.checker,
"args": json.loads(case.checker_args),
}
return case.checker
for i, case in enumerate(self.cases, 1):
if case.type == 'C':
if case.type == "C":
data = {}
if batch:
case.points = None
case.is_pretest = batch['is_pretest']
case.is_pretest = batch["is_pretest"]
else:
if case.points is None:
raise ProblemDataError(_('Points must be defined for non-batch case #%d.') % i)
data['is_pretest'] = case.is_pretest
raise ProblemDataError(
_("Points must be defined for non-batch case #%d.") % i
)
data["is_pretest"] = case.is_pretest
if not self.generator:
if case.input_file not in self.files:
raise ProblemDataError(_('Input file for case %d does not exist: %s') %
(i, case.input_file))
raise ProblemDataError(
_("Input file for case %d does not exist: %s")
% (i, case.input_file)
)
if case.output_file not in self.files:
raise ProblemDataError(_('Output file for case %d does not exist: %s') %
(i, case.output_file))
raise ProblemDataError(
_("Output file for case %d does not exist: %s")
% (i, case.output_file)
)
if case.input_file:
data['in'] = case.input_file
data["in"] = case.input_file
if case.output_file:
data['out'] = case.output_file
data["out"] = case.output_file
if case.points is not None:
data['points'] = case.points
data["points"] = case.points
if case.generator_args:
data['generator_args'] = case.generator_args.splitlines()
data["generator_args"] = case.generator_args.splitlines()
if case.output_limit is not None:
data['output_limit_length'] = case.output_limit
data["output_limit_length"] = case.output_limit
if case.output_prefix is not None:
data['output_prefix_length'] = case.output_prefix
data["output_prefix_length"] = case.output_prefix
if case.checker:
data['checker'] = make_checker(case)
data["checker"] = make_checker(case)
else:
case.checker_args = ''
case.save(update_fields=('checker_args', 'is_pretest'))
(batch['batched'] if batch else cases).append(data)
elif case.type == 'S':
case.checker_args = ""
case.save(update_fields=("checker_args", "is_pretest"))
(batch["batched"] if batch else cases).append(data)
elif case.type == "S":
if batch:
end_batch()
if case.points is None:
raise ProblemDataError(_('Batch start case #%d requires points.') % i)
raise ProblemDataError(
_("Batch start case #%d requires points.") % i
)
batch = {
'points': case.points,
'batched': [],
'is_pretest': case.is_pretest,
"points": case.points,
"batched": [],
"is_pretest": case.is_pretest,
}
if case.generator_args:
batch['generator_args'] = case.generator_args.splitlines()
batch["generator_args"] = case.generator_args.splitlines()
if case.output_limit is not None:
batch['output_limit_length'] = case.output_limit
batch["output_limit_length"] = case.output_limit
if case.output_prefix is not None:
batch['output_prefix_length'] = case.output_prefix
batch["output_prefix_length"] = case.output_prefix
if case.checker:
batch['checker'] = make_checker(case)
batch["checker"] = make_checker(case)
else:
case.checker_args = ''
case.input_file = ''
case.output_file = ''
case.save(update_fields=('checker_args', 'input_file', 'output_file'))
elif case.type == 'E':
case.checker_args = ""
case.input_file = ""
case.output_file = ""
case.save(update_fields=("checker_args", "input_file", "output_file"))
elif case.type == "E":
if not batch:
raise ProblemDataError(_('Attempt to end batch outside of one in case #%d') % i)
case.is_pretest = batch['is_pretest']
case.input_file = ''
case.output_file = ''
case.generator_args = ''
case.checker = ''
case.checker_args = ''
raise ProblemDataError(
_("Attempt to end batch outside of one in case #%d") % i
)
case.is_pretest = batch["is_pretest"]
case.input_file = ""
case.output_file = ""
case.generator_args = ""
case.checker = ""
case.checker_args = ""
case.save()
end_batch()
batch = None
@@ -189,44 +208,44 @@ class ProblemDataCompiler(object):
if self.data.zipfile:
zippath = split_path_first(self.data.zipfile.name)
if len(zippath) != 2:
raise ProblemDataError(_('How did you corrupt the zip path?'))
init['archive'] = zippath[1]
raise ProblemDataError(_("How did you corrupt the zip path?"))
init["archive"] = zippath[1]
if self.generator:
generator_path = split_path_first(self.generator.name)
if len(generator_path) != 2:
raise ProblemDataError(_('How did you corrupt the generator path?'))
init['generator'] = generator_path[1]
raise ProblemDataError(_("How did you corrupt the generator path?"))
init["generator"] = generator_path[1]
pretests = [case for case in cases if case['is_pretest']]
pretests = [case for case in cases if case["is_pretest"]]
for case in cases:
del case['is_pretest']
del case["is_pretest"]
if pretests:
init['pretest_test_cases'] = pretests
init["pretest_test_cases"] = pretests
if cases:
init['test_cases'] = cases
init["test_cases"] = cases
if self.data.output_limit is not None:
init['output_limit_length'] = self.data.output_limit
init["output_limit_length"] = self.data.output_limit
if self.data.output_prefix is not None:
init['output_prefix_length'] = self.data.output_prefix
init["output_prefix_length"] = self.data.output_prefix
if self.data.checker:
if self.data.checker == 'interact':
init['interactive'] = {
'files': split_path_first(self.data.interactive_judge.name)[1],
'feedback': True
if self.data.checker == "interact":
init["interactive"] = {
"files": split_path_first(self.data.interactive_judge.name)[1],
"feedback": True,
}
init['unbuffered'] = True
init["unbuffered"] = True
else:
init['checker'] = make_checker(self.data)
init["checker"] = make_checker(self.data)
else:
self.data.checker_args = ''
self.data.checker_args = ""
return init
def compile(self):
from judge.models import problem_data_storage
yml_file = '%s/init.yml' % self.problem.code
yml_file = "%s/init.yml" % self.problem.code
try:
init = yaml.safe_dump(self.make_init())
except ProblemDataError as e:
@@ -234,7 +253,7 @@ class ProblemDataCompiler(object):
self.data.save()
problem_data_storage.delete(yml_file)
else:
self.data.feedback = ''
self.data.feedback = ""
self.data.save()
problem_data_storage.save(yml_file, ContentFile(init))
@@ -245,26 +264,27 @@ class ProblemDataCompiler(object):
def get_visible_content(data):
data = data or b''
data = data.replace(b'\r\n', b'\r').replace(b'\r', b'\n')
data = data or b""
data = data.replace(b"\r\n", b"\r").replace(b"\r", b"\n")
data = data.decode('utf-8')
data = data.decode("utf-8")
if (len(data) > settings.TESTCASE_VISIBLE_LENGTH):
data = data[:settings.TESTCASE_VISIBLE_LENGTH]
data += '.' * 3
if len(data) > settings.TESTCASE_VISIBLE_LENGTH:
data = data[: settings.TESTCASE_VISIBLE_LENGTH]
data += "." * 3
return data
def get_file_cachekey(file):
return hashlib.sha1(file.encode()).hexdigest()
def get_problem_case(problem, files):
result = {}
uncached_files = []
for file in files:
cache_key = 'problem_archive:%s:%s' % (problem.code, get_file_cachekey(file))
cache_key = "problem_archive:%s:%s" % (problem.code, get_file_cachekey(file))
qs = cache.get(cache_key)
if qs is None:
uncached_files.append(file)
@@ -274,33 +294,33 @@ def get_problem_case(problem, files):
if not uncached_files:
return result
archive_path = os.path.join(settings.DMOJ_PROBLEM_DATA_ROOT,
str(problem.data_files.zipfile))
archive_path = os.path.join(
settings.DMOJ_PROBLEM_DATA_ROOT, str(problem.data_files.zipfile)
)
if not os.path.exists(archive_path):
raise Exception(
'archive file "%s" does not exist' % archive_path)
raise Exception('archive file "%s" does not exist' % archive_path)
try:
archive = zipfile.ZipFile(archive_path, 'r')
archive = zipfile.ZipFile(archive_path, "r")
except zipfile.BadZipfile:
raise Exception('bad archive: "%s"' % archive_path)
for file in uncached_files:
cache_key = 'problem_archive:%s:%s' % (problem.code, get_file_cachekey(file))
cache_key = "problem_archive:%s:%s" % (problem.code, get_file_cachekey(file))
with archive.open(file) as f:
s = f.read(settings.TESTCASE_VISIBLE_LENGTH + 3)
# add this so there are no characters left behind (ex, 'á' = 2 utf-8 chars)
while True:
try:
s.decode('utf-8')
s.decode("utf-8")
break
except UnicodeDecodeError:
next_char = f.read(1)
if next_char:
s += next_char
else:
raise Exception('File %s is not able to decode in utf-8' % file)
raise Exception("File %s is not able to decode in utf-8" % file)
qs = get_visible_content(s)
cache.set(cache_key, qs, 86400)
result[file] = qs
return result
return result

View file

@@ -11,79 +11,120 @@ from django.utils.translation import gettext as _, gettext_noop
from judge.models import Problem, Submission
__all__ = ['contest_completed_ids', 'get_result_data', 'user_completed_ids', 'user_authored_ids', 'user_editable_ids']
__all__ = [
"contest_completed_ids",
"get_result_data",
"user_completed_ids",
"user_authored_ids",
"user_editable_ids",
]
def user_authored_ids(profile):
result = set(Problem.objects.filter(authors=profile).values_list('id', flat=True))
result = set(Problem.objects.filter(authors=profile).values_list("id", flat=True))
return result
def user_editable_ids(profile):
result = set((Problem.objects.filter(authors=profile) | Problem.objects.filter(curators=profile))
.values_list('id', flat=True))
result = set(
(
Problem.objects.filter(authors=profile)
| Problem.objects.filter(curators=profile)
).values_list("id", flat=True)
)
return result
def contest_completed_ids(participation):
key = 'contest_complete:%d' % participation.id
key = "contest_complete:%d" % participation.id
result = cache.get(key)
if result is None:
result = set(participation.submissions.filter(submission__result='AC', points=F('problem__points'))
.values_list('problem__problem__id', flat=True).distinct())
result = set(
participation.submissions.filter(
submission__result="AC", points=F("problem__points")
)
.values_list("problem__problem__id", flat=True)
.distinct()
)
cache.set(key, result, 86400)
return result
def user_completed_ids(profile):
key = 'user_complete:%d' % profile.id
key = "user_complete:%d" % profile.id
result = cache.get(key)
if result is None:
result = set(Submission.objects.filter(user=profile, result='AC', points=F('problem__points'))
.values_list('problem_id', flat=True).distinct())
result = set(
Submission.objects.filter(
user=profile, result="AC", points=F("problem__points")
)
.values_list("problem_id", flat=True)
.distinct()
)
cache.set(key, result, 86400)
return result
def contest_attempted_ids(participation):
key = 'contest_attempted:%s' % participation.id
key = "contest_attempted:%s" % participation.id
result = cache.get(key)
if result is None:
result = {id: {'achieved_points': points, 'max_points': max_points}
for id, max_points, points in (participation.submissions
.values_list('problem__problem__id', 'problem__points')
.annotate(points=Max('points'))
.filter(points__lt=F('problem__points')))}
result = {
id: {"achieved_points": points, "max_points": max_points}
for id, max_points, points in (
participation.submissions.values_list(
"problem__problem__id", "problem__points"
)
.annotate(points=Max("points"))
.filter(points__lt=F("problem__points"))
)
}
cache.set(key, result, 86400)
return result
def user_attempted_ids(profile):
key = 'user_attempted:%s' % profile.id
key = "user_attempted:%s" % profile.id
result = cache.get(key)
if result is None:
result = {id: {'achieved_points': points, 'max_points': max_points}
for id, max_points, points in (Submission.objects.filter(user=profile)
.values_list('problem__id', 'problem__points')
.annotate(points=Max('points'))
.filter(points__lt=F('problem__points')))}
result = {
id: {"achieved_points": points, "max_points": max_points}
for id, max_points, points in (
Submission.objects.filter(user=profile)
.values_list("problem__id", "problem__points")
.annotate(points=Max("points"))
.filter(points__lt=F("problem__points"))
)
}
cache.set(key, result, 86400)
return result
def _get_result_data(results):
return {
'categories': [
"categories": [
# Using gettext_noop here since this will be tacked into the cache, so it must be language neutral.
# The caller, SubmissionList.get_result_data will run ugettext on the name.
{'code': 'AC', 'name': gettext_noop('Accepted'), 'count': results['AC']},
{'code': 'WA', 'name': gettext_noop('Wrong'), 'count': results['WA']},
{'code': 'CE', 'name': gettext_noop('Compile Error'), 'count': results['CE']},
{'code': 'TLE', 'name': gettext_noop('Timeout'), 'count': results['TLE']},
{'code': 'ERR', 'name': gettext_noop('Error'),
'count': results['MLE'] + results['OLE'] + results['IR'] + results['RTE'] + results['AB'] + results['IE']},
{"code": "AC", "name": gettext_noop("Accepted"), "count": results["AC"]},
{"code": "WA", "name": gettext_noop("Wrong"), "count": results["WA"]},
{
"code": "CE",
"name": gettext_noop("Compile Error"),
"count": results["CE"],
},
{"code": "TLE", "name": gettext_noop("Timeout"), "count": results["TLE"]},
{
"code": "ERR",
"name": gettext_noop("Error"),
"count": results["MLE"]
+ results["OLE"]
+ results["IR"]
+ results["RTE"]
+ results["AB"]
+ results["IE"],
},
],
'total': sum(results.values()),
"total": sum(results.values()),
}
@@ -93,8 +134,16 @@ def get_result_data(*args, **kwargs):
if kwargs:
raise ValueError(_("Can't pass both queryset and keyword filters"))
else:
submissions = Submission.objects.filter(**kwargs) if kwargs is not None else Submission.objects
raw = submissions.values('result').annotate(count=Count('result')).values_list('result', 'count')
submissions = (
Submission.objects.filter(**kwargs)
if kwargs is not None
else Submission.objects
)
raw = (
submissions.values("result")
.annotate(count=Count("result"))
.values_list("result", "count")
)
return _get_result_data(defaultdict(int, raw))
@@ -102,48 +151,73 @@ def editable_problems(user, profile=None):
subquery = Problem.objects.all()
if profile is None:
profile = user.profile
if not user.has_perm('judge.edit_all_problem'):
if not user.has_perm("judge.edit_all_problem"):
subfilter = Q(authors__id=profile.id) | Q(curators__id=profile.id)
if user.has_perm('judge.edit_public_problem'):
if user.has_perm("judge.edit_public_problem"):
subfilter |= Q(is_public=True)
subquery = subquery.filter(subfilter)
return subquery
def hot_problems(duration, limit):
cache_key = 'hot_problems:%d:%d' % (duration.total_seconds(), limit)
cache_key = "hot_problems:%d:%d" % (duration.total_seconds(), limit)
qs = cache.get(cache_key)
if qs is None:
qs = Problem.get_public_problems() \
.filter(submission__date__gt=timezone.now() - duration)
qs0 = qs.annotate(k=Count('submission__user', distinct=True)).order_by('-k').values_list('k', flat=True)
qs = Problem.get_public_problems().filter(
submission__date__gt=timezone.now() - duration
)
qs0 = (
qs.annotate(k=Count("submission__user", distinct=True))
.order_by("-k")
.values_list("k", flat=True)
)
if not qs0:
return []
# make this an aggregate
mx = float(qs0[0])
qs = qs.annotate(unique_user_count=Count('submission__user', distinct=True))
qs = qs.annotate(unique_user_count=Count("submission__user", distinct=True))
# fix braindamage in excluding CE
qs = qs.annotate(submission_volume=Count(Case(
When(submission__result='AC', then=1),
When(submission__result='WA', then=1),
When(submission__result='IR', then=1),
When(submission__result='RTE', then=1),
When(submission__result='TLE', then=1),
When(submission__result='OLE', then=1),
output_field=FloatField(),
)))
qs = qs.annotate(ac_volume=Count(Case(
When(submission__result='AC', then=1),
output_field=FloatField(),
)))
qs = qs.annotate(
submission_volume=Count(
Case(
When(submission__result="AC", then=1),
When(submission__result="WA", then=1),
When(submission__result="IR", then=1),
When(submission__result="RTE", then=1),
When(submission__result="TLE", then=1),
When(submission__result="OLE", then=1),
output_field=FloatField(),
)
)
)
qs = qs.annotate(
ac_volume=Count(
Case(
When(submission__result="AC", then=1),
output_field=FloatField(),
)
)
)
qs = qs.filter(unique_user_count__gt=max(mx / 3.0, 1))
qs = qs.annotate(ordering=ExpressionWrapper(
0.02 * F('points') * (0.4 * F('ac_volume') / F('submission_volume') + 0.6 * F('ac_rate')) +
100 * e ** (F('unique_user_count') / mx), output_field=FloatField(),
)).order_by('-ordering').defer('description')[:limit]
qs = (
qs.annotate(
ordering=ExpressionWrapper(
0.02
* F("points")
* (
0.4 * F("ac_volume") / F("submission_volume")
+ 0.6 * F("ac_rate")
)
+ 100 * e ** (F("unique_user_count") / mx),
output_field=FloatField(),
)
)
.order_by("-ordering")
.defer("description")[:limit]
)
cache.set(cache_key, qs, 900)
return qs
return qs

View file

@@ -47,7 +47,7 @@ from judge.utils.unicode import utf8bytes
log = logging.getLogger(__name__)
API_ENDPOINT = 'https://api.pwnedpasswords.com/range/{}'
API_ENDPOINT = "https://api.pwnedpasswords.com/range/{}"
REQUEST_TIMEOUT = 2.0 # 2 seconds
@@ -61,19 +61,19 @@ def _get_pwned(prefix):
url=API_ENDPOINT.format(prefix),
timeout=getattr(
settings,
'PWNED_PASSWORDS_API_TIMEOUT',
"PWNED_PASSWORDS_API_TIMEOUT",
REQUEST_TIMEOUT,
),
)
response.raise_for_status()
except requests.RequestException:
# Gracefully handle timeouts and HTTP error response codes.
log.warning('Skipped Pwned Passwords check due to error', exc_info=True)
log.warning("Skipped Pwned Passwords check due to error", exc_info=True)
return None
results = {}
for line in response.text.splitlines():
line_suffix, _, times = line.partition(':')
line_suffix, _, times = line.partition(":")
results[line_suffix] = int(times)
return results
@@ -84,7 +84,7 @@ def pwned_password(password):
Checks a password against the Pwned Passwords database.
"""
if not isinstance(password, string_types):
raise TypeError('Password values to check must be strings.')
raise TypeError("Password values to check must be strings.")
password_hash = hashlib.sha1(utf8bytes(password)).hexdigest().upper()
prefix, suffix = password_hash[:5], password_hash[5:]
results = _get_pwned(prefix)
@@ -98,8 +98,9 @@ class PwnedPasswordsValidator(object):
"""
Password validator which checks the Pwned Passwords database.
"""
DEFAULT_HELP_MESSAGE = _("Your password can't be a commonly used password.")
DEFAULT_PWNED_MESSAGE = _('This password is too common.')
DEFAULT_PWNED_MESSAGE = _("This password is too common.")
def __init__(self, error_message=None, help_message=None):
self.help_message = help_message or self.DEFAULT_HELP_MESSAGE
@@ -111,8 +112,8 @@ class PwnedPasswordsValidator(object):
else:
singular, plural = error_message
self.error_message = {
'singular': singular,
'plural': plural,
"singular": singular,
"plural": plural,
}
def validate(self, password, user=None):
@@ -125,12 +126,12 @@ class PwnedPasswordsValidator(object):
elif amount:
raise ValidationError(
ungettext(
self.error_message['singular'],
self.error_message['plural'],
self.error_message["singular"],
self.error_message["plural"],
amount,
),
params={'amount': amount},
code='pwned_password',
params={"amount": amount},
code="pwned_password",
)
def get_help_text(self):

View file

@@ -1,7 +1,7 @@
from operator import attrgetter
def ranker(iterable, key=attrgetter('points'), rank=0):
def ranker(iterable, key=attrgetter("points"), rank=0):
delta = 1
last = None
for item in iterable:
@@ -12,4 +12,3 @@ def ranker(iterable, key=attrgetter('points'), rank=0):
delta += 1
yield rank, item
last = key(item)

View file

@@ -10,13 +10,18 @@ from django.utils import six
from judge.utils.cachedict import CacheDict
def unique_together_left_join(queryset, model, link_field_name, filter_field_name, filter_value, parent_model=None):
def unique_together_left_join(
queryset, model, link_field_name, filter_field_name, filter_value, parent_model=None
):
link_field = copy(model._meta.get_field(link_field_name).remote_field)
filter_field = model._meta.get_field(filter_field_name)
def restrictions(where_class, alias, related_alias):
cond = where_class()
cond.add(filter_field.get_lookup('exact')(filter_field.get_col(alias), filter_value), 'AND')
cond.add(
filter_field.get_lookup("exact")(filter_field.get_col(alias), filter_value),
"AND",
)
return cond
link_field.get_extra_restriction = restrictions
@@ -25,17 +30,36 @@ def unique_together_left_join(queryset, model, link_field_name, filter_field_name, filter_value, parent_model=None):
parent_alias = parent_model._meta.db_table
else:
parent_alias = queryset.query.get_initial_alias()
return queryset.query.join(Join(model._meta.db_table, parent_alias, None, LOUTER, link_field, True))
return queryset.query.join(
Join(model._meta.db_table, parent_alias, None, LOUTER, link_field, True)
)
class RawSQLJoin(Join):
def __init__(self, subquery, subquery_params, parent_alias, table_alias, join_type, join_field, nullable,
filtered_relation=None):
def __init__(
self,
subquery,
subquery_params,
parent_alias,
table_alias,
join_type,
join_field,
nullable,
filtered_relation=None,
):
self.subquery_params = subquery_params
super().__init__(subquery, parent_alias, table_alias, join_type, join_field, nullable, filtered_relation)
super().__init__(
subquery,
parent_alias,
table_alias,
join_type,
join_field,
nullable,
filtered_relation,
)
def as_sql(self, compiler, connection):
compiler.quote_cache[self.table_name] = '(%s)' % self.table_name
compiler.quote_cache[self.table_name] = "(%s)" % self.table_name
sql, params = super().as_sql(compiler, connection)
return sql, self.subquery_params + params
@@ -51,13 +75,23 @@ class FakeJoinField:
pass
def join_sql_subquery(queryset, subquery, params, join_fields, alias, join_type=INNER, parent_model=None):
def join_sql_subquery(
queryset, subquery, params, join_fields, alias, join_type=INNER, parent_model=None
):
if parent_model is not None:
parent_alias = parent_model._meta.db_table
else:
parent_alias = queryset.query.get_initial_alias()
queryset.query.external_aliases.add(alias)
join = RawSQLJoin(subquery, params, parent_alias, alias, join_type, FakeJoinField(join_fields), join_type == LOUTER)
join = RawSQLJoin(
subquery,
params,
parent_alias,
alias,
join_type,
FakeJoinField(join_fields),
join_type == LOUTER,
)
queryset.query.join(join)
join.table_alias = alias
@@ -68,7 +102,7 @@ def RawSQLColumn(model, field=None):
model = field.model
if isinstance(field, six.string_types):
field = model._meta.get_field(field)
return RawSQL('%s.%s' % (model._meta.db_table, field.get_attname_column()[1]), ())
return RawSQL("%s.%s" % (model._meta.db_table, field.get_attname_column()[1]), ())
def make_straight_join_query(QueryType):
@@ -77,7 +111,7 @@ def make_straight_join_query(QueryType):
alias = super().join(join, *args, **kwargs)
join = self.alias_map[alias]
if join.join_type == INNER:
join.join_type = 'STRAIGHT_JOIN'
join.join_type = "STRAIGHT_JOIN"
return alias
return Query
@@ -87,7 +121,7 @@ straight_join_cache = CacheDict(make_straight_join_query)
def use_straight_join(queryset):
if connections[queryset.db].vendor != 'mysql':
if connections[queryset.db].vendor != "mysql":
return
try:
cloner = queryset.query.chain

View file

@@ -6,6 +6,7 @@ except ImportError:
ReCaptchaWidget = None
else:
from django.conf import settings
if not hasattr(settings, 'RECAPTCHA_PRIVATE_KEY'):
if not hasattr(settings, "RECAPTCHA_PRIVATE_KEY"):
ReCaptchaField = None
ReCaptchaWidget = None

View file

@@ -1,10 +1,30 @@
from operator import itemgetter
__all__ = ('chart_colors', 'highlight_colors', 'get_pie_chart', 'get_bar_chart')
__all__ = ("chart_colors", "highlight_colors", "get_pie_chart", "get_bar_chart")
chart_colors = [0x3366CC, 0xDC3912, 0xFF9900, 0x109618, 0x990099, 0x3B3EAC, 0x0099C6, 0xDD4477, 0x66AA00, 0xB82E2E,
0x316395, 0x994499, 0x22AA99, 0xAAAA11, 0x6633CC, 0xE67300, 0x8B0707, 0x329262, 0x5574A6, 0x3B3EAC]
chart_colors = [
0x3366CC,
0xDC3912,
0xFF9900,
0x109618,
0x990099,
0x3B3EAC,
0x0099C6,
0xDD4477,
0x66AA00,
0xB82E2E,
0x316395,
0x994499,
0x22AA99,
0xAAAA11,
0x6633CC,
0xE67300,
0x8B0707,
0x329262,
0x5574A6,
0x3B3EAC,
]
highlight_colors = []
@@ -13,25 +33,26 @@ highlight_colors = []
def _highlight_colors():
for color in chart_colors:
r, g, b = color >> 16, (color >> 8) & 0xFF, color & 0xFF
highlight_colors.append('#%02X%02X%02X' % (min(int(r * 1.2), 255),
min(int(g * 1.2), 255),
min(int(b * 1.2), 255)))
highlight_colors.append(
"#%02X%02X%02X"
% (min(int(r * 1.2), 255), min(int(g * 1.2), 255), min(int(b * 1.2), 255))
)
_highlight_colors()
chart_colors = list(map('#%06X'.__mod__, chart_colors))
chart_colors = list(map("#%06X".__mod__, chart_colors))
def get_pie_chart(data):
return {
'labels': list(map(itemgetter(0), data)),
'datasets': [
"labels": list(map(itemgetter(0), data)),
"datasets": [
{
'backgroundColor': chart_colors,
'highlightBackgroundColor': highlight_colors,
'data': list(map(itemgetter(1), data)),
"backgroundColor": chart_colors,
"highlightBackgroundColor": highlight_colors,
"data": list(map(itemgetter(1), data)),
},
],
}
@@ -39,30 +60,39 @@ def get_pie_chart(data):
def get_bar_chart(data, **kwargs):
return {
'labels': list(map(itemgetter(0), data)),
'datasets': [
"labels": list(map(itemgetter(0), data)),
"datasets": [
{
'backgroundColor': kwargs.get('fillColor', 'rgba(151,187,205,0.5)'),
'borderColor': kwargs.get('strokeColor', 'rgba(151,187,205,0.8)'),
'borderWidth': 1,
'hoverBackgroundColor': kwargs.get('highlightFill', 'rgba(151,187,205,0.75)'),
'hoverBorderColor': kwargs.get('highlightStroke', 'rgba(151,187,205,1)'),
'data': list(map(itemgetter(1), data)),
"backgroundColor": kwargs.get("fillColor", "rgba(151,187,205,0.5)"),
"borderColor": kwargs.get("strokeColor", "rgba(151,187,205,0.8)"),
"borderWidth": 1,
"hoverBackgroundColor": kwargs.get(
"highlightFill", "rgba(151,187,205,0.75)"
),
"hoverBorderColor": kwargs.get(
"highlightStroke", "rgba(151,187,205,1)"
),
"data": list(map(itemgetter(1), data)),
},
],
}
def get_histogram(data, **kwargs):
return {
'labels': [round(i, 1) for i in list(map(itemgetter(0), data))],
'datasets': [
"labels": [round(i, 1) for i in list(map(itemgetter(0), data))],
"datasets": [
{
'backgroundColor': kwargs.get('fillColor', 'rgba(151,187,205,0.5)'),
'borderColor': kwargs.get('strokeColor', 'rgba(151,187,205,0.8)'),
'borderWidth': 1,
'hoverBackgroundColor': kwargs.get('highlightFill', 'rgba(151,187,205,0.75)'),
'hoverBorderColor': kwargs.get('highlightStroke', 'rgba(151,187,205,1)'),
'data': list(map(itemgetter(1), data)),
"backgroundColor": kwargs.get("fillColor", "rgba(151,187,205,0.5)"),
"borderColor": kwargs.get("strokeColor", "rgba(151,187,205,0.8)"),
"borderWidth": 1,
"hoverBackgroundColor": kwargs.get(
"highlightFill", "rgba(151,187,205,0.75)"
),
"hoverBorderColor": kwargs.get(
"highlightStroke", "rgba(151,187,205,1)"
),
"data": list(map(itemgetter(1), data)),
},
],
}
}

View file

@@ -1,8 +1,10 @@
from django.conf import settings
if 'newsletter' in settings.INSTALLED_APPS:
if "newsletter" in settings.INSTALLED_APPS:
from newsletter.models import Subscription
else:
Subscription = None
newsletter_id = None if Subscription is None else settings.DMOJ_NEWSLETTER_ID_ON_REGISTER
newsletter_id = (
None if Subscription is None else settings.DMOJ_NEWSLETTER_ID_ON_REGISTER
)

View file

@@ -10,16 +10,16 @@ from django.core.cache import caches
from judge.utils.file_cache import HashFileCache
from judge.utils.unicode import utf8bytes
logger = logging.getLogger('judge.texoid')
logger = logging.getLogger("judge.texoid")
TEXOID_ENABLED = hasattr(settings, 'TEXOID_URL')
TEXOID_ENABLED = hasattr(settings, "TEXOID_URL")
class TexoidRenderer(object):
def __init__(self):
self.cache = HashFileCache(settings.TEXOID_CACHE_ROOT,
settings.TEXOID_CACHE_URL,
settings.TEXOID_GZIP)
self.cache = HashFileCache(
settings.TEXOID_CACHE_ROOT, settings.TEXOID_CACHE_URL, settings.TEXOID_GZIP
)
self.meta_cache = caches[settings.TEXOID_META_CACHE]
self.meta_cache_ttl = settings.TEXOID_META_CACHE_TTL
@@ -27,59 +27,69 @@ class TexoidRenderer(object):
self.cache.create(hash)
try:
response = requests.post(settings.TEXOID_URL, data=utf8bytes(document), headers={
'Content-Type': 'application/x-tex',
})
response = requests.post(
settings.TEXOID_URL,
data=utf8bytes(document),
headers={
"Content-Type": "application/x-tex",
},
)
response.raise_for_status()
except requests.HTTPError as e:
if e.response.status == 400:
logger.error('Texoid failed to render: %s\n%s', document, e.response.text)
logger.error(
"Texoid failed to render: %s\n%s", document, e.response.text
)
else:
logger.exception('Failed to connect to texoid for: %s', document)
logger.exception("Failed to connect to texoid for: %s", document)
return
except Exception:
logger.exception('Failed to connect to texoid for: %s', document)
logger.exception("Failed to connect to texoid for: %s", document)
return
try:
data = response.json()
except ValueError:
logger.exception('Invalid texoid response for: %s\n%s', document, response.text)
logger.exception(
"Invalid texoid response for: %s\n%s", document, response.text
)
return
if not data['success']:
logger.error('Texoid failure for: %s\n%s', document, data)
return {'error': data['error']}
if not data["success"]:
logger.error("Texoid failure for: %s\n%s", document, data)
return {"error": data["error"]}
meta = data['meta']
self.cache.cache_data(hash, 'meta', utf8bytes(json.dumps(meta)), url=False, gzip=False)
meta = data["meta"]
self.cache.cache_data(
hash, "meta", utf8bytes(json.dumps(meta)), url=False, gzip=False
)
result = {
'png': self.cache.cache_data(hash, 'png', b64decode(data['png'])),
'svg': self.cache.cache_data(hash, 'svg', data['svg'].encode('utf-8')),
'meta': meta,
"png": self.cache.cache_data(hash, "png", b64decode(data["png"])),
"svg": self.cache.cache_data(hash, "svg", data["svg"].encode("utf-8")),
"meta": meta,
}
return result
def query_cache(self, hash):
result = {
'svg': self.cache.get_url(hash, 'svg'),
'png': self.cache.get_url(hash, 'png'),
"svg": self.cache.get_url(hash, "svg"),
"png": self.cache.get_url(hash, "png"),
}
key = 'texoid:meta:' + hash
key = "texoid:meta:" + hash
cached_meta = self.meta_cache.get(key)
if cached_meta is None:
cached_meta = json.loads(self.cache.read_data(hash, 'meta').decode('utf-8'))
cached_meta = json.loads(self.cache.read_data(hash, "meta").decode("utf-8"))
self.meta_cache.set(key, cached_meta, self.meta_cache_ttl)
result['meta'] = cached_meta
result["meta"] = cached_meta
return result
def get_result(self, formula):
hash = hashlib.sha1(utf8bytes(formula)).hexdigest()
if self.cache.has_file(hash, 'svg'):
if self.cache.has_file(hash, "svg"):
return self.query_cache(hash)
else:
return self.query_texoid(formula, hash)

View file

@@ -12,6 +12,10 @@ def own_ticket_filter(profile_id):
def filter_visible_tickets(queryset, user, profile=None):
if profile is None:
profile = user.profile
return queryset.filter(own_ticket_filter(profile.id) |
Q(content_type=ContentType.objects.get_for_model(Problem),
object_id__in=editable_problems(user, profile))).distinct()
return queryset.filter(
own_ticket_filter(profile.id)
| Q(
content_type=ContentType.objects.get_for_model(Problem),
object_id__in=editable_problems(user, profile),
)
).distinct()

View file

@@ -3,7 +3,7 @@ import datetime
from django.utils.translation import npgettext, pgettext, ungettext
def nice_repr(timedelta, display='long', sep=', '):
def nice_repr(timedelta, display="long", sep=", "):
"""
Turns a datetime.timedelta object into a nice string repr.
@@ -16,7 +16,9 @@ def nice_repr(timedelta, display='long', sep=', '):
'1d, 1s'
"""
assert isinstance(timedelta, datetime.timedelta), 'First argument must be a timedelta.'
assert isinstance(
timedelta, datetime.timedelta
), "First argument must be a timedelta."
result = []
@@ -26,65 +28,94 @@ def nice_repr(timedelta, display='long', sep=', '):
minutes = (timedelta.seconds % 3600) // 60
seconds = timedelta.seconds % 60
if display == 'simple-no-seconds':
if display == "simple-no-seconds":
days += weeks * 7
if days:
if hours or minutes:
return '%d day%s %d:%02d' % (days, 's'[days == 1:], hours, minutes)
return '%d day%s' % (days, 's'[days == 1:])
return "%d day%s %d:%02d" % (days, "s"[days == 1 :], hours, minutes)
return "%d day%s" % (days, "s"[days == 1 :])
else:
return '%d:%02d' % (hours, minutes)
elif display == 'sql':
return "%d:%02d" % (hours, minutes)
elif display == "sql":
days += weeks * 7
return '%d %02d:%02d:%02d' % (days, hours, minutes, seconds)
elif display == 'simple':
return "%d %02d:%02d:%02d" % (days, hours, minutes, seconds)
elif display == "simple":
days += weeks * 7
if days:
return '%d day%s %02d:%02d:%02d' % (days, 's'[days == 1:], hours, minutes, seconds)
return "%d day%s %02d:%02d:%02d" % (
days,
"s"[days == 1 :],
hours,
minutes,
seconds,
)
else:
return '%02d:%02d:%02d' % (hours, minutes, seconds)
elif display == 'localized':
return "%02d:%02d:%02d" % (hours, minutes, seconds)
elif display == "localized":
days += weeks * 7
if days:
return npgettext('time format with day', '%d day %h:%m:%s', '%d days %h:%m:%s', days) \
.replace('%d', str(days)).replace('%h', '%02d' % hours).replace('%m', '%02d' % minutes) \
.replace('%s', '%02d' % seconds)
return (
npgettext(
"time format with day", "%d day %h:%m:%s", "%d days %h:%m:%s", days
)
.replace("%d", str(days))
.replace("%h", "%02d" % hours)
.replace("%m", "%02d" % minutes)
.replace("%s", "%02d" % seconds)
)
else:
return pgettext('time format without day', '%h:%m:%s') \
.replace('%h', '%02d' % hours).replace('%m', '%02d' % minutes).replace('%s', '%02d' % seconds)
elif display == 'localized-no-seconds':
return (
pgettext("time format without day", "%h:%m:%s")
.replace("%h", "%02d" % hours)
.replace("%m", "%02d" % minutes)
.replace("%s", "%02d" % seconds)
)
elif display == "localized-no-seconds":
days += weeks * 7
if days:
if hours or minutes:
return npgettext('time format no seconds with day', '%d day %h:%m', '%d days %h:%m', days) \
.replace('%d', str(days)).replace('%h', '%02d' % hours).replace('%m', '%02d' % minutes)
return ungettext('%d day', '%d days', days) % days
return (
npgettext(
"time format no seconds with day",
"%d day %h:%m",
"%d days %h:%m",
days,
)
.replace("%d", str(days))
.replace("%h", "%02d" % hours)
.replace("%m", "%02d" % minutes)
)
return ungettext("%d day", "%d days", days) % days
else:
return pgettext('hours and minutes', '%h:%m').replace('%h', '%02d' % hours).replace('%m', '%02d' % minutes)
elif display == 'concise':
return (
pgettext("hours and minutes", "%h:%m")
.replace("%h", "%02d" % hours)
.replace("%m", "%02d" % minutes)
)
elif display == "concise":
days += weeks * 7
if days:
return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds)
return "%dd %02d:%02d:%02d" % (days, hours, minutes, seconds)
else:
return '%02d:%02d:%02d' % (hours, minutes, seconds)
elif display == 'noday':
return "%02d:%02d:%02d" % (hours, minutes, seconds)
elif display == "noday":
days += weeks * 7
hours += days * 24
return '%02d:%02d:%02d' % (hours, minutes, seconds)
elif display == 'minimal':
words = ['w', 'd', 'h', 'm', 's']
elif display == 'short':
words = [' wks', ' days', ' hrs', ' min', ' sec']
return "%02d:%02d:%02d" % (hours, minutes, seconds)
elif display == "minimal":
words = ["w", "d", "h", "m", "s"]
elif display == "short":
words = [" wks", " days", " hrs", " min", " sec"]
else:
words = [' weeks', ' days', ' hours', ' minutes', ' seconds']
words = [" weeks", " days", " hours", " minutes", " seconds"]
values = [weeks, days, hours, minutes, seconds]
for i in range(len(values)):
if values[i]:
if values[i] == 1 and len(words[i]) > 1:
result.append('%i%s' % (values[i], words[i].rstrip('s')))
result.append("%i%s" % (values[i], words[i].rstrip("s")))
else:
result.append('%i%s' % (values[i], words[i]))
result.append("%i%s" % (values[i], words[i]))
return sep.join(result)
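
Some illustrative outputs, worked from the arithmetic above (a sketch, not doctests from this file):

import datetime

td = datetime.timedelta(days=8, hours=1, minutes=2, seconds=3)
nice_repr(td)                     # '1 week, 1 day, 1 hour, 2 minutes, 3 seconds'
nice_repr(td, display="concise")  # '8d 01:02:03'
nice_repr(td, display="noday")    # '193:02:03' (the 8 days folded into hours)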

View file

@@ -6,12 +6,12 @@ def utf8bytes(maybe_text):
return
if isinstance(maybe_text, six.binary_type):
return maybe_text
return maybe_text.encode('utf-8')
return maybe_text.encode("utf-8")
def utf8text(maybe_bytes, errors='strict'):
def utf8text(maybe_bytes, errors="strict"):
if maybe_bytes is None:
return
if isinstance(maybe_bytes, six.text_type):
return maybe_bytes
return maybe_bytes.decode('utf-8', errors)
return maybe_bytes.decode("utf-8", errors)
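
Round-trip behaviour of the two helpers, for reference (examples assumed, not from this diff):

utf8bytes("héllo")                   # b'h\xc3\xa9llo'
utf8bytes(b"raw")                    # bytes pass through unchanged
utf8text(b"h\xc3\xa9llo")            # 'héllo'
utf8text(b"\xff", errors="replace")  # '\ufffd' instead of raising UnicodeDecodeError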

View file

@@ -6,6 +6,7 @@ from django.views.generic.detail import SingleObjectMixin
from judge.utils.diggpaginator import DiggPaginator
from django.utils.html import mark_safe
def class_view_decorator(function_decorator):
"""Convert a function based decorator into a class based decorator usable
on class based Views.
@@ -22,34 +23,43 @@ def class_view_decorator(function_decorator):
def generic_message(request, title, message, status=None):
return render(request, 'generic-message.html', {
'message': message,
'title': title,
}, status=status)
return render(
request,
"generic-message.html",
{
"message": message,
"title": title,
},
status=status,
)
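
A hypothetical call site (the view name and message strings are illustrative):

def contest_access_denied(request):
    # Render the shared generic-message.html template with an error status.
    return generic_message(
        request,
        "Access denied",
        "You cannot view this contest.",
        status=403,
    )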
def paginate_query_context(request):
query = request.GET.copy()
query.setlist('page', [])
query.setlist("page", [])
query = query.urlencode()
if query:
return {'page_prefix': '%s?%s&page=' % (request.path, query),
'first_page_href': '%s?%s' % (request.path, query)}
return {
"page_prefix": "%s?%s&page=" % (request.path, query),
"first_page_href": "%s?%s" % (request.path, query),
}
else:
return {'page_prefix': '%s?page=' % request.path,
'first_page_href': request.path}
return {
"page_prefix": "%s?page=" % request.path,
"first_page_href": request.path,
}
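
Concretely, under an assumed request (the path and query string are illustrative):

# With request.path == "/problems/" and GET == {"category": "dp", "page": "3"},
# the "page" key is cleared before re-encoding, so:
# paginate_query_context(request) == {
#     "page_prefix": "/problems/?category=dp&page=",
#     "first_page_href": "/problems/?category=dp",
# }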
class TitleMixin(object):
title = '(untitled)'
title = "(untitled)"
content_title = None
def get_context_data(self, **kwargs):
context = super(TitleMixin, self).get_context_data(**kwargs)
context['title'] = self.get_title()
context["title"] = self.get_title()
content_title = self.get_content_title()
if content_title is not None:
context['content_title'] = content_title
context["content_title"] = content_title
return context
def get_content_title(self):
@@ -60,10 +70,18 @@ class TitleMixin(object):
class DiggPaginatorMixin(object):
def get_paginator(self, queryset, per_page, orphans=0,
allow_empty_first_page=True, **kwargs):
return DiggPaginator(queryset, per_page, body=6, padding=2,
orphans=orphans, allow_empty_first_page=allow_empty_first_page, **kwargs)
def get_paginator(
self, queryset, per_page, orphans=0, allow_empty_first_page=True, **kwargs
):
return DiggPaginator(
queryset,
per_page,
body=6,
padding=2,
orphans=orphans,
allow_empty_first_page=allow_empty_first_page,
**kwargs
)
class QueryStringSortMixin(object):
@@ -75,8 +93,11 @@ class QueryStringSortMixin(object):
return self.default_sort
def get(self, request, *args, **kwargs):
order = request.GET.get('order', '')
if not ((not order.startswith('-') or order.count('-') == 1) and (order.lstrip('-') in self.all_sorts)):
order = request.GET.get("order", "")
if not (
(not order.startswith("-") or order.count("-") == 1)
and (order.lstrip("-") in self.all_sorts)
):
order = self.get_default_sort_order(request)
self.order = order
@@ -84,17 +105,26 @@ class QueryStringSortMixin(object):
def get_sort_context(self):
query = self.request.GET.copy()
query.setlist('order', [])
query.setlist("order", [])
query = query.urlencode()
sort_prefix = '%s?%s&order=' % (self.request.path, query) if query else '%s?order=' % self.request.path
current = self.order.lstrip('-')
sort_prefix = (
"%s?%s&order=" % (self.request.path, query)
if query
else "%s?order=" % self.request.path
)
current = self.order.lstrip("-")
links = {key: sort_prefix + ('-' if key in self.default_desc else '') + key for key in self.all_sorts}
links[current] = sort_prefix + ('' if self.order.startswith('-') else '-') + current
links = {
key: sort_prefix + ("-" if key in self.default_desc else "") + key
for key in self.all_sorts
}
links[current] = (
sort_prefix + ("" if self.order.startswith("-") else "-") + current
)
order = {key: '' for key in self.all_sorts}
order[current] = ' \u25BE' if self.order.startswith('-') else u' \u25B4'
return {'sort_links': links, 'sort_order': order}
order = {key: "" for key in self.all_sorts}
order[current] = " \u25BE" if self.order.startswith("-") else " \u25B4"
return {"sort_links": links, "sort_order": order}
def get_sort_paginate_context(self):
return paginate_query_context(self.request)
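
A hedged sketch of a consumer of this mixin (the class name and sort keys are assumptions, though all_sorts, default_desc, and default_sort match the attributes the mixin reads above):

class ProblemList(QueryStringSortMixin, ListView):
    all_sorts = frozenset(("name", "points"))
    default_desc = frozenset(("points",))  # these keys default to descending order
    default_sort = "-points"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Exposes sort_links and sort_order for the template's column headers.
        context.update(self.get_sort_context())
        return context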