Sanitize user input on markdown fields
This is an XSS vulnerability. This also blocks a number of MD attributes that a user might attempt to use. The following are the allowed attributes: ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code', 'em', 'i', 'li', 'ol', 'p', 'pre', 'strong', 'ul']. I believe this to be acceptable, as honestly, a speaker using H1 is going to stomp all over the page and make it harder for the reviewer to parse. UX-wise, it's less than great. A user can do # title and be left with <h1> in the sanitized output.
This commit is contained in:
parent
298b162be6
commit
0652471164
2 changed files with 8 additions and 11 deletions
|
@ -6,7 +6,7 @@ django-sitetree>=1.7.0
|
|||
django-taggit==0.18.0
|
||||
django-timezone-field>=2.0
|
||||
easy-thumbnails==2.3
|
||||
html5lib==0.9999999
|
||||
bleach
|
||||
markdown==2.6.5
|
||||
pytz==2015.7
|
||||
django-ical==1.4
|
||||
|
|
|
@ -1,17 +1,14 @@
|
|||
from __future__ import unicode_literals
|
||||
|
||||
import bleach
|
||||
import markdown
|
||||
|
||||
|
||||
# Whitelist of HTML tags allowed to survive sanitization: bleach's
# defaults plus the paragraph/preformatted tags that Markdown emits.
# Built as a fresh list so bleach's module-level default is not mutated.
tags = list(bleach.sanitizer.ALLOWED_TAGS) + ['p', 'pre']
|
||||
|
||||
|
||||
def parse(text):
    """Render *text* as Markdown and sanitize the resulting HTML.

    The Markdown "extra" extension set is enabled, and the rendered
    output is run through ``bleach.clean`` with the module-level
    ``tags`` whitelist, so any tag outside the whitelist is escaped.
    This prevents stored XSS through user-supplied markdown fields.

    :param text: raw markdown source supplied by the user (untrusted)
    :returns: sanitized HTML string
    """
    # Render exactly once. The previous version called
    # markdown.markdown() twice (the first call with the deprecated
    # safe_mode flag), which re-parsed the already-rendered HTML and
    # could mangle the markup; the dead html5lib sanitizer code has
    # also been removed in favor of bleach.
    md = markdown.markdown(text, extensions=['extra'])
    # Escape everything outside the whitelist built at module level.
    return bleach.clean(md, tags=tags)
|
||||
|
|
Loading…
Reference in a new issue