summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  CMakeLists.txt    2
-rwxr-xr-x  runtests.py     178
-rw-r--r--  src/html/html.c   2
3 files changed, 104 insertions, 78 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 637099d..afa3d22 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -24,7 +24,7 @@ enable_testing()
# To get verbose output: cmake --build build --target "test" -- ARGS='-V'
add_test(spectest_library
python "${CMAKE_SOURCE_DIR}/runtests.py" "--spec"
- "${CMAKE_SOURCE_DIR}/spec.txt" "--library_dir" "${CMAKE_BINARY_DIR}/src"
+ "${CMAKE_SOURCE_DIR}/spec.txt" "--library-dir" "${CMAKE_BINARY_DIR}/src"
)
add_test(spectest_executable
python "${CMAKE_SOURCE_DIR}/runtests.py" "--spec" "${CMAKE_SOURCE_DIR}/spec.txt" "--program" "${CMAKE_BINARY_DIR}/src/cmark"
diff --git a/runtests.py b/runtests.py
index 906a573..b3c8d98 100755
--- a/runtests.py
+++ b/runtests.py
@@ -10,6 +10,7 @@ import argparse
from HTMLParser import HTMLParser
from htmlentitydefs import name2codepoint
import re
+import cgi
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run cmark tests.')
@@ -19,8 +20,13 @@ if __name__ == "__main__":
help='path to spec')
parser.add_argument('--pattern', dest='pattern', nargs='?',
default=None, help='limit to sections matching regex pattern')
- parser.add_argument('--library_dir', dest='library_dir', nargs='?',
+ parser.add_argument('--library-dir', dest='library_dir', nargs='?',
default=None, help='directory containing dynamic library')
+ parser.add_argument('--no-normalize', dest='normalize',
+ action='store_const', const=False, default=True)
+ parser.add_argument('--debug-normalization', dest='debug_normalization',
+ action='store_const', const=True,
+ default=False, help='filter stdin through normalizer for testing')
args = parser.parse_args(sys.argv[1:])
if not args.program:
@@ -60,106 +66,119 @@ class MyHTMLParser(HTMLParser):
self.last = "starttag"
self.in_pre = False
self.output = u""
+ self.last_tag = ""
def handle_data(self, data):
- if self.in_pre:
- self.output += data
- else:
- data = normalize_whitespace_re.sub(' ', data)
- data_strip = data.strip()
- if (self.last == "ref") and data_strip and data[0] == " ":
- self.output += " "
- self.data_end_in_space_not_empty = (data[-1] == ' ' and data_strip)
- self.output += data_strip
- self.last = "data"
+ after_tag = self.last == "endtag" or self.last == "starttag"
+ after_block_tag = after_tag and self.is_block_tag(self.last_tag)
+ if after_tag and self.last_tag == "br":
+ data = data.lstrip('\n')
+ data = normalize_whitespace_re.sub(' ', data)
+ if after_block_tag and not self.in_pre:
+ if self.last == "starttag":
+ data = data.lstrip()
+ elif self.last == "endtag":
+ data = data.strip()
+ self.output += data
+ self.last = "data"
def handle_endtag(self, tag):
if tag == "pre":
self.in_pre = False
+ if self.is_block_tag(tag):
+ self.output = self.output.rstrip()
self.output += "</" + tag + ">"
+ self.last_tag = tag
self.last = "endtag"
def handle_starttag(self, tag, attrs):
if tag == "pre":
self.in_pre = True
self.output += "<" + tag
- attrs = filter(lambda attr: attr[0] in significant_attrs, attrs)
+ # For now we don't strip out 'extra' attributes, because of
+ # raw HTML test cases.
+ # attrs = filter(lambda attr: attr[0] in significant_attrs, attrs)
if attrs:
attrs.sort()
- for attr in attrs:
- self.output += " " + attr[0] + "=" + '"' + attr[1] + '"'
+ for (k,v) in attrs:
+ self.output += " " + k
+ if v != None:
+ self.output += ("=" + '"' + cgi.escape(v,quote=True) + '"')
self.output += ">"
+ self.last_tag = tag
self.last = "starttag"
def handle_startendtag(self, tag, attrs):
- """Ignore closing tag for self-closing void elements."""
+ """Ignore closing tag for self-closing """
self.handle_starttag(tag, attrs)
+ self.last_tag = tag
+ self.last = "endtag"
+ def handle_comment(self, data):
+ self.output += '<!--' + data + '-->'
+ self.last = "comment"
+ def handle_decl(self, data):
+ self.output += '<!' + data + '>'
+ self.last = "decl"
+ def handle_unknown_decl(self, data):
+ self.output += '<!' + data + '>'
+ self.last = "decl"
+ def handle_pi(self,data):
+ self.output += '<?' + data + '>'
+ self.last = "pi"
def handle_entityref(self, name):
- self.add_space_from_last_data()
try:
- self.output += unichr(name2codepoint[name])
+ c = unichr(name2codepoint[name])
except KeyError:
- self.output += name
+ c = None
+ self.output_char(c, '&' + name + ';')
self.last = "ref"
def handle_charref(self, name):
- self.add_space_from_last_data()
try:
if name.startswith("x"):
c = unichr(int(name[1:], 16))
else:
c = unichr(int(name))
- self.output += c
except ValueError:
- self.output += name
+ c = None
+ self.output_char(c, '&' + name + ';')
self.last = "ref"
# Helpers.
- def add_space_from_last_data(self):
- """Maintain the space at: `a <span>b</span>`"""
- if self.last == 'data' and self.data_end_in_space_not_empty:
- self.output += ' '
+ def output_char(self, c, fallback):
+ if c == u'<':
+ self.output += "&lt;"
+ elif c == u'>':
+ self.output += "&gt;"
+ elif c == u'&':
+ self.output += "&amp;"
+ elif c == u'"':
+ self.output += "&quot;"
+ elif c == None:
+ self.output += fallback
+ else:
+ self.output += c
-def normalize(html):
+ def is_block_tag(self,tag):
+ return (tag in ['article', 'header', 'aside', 'hgroup', 'blockquote',
+ 'hr', 'iframe', 'body', 'li', 'map', 'button', 'object', 'canvas',
+ 'ol', 'caption', 'output', 'col', 'p', 'colgroup', 'pre', 'dd',
+ 'progress', 'div', 'section', 'dl', 'table', 'td', 'dt',
+ 'tbody', 'embed', 'textarea', 'fieldset', 'tfoot', 'figcaption',
+ 'th', 'figure', 'thead', 'footer', 'tr', 'form', 'ul',
+ 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'video', 'script', 'style'])
+
+def normalize_html(html):
r"""
- Return normalized form of HTML which igores insignificant output differences.
- Multiple inner whitespaces to a single space
- >>> normalize("<p>a \t\nb</p>")
- u'<p>a b</p>'
- Surrounding whitespaces are removed:
- >>> normalize("<p> a</p>")
- u'<p>a</p>'
- >>> normalize("<p>a </p>")
- u'<p>a</p>'
- TODO: how to deal with the following cases without a full list of the void tags?
- >>> normalize("<p>a <b>b</b></p>")
- u'<p>a<b>b</b></p>'
- >>> normalize("<p><b>b</b> c</p>")
- u'<p><b>b</b>c</p>'
- >>> normalize("<p>a <br></p>")
- u'<p>a<br></p>'
- `pre` elements preserve whitespace:
- >>> normalize("<pre>a \t\nb</pre>")
- u'<pre>a \t\nb</pre>'
- Self-closing tags:
- >>> normalize("<p><br /></p>")
- u'<p><br></p>'
- References are converted to Unicode:
- >>> normalize("<p>&lt;</p>")
- u'<p><</p>'
- >>> normalize("<p>&#60;</p>")
- u'<p><</p>'
- >>> normalize("<p>&#x3C;</p>")
- u'<p><</p>'
- >>> normalize("<p>&#x4E2D;</p>")
- u'<p>\u4e2d</p>'
- Spaces around entities are kept:
- >>> normalize("<p>a &lt; b</p>")
- u'<p>a < b</p>'
- >>> normalize("<p>a&lt;b</p>")
- u'<p>a<b</p>'
- Most attributes are ignored:
- >>> normalize('<p id="a"></p>')
- u'<p></p>'
- Critical attributes are considered and sorted alphabetically:
- >>> normalize('<a href="a"></a>')
- u'<a href="a"></a>'
- >>> normalize('<img src="a" alt="a">')
- u'<img alt="a" src="a">'
+ Return normalized form of HTML which ignores insignificant output
+ differences:
+
+ * Multiple inner whitespaces are collapsed to a single space (except
+ in pre tags).
+ * Outer whitespace (outside block-level tags) is removed.
+ * Self-closing tags are converted to open tags.
+ * Attributes are sorted and lowercased.
+ * References are converted to unicode, except that '<', '>', '&', and
+ '&' are rendered using entities.
+
+ Known limitations:
+
+ * HTMLParser just swallows CDATA.
+ * HTMLParser seems to treat unknown declarations as comments.
"""
parser = MyHTMLParser()
parser.feed(html.decode(encoding='UTF-8'))
@@ -170,13 +189,17 @@ def print_test_header(headertext, example_number, start_line, end_line):
print "Example %d (lines %d-%d) %s" % (example_number,start_line,end_line,headertext)
def do_test(markdown_lines, expected_html_lines, headertext,
- example_number, start_line, end_line, prog=None):
+ example_number, start_line, end_line, prog, normalize):
real_markdown_text = ''.join(markdown_lines).replace('→','\t')
[retcode, actual_html, err] = md2html(real_markdown_text, prog)
if retcode == 0:
actual_html_lines = actual_html.splitlines(True)
expected_html = ''.join(expected_html_lines)
- if normalize(actual_html) == normalize(expected_html):
+ if normalize:
+ passed = normalize_html(actual_html) == normalize_html(expected_html)
+ else:
+ passed = actual_html == expected_html
+ if passed:
return 'pass'
else:
print_test_header(headertext, example_number,start_line,end_line)
@@ -192,7 +215,7 @@ def do_test(markdown_lines, expected_html_lines, headertext,
print(err)
return 'error'
-def do_tests(specfile, prog, pattern):
+def do_tests(specfile, prog, pattern, normalize):
line_number = 0
start_line = 0
end_line = 0
@@ -208,7 +231,7 @@ def do_tests(specfile, prog, pattern):
header_re = re.compile('#+ ')
if pattern:
- pattern_re = re.compile(pattern)
+ pattern_re = re.compile(pattern, re.IGNORECASE)
with open(specfile, 'r') as specf:
for line in specf:
@@ -228,7 +251,8 @@ def do_tests(specfile, prog, pattern):
if active:
result = do_test(markdown_lines, html_lines,
headertext, example_number,
- start_line, end_line, prog)
+ start_line, end_line, prog,
+ normalize)
if result == 'pass':
passed = passed + 1
elif result == 'fail':
@@ -248,7 +272,9 @@ def do_tests(specfile, prog, pattern):
return (failed == 0 and errored == 0)
if __name__ == "__main__":
- if do_tests(args.spec, args.program, args.pattern):
+ if args.debug_normalization:
+ print normalize_html(sys.stdin.read())
+ elif do_tests(args.spec, args.program, args.pattern, args.normalize):
exit(0)
else:
exit(1)
diff --git a/src/html/html.c b/src/html/html.c
index 8110f87..0c6c399 100644
--- a/src/html/html.c
+++ b/src/html/html.c
@@ -210,7 +210,7 @@ static void inlines_to_html(strbuf *html, cmark_node* ils)
escape_html(html, ils->as.link.title, -1);
}
- strbuf_puts(html, "\"/>");
+ strbuf_puts(html, "\" />");
break;
case NODE_STRONG: