sud0nick: Resolve merge conflicts for PR #50.

This commit is contained in:
Foxtrot
2019-06-19 18:09:12 +01:00
23 changed files with 10355 additions and 1674 deletions

View File

@@ -0,0 +1,200 @@
# coding: utf-8
"""
tinycss.fonts3
---------------
Parser for CSS 3 Fonts syntax:
https://www.w3.org/TR/css-fonts-3/
Adds support for font-face and font-feature-values rules.
:copyright: (c) 2016 by Kozea.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division, unicode_literals
from .css21 import CSS21Parser, ParseError
class FontFaceRule(object):
    """A parsed ``@font-face`` at-rule.

    .. attribute:: at_keyword
        Always ``'@font-face'``.
    .. attribute:: declarations
        A list of :class:`~.css21.Declaration` objects.
    .. attribute:: line
        Source line where this was read.
    .. attribute:: column
        Source column where this was read.
    """
    def __init__(self, at_keyword, declarations, line, column):
        # Only built by the parser, always with the @font-face keyword.
        assert at_keyword == '@font-face'
        self.at_keyword = at_keyword
        self.declarations = declarations
        # Source position, kept for error reporting.
        self.line = line
        self.column = column
class FontFeatureValuesRule(object):
    """A parsed ``@font-feature-values`` at-rule.

    .. attribute:: at_keyword
        Always ``'@font-feature-values'``.
    .. attribute:: at_rules
        The list of parsed at-rules inside the @font-feature-values block, in
        source order.
    .. attribute:: family_names
        A list of strings representing font families.
    .. attribute:: line
        Source line where this was read.
    .. attribute:: column
        Source column where this was read.
    """
    def __init__(self, at_keyword, at_rules, family_names, line, column):
        # Only built by the parser, always with this exact keyword.
        assert at_keyword == '@font-feature-values'
        self.at_keyword = at_keyword
        self.at_rules = at_rules
        self.family_names = family_names
        # Source position, kept for error reporting.
        self.line = line
        self.column = column
class FontFeatureRule(object):
    """A parsed at-rule for font features.

    .. attribute:: at_keyword
        One of the 6 following strings:
        * ``@stylistic``
        * ``@styleset``
        * ``@character-variant``
        * ``@swash``
        * ``@ornaments``
        * ``@annotation``
    .. attribute:: declarations
        A list of :class:`~.css21.Declaration` objects.
    .. attribute:: line
        Source line where this was read.
    .. attribute:: column
        Source column where this was read.
    """
    def __init__(self, at_keyword, declarations, line, column):
        self.at_keyword = at_keyword
        self.declarations = declarations
        # Source position, kept for error reporting.
        self.line = line
        self.column = column
class CSSFonts3Parser(CSS21Parser):
    """Extend :class:`~.css21.CSS21Parser` for `CSS 3 Fonts`_ syntax.
    .. _CSS 3 Fonts: https://www.w3.org/TR/css-fonts-3/
    """

    # At-keywords that may appear inside an @font-feature-values block.
    FONT_FEATURE_VALUES_AT_KEYWORDS = [
        '@stylistic',
        '@styleset',
        '@character-variant',
        '@swash',
        '@ornaments',
        '@annotation',
    ]

    def parse_at_rule(self, rule, previous_rules, errors, context):
        """Parse the fonts-specific at-rules; delegate everything else to
        :meth:`CSS21Parser.parse_at_rule`.

        :param rule: an unparsed at-rule with ``at_keyword``, ``head``,
            ``body``, ``line`` and ``column`` attributes
        :param previous_rules: rules parsed so far at this nesting level
        :param errors: mutable list; recoverable body errors are appended
        :param context: the enclosing context; feature at-rules (@swash,
            @styleset, ...) are only valid in ``'@font-feature-values'``
        :raises: :class:`~.css21.ParseError` on invalid rules
        """
        if rule.at_keyword == '@font-face':
            # @font-face takes no prelude: any token in the header is invalid.
            if rule.head:
                raise ParseError(
                    rule.head[0],
                    'unexpected {0} token in {1} rule header'.format(
                        rule.head[0].type, rule.at_keyword))
            declarations, body_errors = self.parse_declaration_list(rule.body)
            errors.extend(body_errors)
            return FontFaceRule(
                rule.at_keyword, declarations, rule.line, rule.column)
        elif rule.at_keyword == '@font-feature-values':
            # Bug fix: an empty header used to reach the family-name parser
            # with no tokens at all, whose error path then crashed with an
            # unbound local (`token`).  Report a regular parse error instead,
            # with the same message as other invalid selectors.
            if not rule.head:
                raise ParseError(
                    rule, 'invalid @font-feature-values selector')
            family_names = tuple(
                self.parse_font_feature_values_family_names(rule.head))
            # rule.body is None when the at-rule has no {} block.
            at_rules, body_errors = (
                self.parse_rules(rule.body or [], '@font-feature-values'))
            errors.extend(body_errors)
            return FontFeatureValuesRule(
                rule.at_keyword, at_rules, family_names,
                rule.line, rule.column)
        elif rule.at_keyword in self.FONT_FEATURE_VALUES_AT_KEYWORDS:
            if context != '@font-feature-values':
                raise ParseError(
                    rule, '{0} rule not allowed in {1}'.format(
                        rule.at_keyword, context))
            declarations, body_errors = self.parse_declaration_list(rule.body)
            errors.extend(body_errors)
            return FontFeatureRule(
                rule.at_keyword, declarations, rule.line, rule.column)
        return super(CSSFonts3Parser, self).parse_at_rule(
            rule, previous_rules, errors, context)

    def parse_font_feature_values_family_names(self, tokens):
        """Parse an @font-feature-values selector.
        :param tokens:
            A non-empty iterable of tokens, typically from the ``head``
            attribute of an unparsed :class:`AtRule`.
        :returns:
            A generator of strings representing font families.
        :raises:
            :class:`~.parsing.ParseError` on invalid selectors
        """
        family = ''
        current_string = False  # True once a STRING token named this family
        for token in tokens:
            if token.type == 'DELIM' and token.value == ',' and family:
                # A comma ends the current (non-empty) family name.
                yield family
                family = ''
                current_string = False
            elif token.type == 'STRING' and not family and (
                    current_string is False):
                family = token.value
                current_string = True
            elif token.type == 'IDENT' and not current_string:
                # Consecutive idents accumulate into one space-joined name.
                if family:
                    family += ' '
                family += token.value
            elif token.type != 'S':
                # Any other non-whitespace token invalidates the selector.
                family = ''
                break
        if family:
            yield family
        else:
            # `token` is the last token seen; callers must pass a non-empty
            # iterable (parse_at_rule guarantees this for rule heads).
            raise ParseError(token, 'invalid @font-feature-values selector')

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,144 @@
# coding: utf-8
"""
Tests for the Fonts 3 parser
----------------------------
:copyright: (c) 2016 by Kozea.
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import pytest
from tinycss.fonts3 import CSSFonts3Parser
from . import assert_errors
from .test_tokenizer import jsonify
@pytest.mark.parametrize(('css', 'expected_family_names', 'expected_errors'), [
    ('@font-feature-values foo {}', ('foo',), []),
    ('@font-feature-values Foo Test {}', ('Foo Test',), []),
    ('@font-feature-values \'Foo Test\' {}', ('Foo Test',), []),
    # A block-less at-rule still yields its family names.
    ('@font-feature-values Foo Test, Foo Lol, "Foo tooo"', (
        'Foo Test', 'Foo Lol', 'Foo tooo'), []),
    ('@font-feature-values Foo , Foo lol {}', ('Foo', 'Foo lol'), []),
    ('@font-feature-values Foo , "Foobar" , Lol {}', (
        'Foo', 'Foobar', 'Lol'), []),
    # Leading or trailing commas are invalid.
    ('@font-feature-values Foo, {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values ,Foo {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values Test,"Foo", {}', None, [
        'invalid @font-feature-values selector']),
    # Mixing idents and strings within one name is invalid.
    ('@font-feature-values Test "Foo" {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values Test Foo, Test "bar", "foo" {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values Test/Foo {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values /Foo {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values #Foo {}', None, [
        'invalid @font-feature-values selector']),
    # TODO: this currently works but should not work
    # ('@font-feature-values test@foo {}', None, [
    #     'invalid @font-feature-values selector']),
    ('@font-feature-values Hawaii 5-0 {}', None, [
        'invalid @font-feature-values selector']),
])
def test_font_feature_values_selectors(css, expected_family_names,
                                       expected_errors):
    """Check family-name parsing in @font-feature-values selectors."""
    stylesheet = CSSFonts3Parser().parse_stylesheet(css)
    assert_errors(stylesheet.errors, expected_errors)
    if stylesheet.rules:
        assert len(stylesheet.rules) == 1
        rule = stylesheet.rules[0]
        assert rule.at_keyword == '@font-feature-values'
        assert rule.family_names == expected_family_names
@pytest.mark.parametrize(('css', 'expected_declarations', 'expected_errors'), [
    ('@font-face {}', [], []),
    # @font-face takes no selector: a token in the rule header is an error.
    ('@font-face test { src: "lol"; font-family: "bar" }', None, [
        'unexpected IDENT token in @font-face rule header']),
    ('@font-face { src: "lol"; font-family: "bar" }', [
        ('src', [('STRING', 'lol')]),
        ('font-family', [('STRING', 'bar')])], []),
    # Duplicate properties are kept, in source order.
    ('@font-face { src: "lol"; font-family: "bar"; src: "baz" }', [
        ('src', [('STRING', 'lol')]),
        ('font-family', [('STRING', 'bar')]),
        ('src', [('STRING', 'baz')])], []),
])
def test_font_face_content(css, expected_declarations, expected_errors):
    """Check the declarations parsed out of @font-face blocks."""
    stylesheet = CSSFonts3Parser().parse_stylesheet(css)
    assert_errors(stylesheet.errors, expected_errors)

    def declarations(rule):
        # Flatten a rule's declarations into (name, jsonified value) pairs.
        return [(decl.name, list(jsonify(decl.value)))
                for decl in rule.declarations]

    if expected_declarations is None:
        assert stylesheet.rules == []
        assert expected_errors
    else:
        assert len(stylesheet.rules) == 1
        rule = stylesheet.rules[0]
        assert rule.at_keyword == '@font-face'
        assert declarations(rule) == expected_declarations
@pytest.mark.parametrize(
    ('css', 'expected_rules', 'expected_errors'), [
        # Feature at-rules are only allowed inside @font-feature-values.
        ('''@annotation{}''', None, [
            '@annotation rule not allowed in stylesheet']),
        ('''@font-feature-values foo {}''', None, []),
        ('''@font-feature-values foo {
@swash { ornate: 1; }
@styleset { double-W: 14; sharp-terminals: 16 1; }
}''', [
            ('@swash', [('ornate', [('INTEGER', 1)])]),
            ('@styleset', [
                # Property names are normalized to lower-case.
                ('double-w', [('INTEGER', 14)]),
                ('sharp-terminals', [
                    ('INTEGER', 16), ('S', ' '), ('INTEGER', 1)])])], []),
        # Unknown at-rules inside the block are reported and skipped.
        ('''@font-feature-values foo {
@swash { ornate: 14; }
@unknown { test: 1; }
}''', [('@swash', [('ornate', [('INTEGER', 14)])])], [
            'unknown at-rule in @font-feature-values context: @unknown']),
        # Error recovery: invalid fragments produce errors but parsing
        # continues with the following at-rules.
        ('''@font-feature-values foo {
@annotation{boxed:1}
bad: 2;
@brokenstylesetbecauseofbadabove { sharp: 1}
@styleset { sharp-terminals: 16 1; @bad {}}
@styleset { @bad {} top-ignored: 3; top: 9000}
really-bad
}''', [
            ('@annotation', [('boxed', [('INTEGER', 1)])]),
            ('@styleset', [
                ('sharp-terminals', [
                    ('INTEGER', 16), ('S', ' '), ('INTEGER', 1)])]),
            ('@styleset', [('top', [('INTEGER', 9000)])])], [
            'unexpected ; token in selector',
            'expected a property name, got ATKEYWORD',
            'expected a property name, got ATKEYWORD',
            'no declaration block found for ruleset']),
    ])
def test_font_feature_values_content(css, expected_rules, expected_errors):
    """Check the at-rules parsed inside @font-feature-values blocks."""
    stylesheet = CSSFonts3Parser().parse_stylesheet(css)
    assert_errors(stylesheet.errors, expected_errors)
    if expected_rules is not None:
        assert len(stylesheet.rules) == 1
        rule = stylesheet.rules[0]
        assert rule.at_keyword == '@font-feature-values'
        # Flatten nested at-rules to (keyword, [(name, jsonified value)]).
        rules = [
            (at_rule.at_keyword, [
                (decl.name, list(jsonify(decl.value)))
                for decl in at_rule.declarations])
            for at_rule in rule.at_rules] if rule.at_rules else None
        assert rules == expected_rules

View File

@@ -0,0 +1,302 @@
# coding: utf-8
"""
Tests for the tokenizer
-----------------------
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import os
import sys
import pytest
from tinycss.tokenizer import (
cython_tokenize_flat, python_tokenize_flat, regroup)
def test_speedups():
    """Fail loudly when the Cython speedups are missing, unless running on
    PyPy or explicitly opted out via TINYCSS_SKIP_SPEEDUPS_TESTS."""
    is_pypy = hasattr(sys, 'pypy_translation_info')
    # Any non-empty value of the environment variable counts as opting out.
    env_skip_tests = os.environ.get('TINYCSS_SKIP_SPEEDUPS_TESTS')
    if is_pypy or env_skip_tests:  # pragma: no cover
        return
    assert cython_tokenize_flat is not None, (
        'Cython speedups are not installed, related tests will '
        'be skipped. Set the TINYCSS_SKIP_SPEEDUPS_TESTS environment '
        'variable if this is expected.')
@pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [
    (tokenize,) + test_data
    # Every case runs against both the Python and the Cython tokenizer.
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        ('', []),
        ('red -->', [('IDENT', 'red'), ('S', ' '), ('CDC', '-->')]),
        # Longest match rule: no CDC
        ('red-->', [('IDENT', 'red--'), ('DELIM', '>')]),
        (r'p[example="foo(int x) { this.x = x;}"]', [
            ('IDENT', 'p'),
            ('[', '['),
            ('IDENT', 'example'),
            ('DELIM', '='),
            ('STRING', 'foo(int x) { this.x = x;}'),
            (']', ']')]),
        # Numbers are parsed
        ('42 .5 -4pX 1.25em 30%', [
            ('INTEGER', 42), ('S', ' '),
            ('NUMBER', .5), ('S', ' '),
            # units are normalized to lower-case:
            ('DIMENSION', -4, 'px'), ('S', ' '),
            ('DIMENSION', 1.25, 'em'), ('S', ' '),
            ('PERCENTAGE', 30, '%')]),
        # URLs are extracted
        ('url(foo.png)', [('URI', 'foo.png')]),
        ('url("foo.png")', [('URI', 'foo.png')]),
        # Escaping
        (r'/* Comment with a \ backslash */', [
            ('COMMENT', '/* Comment with a \ backslash */')]),  # Unchanged
        # backslash followed by a newline in a string: ignored
        ('"Lorem\\\nIpsum"', [('STRING', 'LoremIpsum')]),
        # backslash followed by a newline outside a string: stands for itself
        ('Lorem\\\nIpsum', [
            ('IDENT', 'Lorem'), ('DELIM', '\\'),
            ('S', '\n'), ('IDENT', 'Ipsum')]),
        # Cancel the meaning of special characters
        (r'"Lore\m Ipsum"', [('STRING', 'Lorem Ipsum')]),  # or not special
        (r'"Lorem \49psum"', [('STRING', 'Lorem Ipsum')]),
        (r'"Lorem \49 psum"', [('STRING', 'Lorem Ipsum')]),
        (r'"Lorem\"Ipsum"', [('STRING', 'Lorem"Ipsum')]),
        (r'"Lorem\\Ipsum"', [('STRING', r'Lorem\Ipsum')]),
        (r'"Lorem\5c Ipsum"', [('STRING', r'Lorem\Ipsum')]),
        (r'Lorem\+Ipsum', [('IDENT', 'Lorem+Ipsum')]),
        (r'Lorem+Ipsum', [
            ('IDENT', 'Lorem'), ('DELIM', '+'), ('IDENT', 'Ipsum')]),
        (r'url(foo\).png)', [('URI', 'foo).png')]),
        # Unicode and backslash escaping
        ('\\26 B', [('IDENT', '&B')]),
        ('\\&B', [('IDENT', '&B')]),
        ('@\\26\tB', [('ATKEYWORD', '@&B')]),
        ('@\\&B', [('ATKEYWORD', '@&B')]),
        ('#\\26\nB', [('HASH', '#&B')]),
        ('#\\&B', [('HASH', '#&B')]),
        ('\\26\r\nB(', [('FUNCTION', '&B(')]),
        ('\\&B(', [('FUNCTION', '&B(')]),
        (r'12.5\000026B', [('DIMENSION', 12.5, '&b')]),
        (r'12.5\0000263B', [('DIMENSION', 12.5, '&3b')]),  # max 6 digits
        (r'12.5\&B', [('DIMENSION', 12.5, '&b')]),
        (r'"\26 B"', [('STRING', '&B')]),
        (r"'\000026B'", [('STRING', '&B')]),
        (r'"\&B"', [('STRING', '&B')]),
        (r'url("\26 B")', [('URI', '&B')]),
        (r'url(\26 B)', [('URI', '&B')]),
        (r'url("\&B")', [('URI', '&B')]),
        (r'url(\&B)', [('URI', '&B')]),
        (r'Lorem\110000Ipsum', [('IDENT', 'Lorem\uFFFDIpsum')]),
        # Bad strings
        # String ends at EOF without closing: no error, parsed
        ('"Lorem\\26Ipsum', [('STRING', 'Lorem&Ipsum')]),
        # Unescaped newline: ends the string, error, unparsed
        ('"Lorem\\26Ipsum\n', [
            ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n')]),
        # Tokenization restarts after the newline, so the second " starts
        # a new string (which ends at EOF without errors, as above.)
        ('"Lorem\\26Ipsum\ndolor" sit', [
            ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n'),
            ('IDENT', 'dolor'), ('STRING', ' sit')]),
    ]])
def test_tokens(tokenize, css_source, expected_tokens):
    """Tokenize flat and compare (type, value[, unit]) tuples per token."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    sources = [css_source]
    if sys.version_info[0] < 3:
        # On Python 2.x, ASCII-only bytestrings can be used
        # where Unicode is expected.
        sources.append(css_source.encode('ascii'))
    for css_source in sources:
        tokens = tokenize(css_source, ignore_comments=False)
        result = [
            # Unit-less tokens yield 2-tuples, dimensioned tokens 3-tuples.
            (token.type, token.value) + (
                () if token.unit is None else (token.unit,))
            for token in tokens
        ]
        assert result == expected_tokens
@pytest.mark.parametrize('tokenize', [
    python_tokenize_flat, cython_tokenize_flat])
def test_positions(tokenize):
    """Every token must report its 1-based source line and column."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    # NOTE(review): the expected columns below imply more leading whitespace
    # in `css` than is visible here — possibly mangled; confirm upstream.
    css = '/* Lorem\nipsum */\fa {\n color: red;\tcontent: "dolor\\\fsit" }'
    observed = [
        (tok.type, tok.line, tok.column)
        for tok in tokenize(css, ignore_comments=False)]
    assert observed == [
        ('COMMENT', 1, 1), ('S', 2, 9),
        ('IDENT', 3, 1), ('S', 3, 2), ('{', 3, 3),
        ('S', 3, 4), ('IDENT', 4, 5), (':', 4, 10),
        ('S', 4, 11), ('IDENT', 4, 12), (';', 4, 15), ('S', 4, 16),
        ('IDENT', 4, 17), (':', 4, 24), ('S', 4, 25), ('STRING', 4, 26),
        ('S', 5, 5), ('}', 5, 6)]
@pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [
    (tokenize,) + test_data
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        ('', []),
        (r'Lorem\26 "i\psum"4px', [
            ('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]),
        ('not([[lorem]]{ipsum (42)})', [
            ('FUNCTION', 'not', [
                ('[', [
                    ('[', [
                        ('IDENT', 'lorem'),
                    ]),
                ]),
                ('{', [
                    ('IDENT', 'ipsum'),
                    ('S', ' '),
                    ('(', [
                        ('INTEGER', 42),
                    ])
                ])
            ])]),
        # Close everything at EOF, no error
        ('a[b{"d', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('STRING', 'd'),
                ]),
            ]),
        ]),
        # Any remaining ), ] or } token is a nesting error
        ('a[b{d]e}', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                    (']', ']'),  # The error is visible here
                    ('IDENT', 'e'),
                ]),
            ]),
        ]),
        # ref: the balanced version of the case above, no nesting error.
        ('a[b{d}e]', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                ]),
                ('IDENT', 'e'),
            ]),
        ]),
    ]])
def test_token_grouping(tokenize, css_source, expected_tokens):
    """Regroup flat tokens into containers and compare the nested shape."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    tokens = regroup(tokenize(css_source, ignore_comments=False))
    result = list(jsonify(tokens))
    assert result == expected_tokens
def jsonify(tokens):
    """Turn tokens into "JSON-compatible" data structures."""
    for tok in tokens:
        kind = tok.type
        if kind == 'FUNCTION':
            # Functions carry a name in addition to their grouped content.
            yield kind, tok.function_name, list(jsonify(tok.content))
        elif tok.is_container:
            yield kind, list(jsonify(tok.content))
        else:
            yield kind, tok.value
@pytest.mark.parametrize(('tokenize', 'ignore_comments', 'expected_tokens'), [
    (tokenize,) + test_data
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        (False, [
            ('COMMENT', '/* lorem */'),
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [
                ('IDENT', 'dolor'),
                ('COMMENT', '/* sit */'),
            ]),
            # Comment left unclosed at EOF:
            ('BAD_COMMENT', '/* amet')
        ]),
        # With ignore_comments, COMMENT and BAD_COMMENT are dropped entirely.
        (True, [
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [
                ('IDENT', 'dolor'),
            ]),
        ]),
    ]])
def test_comments(tokenize, ignore_comments, expected_tokens):
    """Comments are kept or dropped according to *ignore_comments*."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    css_source = '/* lorem */ ipsum[dolor/* sit */]/* amet'
    tokens = regroup(tokenize(css_source, ignore_comments))
    result = list(jsonify(tokens))
    assert result == expected_tokens
@pytest.mark.parametrize(('tokenize', 'css_source'), [
    (tokenize, test_data)
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        r'p[example="foo(int x) { this.x = x;}"]',
        '"Lorem\\26Ipsum\ndolor" sit',
        '/* Lorem\nipsum */\fa {\n color: red;\tcontent: "dolor\\\fsit" }',
        'not([[lorem]]{ipsum (42)})',
        'a[b{d]e}',
        'a[b{"d',
    ]])
def test_token_serialize_css(tokenize, css_source):
    """Serializing tokens, flat or regrouped, must round-trip the CSS."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    for wrap in (regroup, None):
        tokens = tokenize(css_source, ignore_comments=False)
        if wrap is not None:
            tokens = wrap(tokens)
        serialized = ''.join(token.as_css() for token in tokens)
        assert serialized == css_source
@pytest.mark.parametrize(('tokenize', 'css_source'), [
    (tokenize, test_data)
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        '(8, foo, [z])', '[8, foo, (z)]', '{8, foo, [z]}', 'func(8, foo, [z])'
    ]
])
def test_token_api(tokenize, css_source):
    """Each source regroups into one container holding 7 content tokens."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    grouped = list(regroup(tokenize(css_source)))
    assert len(grouped) == 1
    # 2 spaces + 2 commas + 3 value tokens = 7 children.
    assert len(grouped[0].content) == 7

View File

@@ -0,0 +1 @@
# Presumably the tinycss release version string — confirm where it is read.
VERSION = '0.4'